Commit 8b3021f3 authored by Yang Yingliang, committed by Xie XiuQi

driver: roce: update roce driver from driver team

Sync roce driver from driver team.
Based on c63ba8b3f1dd8882a7cbe237cffc61c7fa1429f8
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 09262653
@@ -7,8 +7,8 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
 hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
-	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o
+	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_sysfs.o
 obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
 hns-roce-hw-v1-objs := hns_roce_hw_v1.o
 obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_sysfs_v2.o
@@ -29,6 +29,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#include "roce_k_compat.h"
 #include <linux/platform_device.h>
 #include <rdma/ib_addr.h>
@@ -44,42 +45,92 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
-	const struct ib_gid_attr *gid_attr;
 	struct device *dev = hr_dev->dev;
+#ifdef CONFIG_KERNEL_419
+	const struct ib_gid_attr *gid_attr;
+#else
+	struct ib_gid_attr gid_attr;
+	union ib_gid sgid;
+	int ret;
+#endif
 	struct hns_roce_ah *ah;
 	u16 vlan_tag = 0xffff;
+	struct in6_addr in6;
 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+	bool vlan_en = false;

 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
 	if (!ah)
 		return ERR_PTR(-ENOMEM);

-	/* Get mac address */
-	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+		/* Get mac address */
+		memcpy(&in6, grh->dgid.raw, sizeof(grh->dgid.raw));
+		if (rdma_is_multicast_addr(&in6)) {
+			rdma_get_mcast_mac(&in6, ah->av.mac);
+		} else {
+			u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);

-	gid_attr = ah_attr->grh.sgid_attr;
-	if (is_vlan_dev(gid_attr->ndev))
-		vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+			if (!dmac) {
+				kfree(ah);
+				return ERR_PTR(-EINVAL);
+			}
+			memcpy(ah->av.mac, dmac, ETH_ALEN);
+		}

-	if (vlan_tag < 0x1000)
-		vlan_tag |= (rdma_ah_get_sl(ah_attr) &
-			     HNS_ROCE_VLAN_SL_BIT_MASK) <<
-			     HNS_ROCE_VLAN_SL_SHIFT;
+#ifdef CONFIG_KERNEL_419
+		gid_attr = ah_attr->grh.sgid_attr;
+		if (is_vlan_dev(gid_attr->ndev)) {
+			vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+			vlan_en = true;
+		}
+#else
+		/* Get source gid */
+		ret = ib_get_cached_gid(ibpd->device,
+					rdma_ah_get_port_num(ah_attr),
+					grh->sgid_index, &sgid, &gid_attr);
+		if (ret) {
+			dev_err(dev, "get sgid failed! ret = %d\n", ret);
+			kfree(ah);
+			return ERR_PTR(ret);
+		}

-	ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
-				     (rdma_ah_get_port_num(ah_attr) <<
-				     HNS_ROCE_PORT_NUM_SHIFT));
-	ah->av.gid_index = grh->sgid_index;
-	ah->av.vlan = cpu_to_le16(vlan_tag);
-	dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
-		ah->av.vlan);
+		if (gid_attr.ndev) {
+			if (is_vlan_dev(gid_attr.ndev)) {
+				vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
+				vlan_en = true;
+			}
+			dev_put(gid_attr.ndev);
+		}
+#endif

-	if (rdma_ah_get_static_rate(ah_attr))
-		ah->av.stat_rate = IB_RATE_10_GBPS;
+		if (vlan_tag < 0x1000)
+			vlan_tag |= (rdma_ah_get_sl(ah_attr) &
+				     HNS_ROCE_VLAN_SL_BIT_MASK) <<
+				     HNS_ROCE_VLAN_SL_SHIFT;

-	memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
-	ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
-						 HNS_ROCE_SL_SHIFT);
+		ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
+					     (rdma_ah_get_port_num(ah_attr) <<
+					      HNS_ROCE_PORT_NUM_SHIFT));
+		ah->av.gid_index = grh->sgid_index;
+		ah->av.vlan = cpu_to_le16(vlan_tag);
+		ah->av.vlan_en = vlan_en;
+		dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
+			ah->av.vlan);
+
+		if (rdma_ah_get_static_rate(ah_attr))
+			ah->av.stat_rate = IB_RATE_10_GBPS;
+
+		memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+		ah->av.sl_tclass_flowlabel =
+			cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
+				    HNS_ROCE_SL_SHIFT);
+		ah->av.sl_tclass_flowlabel |=
+			cpu_to_le32((grh->traffic_class <<
+				    HNS_ROCE_TCLASS_SHIFT) |
+				    grh->flow_label);
+		ah->av.hop_limit = grh->hop_limit;
+	}

 	return &ah->ibah;
 }
......
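The rewritten hns_roce_create_ah() above only resolves the destination MAC and VLAN when a GRH is present, and it compiles against both the 4.19 RDMA core (sgid_attr hanging off ah_attr) and older cores (ib_get_cached_gid()), selected by CONFIG_KERNEL_419. The compat header itself is not part of this hunk; a minimal sketch of the shape it would need, assuming the switch is derived from the kernel version, is:

/* Illustrative sketch only -- the real roce_k_compat.h added by this
 * series is not shown in the diff.
 */
#ifndef _ROCE_K_COMPAT_H
#define _ROCE_K_COMPAT_H

#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
#define CONFIG_KERNEL_419
#endif

#endif /* _ROCE_K_COMPAT_H */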
@@ -30,6 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#include "roce_k_compat.h"
 #include <linux/platform_device.h>
 #include <linux/vmalloc.h>
@@ -239,9 +240,13 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
 {
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+		hns_roce_cleanup_srq_table(hr_dev);
 	hns_roce_cleanup_qp_table(hr_dev);
 	hns_roce_cleanup_cq_table(hr_dev);
 	hns_roce_cleanup_mr_table(hr_dev);
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+		hns_roce_cleanup_xrcd_table(hr_dev);
 	hns_roce_cleanup_pd_table(hr_dev);
 	hns_roce_cleanup_uar_table(hr_dev);
 }
@@ -176,17 +176,33 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
 		      unsigned long in_modifier, u8 op_modifier, u16 op,
 		      unsigned long timeout)
 {
-	if (hr_dev->is_reset)
-		return 0;
+	int ret;
+
+	if (hr_dev->hw->rst_prc_mbox) {
+		ret = hr_dev->hw->rst_prc_mbox(hr_dev);
+		if (ret == CMD_RST_PRC_SUCCESS)
+			return 0;
+		else if (ret == CMD_RST_PRC_EBUSY)
+			return -EBUSY;
+	}

 	if (hr_dev->cmd.use_events)
-		return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
-					      in_modifier, op_modifier, op,
-					      timeout);
+		ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+					     in_modifier, op_modifier, op,
+					     timeout);
 	else
-		return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
-					      in_modifier, op_modifier, op,
-					      timeout);
+		ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+					     in_modifier, op_modifier, op,
+					     timeout);
+
+	if (ret == CMD_RST_PRC_EBUSY)
+		return -EBUSY;
+
+	if (ret && (hr_dev->hw->rst_prc_mbox &&
+		    hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
+		return 0;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
......
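With this change, hns_roce_cmd_mbox() no longer short-circuits on hr_dev->is_reset; it asks the hardware layer (rst_prc_mbox) how to treat a command around a reset: -EBUSY means the reset is still in flight, while a failure that coincides with a completed reset is swallowed as success. A caller-side sketch of how that contract is meant to be consumed (the wrapper and retry bound are assumptions, not part of the commit):

/* Illustrative: bounded retry while a reset keeps the mailbox busy. */
static int post_mbox_with_retry(struct hns_roce_dev *hr_dev, u64 in_param,
				u64 out_param, unsigned long in_modifier,
				u8 op_modifier, u16 op)
{
	int retry, ret;

	for (retry = 0; retry < 3; retry++) {
		ret = hns_roce_cmd_mbox(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
		if (ret != -EBUSY)	/* done, real error, or reset-swallowed */
			return ret;
		msleep(20);		/* back off while the reset completes */
	}
	return ret;
}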
@@ -53,6 +53,7 @@ enum {
 	HNS_ROCE_CMD_QUERY_QPC = 0x42,
 	HNS_ROCE_CMD_MODIFY_CQC = 0x52,
+	HNS_ROCE_CMD_QUERY_CQC = 0x53,
 	/* CQC BT commands */
 	HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
 	HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
@@ -89,6 +90,18 @@ enum {
 	HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
 	HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,

+	/* CTX BT commands */
+	HNS_ROCE_CMD_READ_SCC_CTX_BT0 = 0xa4,
+	HNS_ROCE_CMD_WRITE_SCC_CTX_BT0 = 0xa5,
+
+	/* QPC TIMER commands */
+	HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33,
+	HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37,
+
+	/* CQC TIMER commands */
+	HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23,
+	HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27,
+
 	/* EQC commands */
 	HNS_ROCE_CMD_CREATE_AEQC = 0x80,
 	HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
@@ -120,6 +133,10 @@ enum {
 	HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
 	HNS_ROCE_CMD_2RST_QP = 0x21,
 	HNS_ROCE_CMD_QUERY_QP = 0x22,
+	HNS_ROCE_CMD_SW2HW_SRQ = 0x70,
+	HNS_ROCE_CMD_MODIFY_SRQC = 0x72,
+	HNS_ROCE_CMD_QUERY_SRQC = 0x73,
+	HNS_ROCE_CMD_HW2SW_SRQ = 0x74,
 };

 int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
......
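The new SRQ opcodes follow the existing SW2HW/HW2SW convention, so they are issued through the same mailbox path as QP and CQ contexts. A hedged sketch of a wrapper (the helper itself is an assumption; only the opcode and hns_roce_cmd_mbox() come from this patch):

/* Illustrative: pushing an SRQ context to hardware with the new opcode. */
static int hns_roce_sw2hw_srq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long srq_num)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_SW2HW_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}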
@@ -33,6 +33,8 @@
 #ifndef _HNS_ROCE_COMMON_H
 #define _HNS_ROCE_COMMON_H

+#include "roce_k_compat.h"
+
 #ifndef assert
 #define assert(cond)
 #endif
@@ -376,9 +378,6 @@
 #define ROCEE_RX_CMQ_TAIL_REG 0x07024
 #define ROCEE_RX_CMQ_HEAD_REG 0x07028

-#define ROCEE_VF_MB_CFG0_REG 0x40
-#define ROCEE_VF_MB_STATUS_REG 0x58
-
 #define ROCEE_VF_EQ_DB_CFG0_REG 0x238
 #define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
......
@@ -29,6 +29,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#include "roce_k_compat.h"
 #include <linux/platform_device.h>
 #include <rdma/ib_umem.h>
......
@@ -6,6 +6,7 @@
 #include <linux/platform_device.h>
 #include <rdma/ib_umem.h>
+#include "roce_k_compat.h"
 #include "hns_roce_device.h"

 int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
......
@@ -88,6 +88,7 @@
 #define BITMAP_RR 1

 #define MR_TYPE_MR 0x00
+#define MR_TYPE_FRMR 0x01
 #define MR_TYPE_DMA 0x03

 #define PKEY_ID 0xffff
@@ -95,11 +96,16 @@
 #define NODE_DESC_SIZE 64
 #define DB_REG_OFFSET 0x1000

+#define HNS_ROCE_CEQ_MAX_BURST_NUM 0xffff
+#define HNS_ROCE_CEQ_MAX_INTERVAL 0xffff
+#define HNS_ROCE_EQ_MAXCNT_MASK 1
+#define HNS_ROCE_EQ_PERIOD_MASK 2
+
 #define SERV_TYPE_RC 0
-#define SERV_TYPE_RD 1
-#define SERV_TYPE_UC 2
+#define SERV_TYPE_RD 2
+#define SERV_TYPE_UC 1
 #define SERV_TYPE_UD 3
+#define SERV_TYPE_XRC 5

 /* Configure to HW for PAGE_SIZE larger than 4KB */
 #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
@@ -108,6 +114,12 @@
 #define PAGES_SHIFT_24 24
 #define PAGES_SHIFT_32 32

+#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
+#define HNS_ROCE_FRMR_MAX_PA 512
+#define SRQ_DB_REG 0x230
+
 enum {
 	HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
 	HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
@@ -193,17 +205,51 @@ enum {
 	HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
 	HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
 	HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+	HNS_ROCE_CAP_FLAG_XRC = BIT(6),
+	HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+	HNS_ROCE_CAP_FLAG_MW = BIT(7),
+	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
+	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
+	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
 };

 enum hns_roce_mtt_type {
 	MTT_TYPE_WQE,
 	MTT_TYPE_CQE,
+	MTT_TYPE_SRQWQE,
+	MTT_TYPE_IDX
 };

 enum {
 	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
 };

+enum hns_roce_reset_stage {
+	HNS_ROCE_STATE_NON_RST,
+	HNS_ROCE_STATE_RST_BEF_DOWN,
+	HNS_ROCE_STATE_RST_DOWN,
+	HNS_ROCE_STATE_RST_UNINIT,
+	HNS_ROCE_STATE_RST_INIT,
+	HNS_ROCE_STATE_RST_INITED,
+};
+
+enum hns_roce_instance_state {
+	HNS_ROCE_STATE_NON_INIT,
+	HNS_ROCE_STATE_INIT,
+	HNS_ROCE_STATE_INITED,
+	HNS_ROCE_STATE_UNINIT,
+};
+
+enum {
+	HNS_ROCE_RST_DIRECT_RETURN = 0,
+};
+
+enum {
+	CMD_RST_PRC_OTHERS,
+	CMD_RST_PRC_SUCCESS,
+	CMD_RST_PRC_EBUSY,
+};
+
 #define HNS_ROCE_CMD_SUCCESS 1

 #define HNS_ROCE_PORT_DOWN 0
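The new capability bits exist so optional verbs can be exposed only when the firmware reports support. A sketch of the expected registration-time gating (illustrative; this snippet is not in the hunk, and it assumes the 4.19-era ib_device with raw function pointers):

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_dev->create_srq = hns_roce_create_srq;
		ib_dev->modify_srq = hns_roce_modify_srq;
		ib_dev->destroy_srq = hns_roce_destroy_srq;
	}
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
		ib_dev->alloc_mw = hns_roce_alloc_mw;
		ib_dev->dealloc_mw = hns_roce_dealloc_mw;
	}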
@@ -213,6 +259,8 @@ enum {
 #define PAGE_ADDR_SHIFT 12

+#define HNS_ROCE_DISABLE_DB 1
+
 struct hns_roce_uar {
 	u64		pfn;
 	unsigned long	index;
@@ -239,6 +287,13 @@ struct hns_roce_pd {
 	unsigned long	pdn;
 };

+struct hns_roce_xrcd {
+	struct ib_xrcd	ibxrcd;
+	unsigned long	xrcdn;
+	struct ib_pd	*pd;
+	struct ib_cq	*cq;
+};
+
 struct hns_roce_bitmap {
 	/* Bitmap Traversal last a bit which is 1 */
 	unsigned long	last;
@@ -293,6 +348,16 @@ struct hns_roce_mtt {
 	enum hns_roce_mtt_type	mtt_type;
 };

+struct hns_roce_mw {
+	struct ib_mw	ibmw;
+	u32		pdn;
+	u32		rkey;
+	int		enabled; /* MW's active status */
+	u32		pbl_buf_pg_sz;
+	u32		pbl_ba_pg_sz;
+	u32		pbl_hop_num;
+};
+
 /* Only support 4K page size for mr register */
 #define MR_SIZE_4K 0
@@ -304,6 +369,7 @@ struct hns_roce_mr {
 	u32		key; /* Key of MR */
 	u32		pd;  /* PD num of MR */
 	u32		access; /* Access permission of MR */
+	u32		npages;
 	int		enabled; /* MR's active status */
 	int		type; /* MR's register type */
 	u64		*pbl_buf; /* MR's PBL space */
@@ -330,6 +396,10 @@ struct hns_roce_mr_table {
 	struct hns_roce_hem_table	mtpt_table;
 	struct hns_roce_buddy		mtt_cqe_buddy;
 	struct hns_roce_hem_table	mtt_cqe_table;
+	struct hns_roce_buddy		mtt_srqwqe_buddy;
+	struct hns_roce_hem_table	mtt_srqwqe_table;
+	struct hns_roce_buddy		mtt_idx_buddy;
+	struct hns_roce_hem_table	mtt_idx_table;
 };

 struct hns_roce_wq {
@@ -420,9 +490,37 @@ struct hns_roce_cq {
 	struct completion	free;
 };

+struct hns_roce_idx_que {
+	struct hns_roce_buf	idx_buf;
+	int			entry_sz;
+	u32			buf_size;
+	struct ib_umem		*umem;
+	struct hns_roce_mtt	mtt;
+	u64			*bitmap;
+};
+
 struct hns_roce_srq {
 	struct ib_srq		ibsrq;
-	int			srqn;
+	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
+	unsigned long		srqn;
+	int			max;
+	int			max_gs;
+	int			wqe_shift;
+	void __iomem		*db_reg_l;
+
+	refcount_t		refcount;
+	struct completion	free;
+
+	struct hns_roce_buf	buf;
+	u64			*wrid;
+	struct ib_umem		*umem;
+	struct hns_roce_mtt	mtt;
+	struct hns_roce_idx_que	idx_que;
+	spinlock_t		lock;
+	int			head;
+	int			tail;
+	u16			wqe_ctr;
+	struct mutex		mutex;
 };

 struct hns_roce_uar_table {
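The idx_que added above is the SRQ's index queue: its bitmap tracks which WQE slots are free, posting a receive claims a slot, and completion returns it. A minimal sketch of the allocation side, assuming a set bit means a free slot (the helper name is hypothetical):

/* Illustrative: claim a free SRQ WQE slot from the index-queue bitmap. */
static int srq_find_empty_entry(struct hns_roce_idx_que *idx_que, int nent)
{
	int i;

	for (i = 0; i < nent; i++)
		if (test_and_clear_bit(i, (unsigned long *)idx_que->bitmap))
			return i;

	return -ENOMEM; /* SRQ is full */
}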
@@ -435,6 +533,14 @@ struct hns_roce_qp_table {
 	struct hns_roce_hem_table	qp_table;
 	struct hns_roce_hem_table	irrl_table;
 	struct hns_roce_hem_table	trrl_table;
+	struct hns_roce_hem_table	scc_ctx_table;
+};
+
+struct hns_roce_qpc_timer_table {
+	struct hns_roce_bitmap		bitmap;
+	spinlock_t			lock;
+	struct radix_tree_root		tree;
+	struct hns_roce_hem_table	table;
 };

 struct hns_roce_cq_table {
@@ -444,6 +550,20 @@ struct hns_roce_cq_table {
 	struct hns_roce_hem_table	table;
 };

+struct hns_roce_cqc_timer_table {
+	struct hns_roce_bitmap		bitmap;
+	spinlock_t			lock;
+	struct radix_tree_root		tree;
+	struct hns_roce_hem_table	table;
+};
+
+struct hns_roce_srq_table {
+	struct hns_roce_bitmap		bitmap;
+	spinlock_t			lock;
+	struct radix_tree_root		tree;
+	struct hns_roce_hem_table	table;
+};
+
 struct hns_roce_raq_table {
 	struct hns_roce_buf_list	*e_raq_buf;
 };
@@ -457,6 +577,7 @@ struct hns_roce_av {
 	u8	dgid[HNS_ROCE_GID_SIZE];
 	u8	mac[6];
 	__le16	vlan;
+	bool	vlan_en;
 };

 struct hns_roce_ah {
@@ -541,6 +662,7 @@ struct hns_roce_qp {
 	struct hns_roce_mtt	mtt;
 	u32			buff_size;
 	struct mutex		mutex;
+	u16			xrcdn;
 	u8			port;
 	u8			phy_port;
 	u8			sl;
@@ -576,7 +698,7 @@ struct hns_roce_ib_iboe {
 enum {
 	HNS_ROCE_EQ_STAT_INVALID = 0,
-	HNS_ROCE_EQ_STAT_VALID = 2,
+	HNS_ROCE_EQ_STAT_VALID = 1,
 };

 struct hns_roce_ceqe {
@@ -592,6 +714,12 @@ struct hns_roce_aeqe {
 			u32 rsv1;
 		} qp_event;

+		struct {
+			__le32 srq;
+			u32 rsv0;
+			u32 rsv1;
+		} srq_event;
+
 		struct {
 			__le32 cq;
 			u32 rsv0;
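srq_event mirrors the existing qp_event/cq_event layouts, so asynchronous SRQ events (e.g. the SRQ limit being reached) can be demultiplexed out of the AEQ and forwarded through the new hns_roce_srq_event() hook. A sketch of that dispatch step (the field path follows the qp_event pattern; the 24-bit mask is an assumption about the hw v2 encoding):

/* Illustrative: forward an SRQ async event parsed from an AEQE. */
static void hns_roce_aeqe_srq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_aeqe *aeqe, int event_type)
{
	u32 srqn = le32_to_cpu(aeqe->event.srq_event.srq) & 0xffffff;

	hns_roce_srq_event(hr_dev, srqn, event_type);
}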
@@ -656,19 +784,29 @@ struct hns_roce_eq_table {
 };

 struct hns_roce_caps {
+	u64	fw_ver;
 	u8	num_ports;
 	int	gid_table_len[HNS_ROCE_MAX_PORTS];
 	int	pkey_table_len[HNS_ROCE_MAX_PORTS];
 	int	local_ca_ack_delay;
 	int	num_uars;
 	u32	phy_num_uars;
-	u32	max_sq_sg;	/* 2 */
+	u32	max_sq_sg;
 	u32	max_sq_inline;	/* 32 */
-	u32	max_rq_sg;	/* 2 */
-	int	num_qps;	/* 256k */
-	u32	max_wqes;	/* 16k */
-	u32	max_sq_desc_sz;	/* 64 */
-	u32	max_rq_desc_sz;	/* 64 */
+	u32	max_rq_sg;
+	u32	max_extend_sg;
+	int	num_qps;
+	int	reserved_qps;
+	int	num_qpc_timer;
+	int	num_cqc_timer;
+	u32	max_srq_sg;
+	int	num_srqs;
+	u32	max_wqes;
+	u32	max_srqs;
+	u32	max_srq_wrs;
+	u32	max_srq_sges;
+	u32	max_sq_desc_sz;
+	u32	max_rq_desc_sz;
 	u32	max_srq_desc_sz;
 	int	max_qp_init_rdma;
 	int	max_qp_dest_rdma;
@@ -677,16 +815,22 @@ struct hns_roce_caps {
 	int	min_cqes;
 	u32	min_wqes;
 	int	reserved_cqs;
-	int	num_aeq_vectors;	/* 1 */
+	int	reserved_srqs;
+	u32	max_srqwqes;
+	int	num_aeq_vectors;
 	int	num_comp_vectors;
 	int	num_other_vectors;
 	int	num_mtpts;
 	u32	num_mtt_segs;
 	u32	num_cqe_segs;
+	u32	num_srqwqe_segs;
+	u32	num_idx_segs;
 	int	reserved_mrws;
 	int	reserved_uars;
 	int	num_pds;
 	int	reserved_pds;
+	int	num_xrcds;
+	int	reserved_xrcds;
 	u32	mtt_entry_sz;
 	u32	cq_entry_sz;
 	u32	page_size_cap;
@@ -696,6 +840,11 @@ struct hns_roce_caps {
 	int	irrl_entry_sz;
 	int	trrl_entry_sz;
 	int	cqc_entry_sz;
+	int	srqc_entry_sz;
+	int	idx_entry_sz;
+	int	scc_ctx_entry_sz;
+	int	qpc_timer_entry_sz;
+	int	cqc_timer_entry_sz;
 	u32	pbl_ba_pg_sz;
 	u32	pbl_buf_pg_sz;
 	u32	pbl_hop_num;
@@ -703,9 +852,12 @@ struct hns_roce_caps {
 	int		ceqe_depth;
 	enum ib_mtu	max_mtu;
 	u32		qpc_bt_num;
+	u32		qpc_timer_bt_num;
 	u32		srqc_bt_num;
 	u32		cqc_bt_num;
+	u32		cqc_timer_bt_num;
 	u32		mpt_bt_num;
+	u32		scc_ctx_bt_num;
 	u32		qpc_ba_pg_sz;
 	u32		qpc_buf_pg_sz;
 	u32		qpc_hop_num;
@@ -721,9 +873,24 @@ struct hns_roce_caps {
 	u32	mtt_ba_pg_sz;
 	u32	mtt_buf_pg_sz;
 	u32	mtt_hop_num;
+	u32	scc_ctx_ba_pg_sz;
+	u32	scc_ctx_buf_pg_sz;
+	u32	scc_ctx_hop_num;
+	u32	qpc_timer_ba_pg_sz;
+	u32	qpc_timer_buf_pg_sz;
+	u32	qpc_timer_hop_num;
+	u32	cqc_timer_ba_pg_sz;
+	u32	cqc_timer_buf_pg_sz;
+	u32	cqc_timer_hop_num;
 	u32	cqe_ba_pg_sz;
 	u32	cqe_buf_pg_sz;
 	u32	cqe_hop_num;
+	u32	srqwqe_ba_pg_sz;
+	u32	srqwqe_buf_pg_sz;
+	u32	srqwqe_hop_num;
+	u32	idx_ba_pg_sz;
+	u32	idx_buf_pg_sz;
+	u32	idx_hop_num;
 	u32	eqe_ba_pg_sz;
 	u32	eqe_buf_pg_sz;
 	u32	eqe_hop_num;
@@ -738,9 +905,40 @@ struct hns_roce_work {
 	struct hns_roce_dev *hr_dev;
 	struct work_struct work;
 	u32 qpn;
+	u32 cqn;
 	int event_type;
 	int sub_type;
 };

+struct hns_roce_stat {
+	int cqn;
+	int srqn;
+	u32 ceqn;
+	u32 qpn;
+	u32 aeqn;
+	int key;
+};
+
+struct hns_roce_dfx_hw {
+	int (*query_cqc_stat)(struct hns_roce_dev *hr_dev,
+			      char *buf, int *desc);
+	int (*query_cmd_stat)(struct hns_roce_dev *hr_dev,
+			      char *buf, int *desc);
+	int (*query_qpc_stat)(struct hns_roce_dev *hr_dev,
+			      char *buf, int *desc);
+	int (*query_aeqc_stat)(struct hns_roce_dev *hr_dev,
+			       char *buf, int *desc);
+	int (*query_srqc_stat)(struct hns_roce_dev *hr_dev,
+			       char *buf, int *desc);
+	int (*query_pkt_stat)(struct hns_roce_dev *hr_dev,
+			      char *buf, int *desc);
+	int (*query_mpt_stat)(struct hns_roce_dev *hr_dev,
+			      char *buf, int *desc);
+	int (*query_ceqc_stat)(struct hns_roce_dev *hr_dev,
+			       char *buf, int *desc);
+	int (*modify_eq)(struct hns_roce_dev *hr_dev,
+			 u16 eq_count, u16 eq_period, u16 type);
+};
+
 struct hns_roce_hw {
 	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
@@ -753,8 +951,14 @@ struct hns_roce_hw {
 		       u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
 		       u16 token, int event);
 	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
+	int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
+#ifdef CONFIG_KERNEL_419
 	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
 		       const union ib_gid *gid, const struct ib_gid_attr *attr);
+#else
+	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+		       union ib_gid *gid, const struct ib_gid_attr *attr);
+#endif
 	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
 	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
 			enum ib_mtu mtu);
@@ -764,6 +968,8 @@ struct hns_roce_hw {
 			  struct hns_roce_mr *mr, int flags, u32 pdn,
 			  int mr_access_flags, u64 iova, u64 size,
 			  void *mb_buf);
+	int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
 	void (*write_cqc)(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
 			  dma_addr_t dma_handle, int nent, u32 vector);
@@ -778,10 +984,19 @@ struct hns_roce_hw {
 			 int attr_mask, enum ib_qp_state cur_state,
 			 enum ib_qp_state new_state);
 	int (*destroy_qp)(struct ib_qp *ibqp);
+	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
+				    struct hns_roce_qp *hr_qp);
+#ifdef CONFIG_KERNEL_419
 	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			 const struct ib_send_wr **bad_wr);
 	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
 			 const struct ib_recv_wr **bad_recv_wr);
+#else
+	int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
+			 struct ib_send_wr **bad_wr);
+	int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
+			 struct ib_recv_wr **bad_recv_wr);
+#endif
 	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
@@ -789,6 +1004,22 @@ struct hns_roce_hw {
 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 	int (*init_eq)(struct hns_roce_dev *hr_dev);
 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
+	void (*write_srqc)(struct hns_roce_dev *hr_dev,
+			   struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn,
+			   void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx,
+			   dma_addr_t dma_handle_wqe,
+			   dma_addr_t dma_handle_idx);
+	int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
+			  enum ib_srq_attr_mask srq_attr_mask,
+			  struct ib_udata *udata);
+	int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+#ifdef CONFIG_KERNEL_419
+	int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+			     const struct ib_recv_wr **bad_wr);
+#else
+	int (*post_srq_recv)(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			     struct ib_recv_wr **bad_wr);
+#endif
 };

 struct hns_roce_dev {
@@ -802,6 +1033,8 @@ struct hns_roce_dev {
 	spinlock_t		bt_cmd_lock;
 	bool			active;
 	bool			is_reset;
+	bool			dis_db;
+	unsigned long		reset_cnt;
 	struct hns_roce_ib_iboe iboe;

 	struct list_head	pgdir_list;
@@ -820,21 +1053,28 @@ struct hns_roce_dev {
 	struct hns_roce_cmdq	cmd;
 	struct hns_roce_bitmap	pd_bitmap;
+	struct hns_roce_bitmap	xrcd_bitmap;
 	struct hns_roce_uar_table uar_table;
 	struct hns_roce_mr_table  mr_table;
 	struct hns_roce_cq_table  cq_table;
+	struct hns_roce_srq_table srq_table;
 	struct hns_roce_qp_table  qp_table;
 	struct hns_roce_eq_table  eq_table;
+	struct hns_roce_qpc_timer_table qpc_timer_table;
+	struct hns_roce_cqc_timer_table cqc_timer_table;

 	int			cmd_mod;
 	int			loop_idc;
 	u32			sdb_offset;
 	u32			odb_offset;
-	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
-	u32			tptr_size;	/* only for hw v1 */
+	dma_addr_t		uar2_dma_addr;
+	u32			uar2_size;
 	const struct hns_roce_hw *hw;
+	const struct hns_roce_dfx_hw *dfx;
 	void			*priv;
 	struct workqueue_struct *irq_workq;
+	struct hns_roce_stat	hr_stat;
+	u32			func_num;
 };

 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -853,6 +1093,11 @@ static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
 	return container_of(ibpd, struct hns_roce_pd, ibpd);
 }

+static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
+{
+	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
+}
+
 static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
 {
 	return container_of(ibah, struct hns_roce_ah, ibah);
@@ -863,6 +1108,11 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct hns_roce_mr, ibmr);
 }

+static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
+{
+	return container_of(ibmw, struct hns_roce_mw, ibmw);
+}
+
 static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
 {
 	return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -926,16 +1176,20 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
 			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);

 void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);

 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
 void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
@@ -961,6 +1215,11 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
 				struct ib_udata *udata);
 int hns_roce_dealloc_pd(struct ib_pd *pd);

+struct ib_xrcd *hns_roce_ib_alloc_xrcd(struct ib_device *ib_dev,
+				       struct ib_ucontext *context,
+				       struct ib_udata *udata);
+int hns_roce_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+
 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				   u64 virt_addr, int access_flags,
@@ -968,12 +1227,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
 			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
 			   struct ib_udata *udata);
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+				u32 max_num_sg);
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		       unsigned int *sg_offset);
 int hns_roce_dereg_mr(struct ib_mr *ibmr);
 int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_cmd_mailbox *mailbox,
 		       unsigned long mpt_index);
 unsigned long key_to_hw_index(u32 key);

+struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
+				struct ib_udata *udata);
+int hns_roce_dealloc_mw(struct ib_mw *ibmw);
+
 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 		       struct hns_roce_buf *buf);
 int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
@@ -982,6 +1249,16 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
 			       struct hns_roce_mtt *mtt, struct ib_umem *umem);

+struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
+				   struct ib_srq_init_attr *srq_init_attr,
+				   struct ib_udata *udata);
+int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
+			enum ib_srq_attr_mask srq_attr_mask,
+			struct ib_udata *udata);
+int hns_roce_destroy_srq(struct ib_srq *ibsrq);
+
+struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn);
+
 struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
 				 struct ib_qp_init_attr *init_attr,
 				 struct ib_udata *udata);
@@ -1001,7 +1278,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
 void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
 void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 			       int cnt);
-__be32 send_ieth(const struct ib_send_wr *wr);
+__be32 send_ieth(struct ib_send_wr *wr);
 int to_hr_qp_type(int qp_type);

 struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
@@ -1023,8 +1300,10 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
+void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
 int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
+int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev);
+void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev);

 #endif /* _HNS_ROCE_DEVICE_H */
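The hns_roce_dfx_hw ops declared earlier in this header are implemented for hw v2 by the new hns_roce_hw_sysfs_v2.c file further down. A sketch of how they would be wired up (the instance name is an assumption, and only some of the v2 helpers are visible in this diff):

/* Illustrative: DFX ops table for hw v2. */
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_qpc_stat  = hns_roce_v2_query_qpc_stat,
	.query_srqc_stat = hns_roce_v2_query_srqc_stat,
	.query_mpt_stat  = hns_roce_v2_query_mpt_stat,
	.query_aeqc_stat = hns_roce_v2_query_aeqc_stat,
	.query_pkt_stat  = hns_roce_v2_query_pkt_stat,
};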
@@ -30,6 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#include "roce_k_compat.h"
 #include <linux/platform_device.h>
 #include "hns_roce_device.h"
@@ -45,8 +46,13 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
 	    (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
 	    (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
 	    (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+	    (hr_dev->caps.scc_ctx_hop_num && type == HEM_TYPE_SCC_CTX) ||
+	    (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
+	    (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
 	    (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
-	    (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))
+	    (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
+	    (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
+	    (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
 		return true;

 	return false;
@@ -123,6 +129,30 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
 		mhop->hop_num = hr_dev->caps.cqc_hop_num;
 		break;
+	case HEM_TYPE_SCC_CTX:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.scc_ctx_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.scc_ctx_ba_pg_sz
+					    + PAGE_SHIFT);
+		mhop->ba_l0_num = hr_dev->caps.scc_ctx_bt_num;
+		mhop->hop_num = hr_dev->caps.scc_ctx_hop_num;
+		break;
+	case HEM_TYPE_QPC_TIMER:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+					    + PAGE_SHIFT);
+		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
+		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
+		break;
+	case HEM_TYPE_CQC_TIMER:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+					    + PAGE_SHIFT);
+		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
+		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
+		break;
 	case HEM_TYPE_SRQC:
 		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
 					     + PAGE_SHIFT);
@@ -147,6 +177,22 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
 		mhop->hop_num = hr_dev->caps.cqe_hop_num;
 		break;
+	case HEM_TYPE_SRQWQE:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
+					    + PAGE_SHIFT);
+		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
+		break;
+	case HEM_TYPE_IDX:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
+					    + PAGE_SHIFT);
+		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->hop_num = hr_dev->caps.idx_hop_num;
+		break;
 	default:
 		dev_err(dev, "Table %d not support multi-hop addressing!\n",
 			table->type);
@@ -157,7 +203,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 		return 0;

 	/*
-	 * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
+	 * QPC/MTPT/CQC/SRQC/SCC_CTX alloc hem for buffer pages.
 	 * MTT/CQE alloc hem for bt pages.
 	 */
 	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
@@ -468,7 +514,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
 	}

 	/*
-	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
+	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCC_CTX.
 	 * alloc bt space chunk for MTT/CQE.
 	 */
 	size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
@@ -575,6 +621,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 	mutex_unlock(&table->mutex);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(hns_roce_table_get);

 static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 				    struct hns_roce_hem_table *table,
@@ -640,7 +687,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 	}

 	/*
-	 * free buffer space chunk for QPC/MTPT/CQC/SRQC.
+	 * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCC_CTX.
 	 * free bt space chunk for MTT/CQE.
 	 */
 	hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
@@ -717,6 +764,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 	mutex_unlock(&table->mutex);
 }
+EXPORT_SYMBOL_GPL(hns_roce_table_put);

 void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_hem_table *table,
@@ -886,6 +934,30 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 		num_bt_l0 = hr_dev->caps.cqc_bt_num;
 		hop_num = hr_dev->caps.cqc_hop_num;
 		break;
+	case HEM_TYPE_SCC_CTX:
+		buf_chunk_size = 1 << (hr_dev->caps.scc_ctx_buf_pg_sz
+				       + PAGE_SHIFT);
+		bt_chunk_size = 1 << (hr_dev->caps.scc_ctx_ba_pg_sz
+				      + PAGE_SHIFT);
+		num_bt_l0 = hr_dev->caps.scc_ctx_bt_num;
+		hop_num = hr_dev->caps.scc_ctx_hop_num;
+		break;
+	case HEM_TYPE_QPC_TIMER:
+		buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+				       + PAGE_SHIFT);
+		bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+				      + PAGE_SHIFT);
+		num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
+		hop_num = hr_dev->caps.qpc_timer_hop_num;
+		break;
+	case HEM_TYPE_CQC_TIMER:
+		buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+				       + PAGE_SHIFT);
+		bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+				      + PAGE_SHIFT);
+		num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
+		hop_num = hr_dev->caps.cqc_timer_hop_num;
+		break;
 	case HEM_TYPE_SRQC:
 		buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
 				       + PAGE_SHIFT);
@@ -906,6 +978,18 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 		bt_chunk_size = buf_chunk_size;
 		hop_num = hr_dev->caps.cqe_hop_num;
 		break;
+	case HEM_TYPE_SRQWQE:
+		buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
+				       + PAGE_SHIFT);
+		bt_chunk_size = buf_chunk_size;
+		hop_num = hr_dev->caps.srqwqe_hop_num;
+		break;
+	case HEM_TYPE_IDX:
+		buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
+				       + PAGE_SHIFT);
+		bt_chunk_size = buf_chunk_size;
+		hop_num = hr_dev->caps.idx_hop_num;
+		break;
 	default:
 		dev_err(dev,
 			"Table %d not support to init hem table here!\n",
@@ -1041,7 +1125,25 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,

 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
 {
+	if (hr_dev->caps.num_idx_segs)
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_idx_table);
+	if (hr_dev->caps.num_srqwqe_segs)
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_srqwqe_table);
+	if (hr_dev->caps.srqc_entry_sz)
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->srq_table.table);
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
+	if (hr_dev->caps.qpc_timer_entry_sz)
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->qpc_timer_table.table);
+	if (hr_dev->caps.cqc_timer_entry_sz)
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->cqc_timer_table.table);
+	if (hr_dev->caps.scc_ctx_entry_sz)
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->qp_table.scc_ctx_table);
 	if (hr_dev->caps.trrl_entry_sz)
 		hns_roce_cleanup_hem_table(hr_dev,
 					   &hr_dev->qp_table.trrl_table);
......
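The cleanup above has a matching init-side counterpart that is not in this hunk: hns_roce_init_hem() is expected to create each table under the same capability checks, e.g. for the SCC context table (sketch only; the error label is hypothetical):

	if (hr_dev->caps.scc_ctx_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.scc_ctx_table,
					      HEM_TYPE_SCC_CTX,
					      hr_dev->caps.scc_ctx_entry_sz,
					      hr_dev->caps.num_qps, 1);
		if (ret)
			goto err_unmap; /* unwind tables created so far */
	}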
@@ -44,10 +44,15 @@ enum {
 	HEM_TYPE_MTPT,
 	HEM_TYPE_CQC,
 	HEM_TYPE_SRQC,
+	HEM_TYPE_SCC_CTX,
+	HEM_TYPE_QPC_TIMER,
+	HEM_TYPE_CQC_TIMER,

 	/* UNMAP HEM */
 	HEM_TYPE_MTT,
 	HEM_TYPE_CQE,
+	HEM_TYPE_SRQWQE,
+	HEM_TYPE_IDX,
 	HEM_TYPE_IRRL,
 	HEM_TYPE_TRRL,
 };
......
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_v2_mpt_entry *mpt_ctx;
struct hns_roce_cmd_mailbox *mailbox;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *mpt;
int ret;
int i;
char *buff;
int key = hr_dev->hr_stat.key;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
	kfree(buff);	/* don't leak the scratch buffer */
	return PTR_ERR(mailbox);
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_READ_MPT_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba));
else
goto err_cmd;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_READ_MPT_BT1,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba));
else
goto err_cmd;
mpt_ctx = kzalloc(sizeof(*mpt_ctx), GFP_KERNEL);
if (!mpt_ctx) {
ret = -ENOMEM;
goto err_cmd;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_QUERY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(mpt_ctx, mailbox->buf, sizeof(*mpt_ctx));
else
goto err_mailbox;
*desc += sprintf(buff + *desc, "MPT(0x%x) BT0: 0x%llx\n", key, bt0_ba);
*desc += sprintf(buff + *desc, "MPT(0x%x) BT1: 0x%llx\n", key, bt1_ba);
mpt = (int *)mpt_ctx;
for (i = 0; i < (sizeof(*mpt_ctx) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"MPT(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
key, *mpt, *(mpt + 1), *(mpt + 2),
*(mpt + 3), *(mpt + 4), *(mpt + 5),
*(mpt + 6), *(mpt + 7));
mpt += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(mpt_ctx);
err_cmd:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
kfree(buff);
return ret;
}
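/*
 * Illustrative usage note (not part of this file): each query helper
 * fills a caller-supplied buffer and advances a length cursor, which
 * maps directly onto a sysfs show() callback. The attribute wiring
 * below is an assumption.
 */
static ssize_t mpt_stat_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct hns_roce_dev *hr_dev = dev_get_drvdata(dev);
	int count = 0;
	int ret;

	ret = hr_dev->dfx->query_mpt_stat(hr_dev, buf, &count);

	return ret ? ret : count;
}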
int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_srq_context *srq_context;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *srqc;
int ret;
int i = 0;
char *buff;
int srqn = hr_dev->hr_stat.srqn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
	kfree(buff);	/* don't leak the scratch buffer */
	return PTR_ERR(mailbox);
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0,
HNS_ROCE_CMD_READ_SRQC_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba));
else
goto err_cmd;
srq_context = kzalloc(sizeof(*srq_context), GFP_KERNEL);
if (!srq_context) {
ret = -ENOMEM;
goto err_cmd;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0,
HNS_ROCE_CMD_QUERY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
else
goto err_mailbox;
*desc += sprintf(buff + *desc,
"SRQC(0x%x) BT0: 0x%llx\n", srqn, bt0_ba);
*desc += sprintf(buff + *desc,
"SRQC(0x%x) BT1: 0x%llx\n", srqn, bt1_ba);
srqc = (int *)srq_context;
for (i = 0; i < (sizeof(*srq_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"SRQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
srqn, *srqc, *(srqc + 1), *(srqc + 2),
*(srqc + 3), *(srqc + 4), *(srqc + 5),
*(srqc + 6), *(srqc + 7));
srqc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(srq_context);
err_cmd:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
kfree(buff);
return ret;
}
int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_v2_qp_context *qp_context;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *qpc;
int ret;
int i = 0;
char *buff;
int qpn = hr_dev->hr_stat.qpn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
	kfree(buff);	/* don't leak the scratch buffer */
	return PTR_ERR(mailbox);
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_READ_QPC_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba));
else
goto err_cmd;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_READ_QPC_BT1,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba));
else
goto err_cmd;
qp_context = kzalloc(sizeof(*qp_context), GFP_KERNEL);
if (!qp_context) {
ret = -ENOMEM;
goto err_cmd;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(qp_context, mailbox->buf, sizeof(*qp_context));
else
goto err_mailbox;
*desc += sprintf(buff + *desc, "QPC(0x%x) BT0: 0x%llx\n", qpn, bt0_ba);
*desc += sprintf(buff + *desc, "QPC(0x%x) BT1: 0x%llx\n", qpn, bt1_ba);
qpc = (int *)qp_context;
for (i = 0; i < (sizeof(*qp_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"QPC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
qpn, *qpc, *(qpc + 1), *(qpc + 2),
*(qpc + 3), *(qpc + 4), *(qpc + 5),
*(qpc + 6), *(qpc + 7));
qpc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(qp_context);
err_cmd:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
kfree(buff);
return ret;
}
int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_eq_context *eq_context;
int *aeqc;
int ret;
int i = 0;
char *buff;
int aeqn;
aeqn = hr_dev->hr_stat.aeqn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_aeqc_buff;
}
eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL);
if (!eq_context) {
ret = -ENOMEM;
goto err_context;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, aeqn, 0,
HNS_ROCE_CMD_QUERY_AEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(eq_context, mailbox->buf, sizeof(*eq_context));
else
goto err_mailbox;
aeqc = (int *)eq_context;
for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"AEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
aeqn, *aeqc, *(aeqc + 1), *(aeqc + 2),
*(aeqc + 3), *(aeqc + 4), *(aeqc + 5),
*(aeqc + 6), *(aeqc + 7));
aeqc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(eq_context);
err_context:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_aeqc_buff:
kfree(buff);
return ret;
}
#define CMD_NUM_QUERY_PKT_CNT (8)
int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev,
char *buf, int *buff_size)
{
struct hns_roce_cmq_desc desc[CMD_NUM_QUERY_PKT_CNT] = { {0} };
struct rdfx_query_pkt_cnt *resp_query[CMD_NUM_QUERY_PKT_CNT];
struct hns_roce_cmq_desc desc_cqe = {0};
struct rdfx_query_cqe_cnt *resp_cqe =
(struct rdfx_query_cqe_cnt *)desc_cqe.data;
struct hns_roce_cmq_desc desc_cnp_rx = {0};
struct rdfx_query_cnp_rx_cnt *resp_cnp_rx =
(struct rdfx_query_cnp_rx_cnt *)desc_cnp_rx.data;
struct hns_roce_cmq_desc desc_cnp_tx = {0};
struct rdfx_query_cnp_tx_cnt *resp_cnp_tx =
(struct rdfx_query_cnp_tx_cnt *)desc_cnp_tx.data;
int status;
int i;
char *buff;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
for (i = 0; i < CMD_NUM_QUERY_PKT_CNT; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_QUEYR_PKT_CNT, true);
if (i < (CMD_NUM_QUERY_PKT_CNT - 1))
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
resp_query[i] = (struct rdfx_query_pkt_cnt *)desc[i].data;
}
status = hns_roce_cmq_send(hr_dev, desc, CMD_NUM_QUERY_PKT_CNT);
if (status) {
	kfree(buff);	/* don't leak the scratch buffer */
	return status;
}
hns_roce_cmq_setup_basic_desc(&desc_cqe,
HNS_ROCE_OPC_QUEYR_CQE_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cqe, 1);
if (status) {
kfree(buff);
return status;
}
if (hr_dev->pci_dev->revision == 0x21) {
hns_roce_cmq_setup_basic_desc(&desc_cnp_rx,
HNS_ROCE_OPC_QUEYR_CNP_RX_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cnp_rx, 1);
if (status) {
kfree(buff);
return status;
}
hns_roce_cmq_setup_basic_desc(&desc_cnp_tx,
HNS_ROCE_OPC_QUEYR_CNP_TX_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cnp_tx, 1);
if (status) {
kfree(buff);
return status;
}
}
*buff_size += sprintf(buff + *buff_size,
"RX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->rc_pkt_num, resp_query[1]->rc_pkt_num,
resp_query[2]->rc_pkt_num, resp_query[3]->rc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->uc_pkt_num, resp_query[1]->uc_pkt_num,
resp_query[2]->uc_pkt_num, resp_query[3]->uc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->ud_pkt_num, resp_query[1]->ud_pkt_num,
resp_query[2]->ud_pkt_num, resp_query[3]->ud_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->xrc_pkt_num, resp_query[1]->xrc_pkt_num,
resp_query[2]->xrc_pkt_num, resp_query[3]->xrc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->total_pkt_num, resp_query[1]->total_pkt_num,
resp_query[2]->total_pkt_num, resp_query[3]->total_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->error_pkt_num, resp_query[1]->error_pkt_num,
resp_query[2]->error_pkt_num, resp_query[3]->error_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->rc_pkt_num, resp_query[5]->rc_pkt_num,
resp_query[6]->rc_pkt_num, resp_query[7]->rc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->uc_pkt_num, resp_query[5]->uc_pkt_num,
resp_query[6]->uc_pkt_num, resp_query[7]->uc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->ud_pkt_num, resp_query[5]->ud_pkt_num,
resp_query[6]->ud_pkt_num, resp_query[7]->ud_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->xrc_pkt_num, resp_query[5]->xrc_pkt_num,
resp_query[6]->xrc_pkt_num, resp_query[7]->xrc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->total_pkt_num, resp_query[5]->total_pkt_num,
resp_query[6]->total_pkt_num, resp_query[7]->total_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->error_pkt_num, resp_query[5]->error_pkt_num,
resp_query[6]->error_pkt_num, resp_query[7]->error_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"CQE : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_cqe->port0_cqe, resp_cqe->port1_cqe,
resp_cqe->port2_cqe, resp_cqe->port3_cqe);
*buff_size += sprintf(buff + *buff_size,
"CNP RX : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_cnp_rx->port0_cnp_rx, resp_cnp_rx->port1_cnp_rx,
resp_cnp_rx->port2_cnp_rx, resp_cnp_rx->port3_cnp_rx);
*buff_size += sprintf(buff + *buff_size,
"CNP TX : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_cnp_tx->port0_cnp_tx, resp_cnp_tx->port1_cnp_tx,
resp_cnp_tx->port2_cnp_tx, resp_cnp_tx->port3_cnp_tx);
memcpy(buf, buff, *buff_size);
kfree(buff);
return status;
}
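
The packet counters are fetched as one chained request: every descriptor except the last sets HNS_ROCE_CMD_FLAG_NEXT, which tells firmware to treat the array as a single multi-descriptor command. The chaining pattern, factored out of the loop above as a sketch (the helper name is illustrative, not a driver symbol):

static int query_chained(struct hns_roce_dev *hr_dev,
			 struct hns_roce_cmq_desc *desc, int n,
			 enum hns_roce_opcode_type opcode)
{
	int i;

	for (i = 0; i < n; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, true);
		/* all but the last descriptor carry the NEXT flag */
		if (i < n - 1)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	return hns_roce_cmq_send(hr_dev, desc, n);
}
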
int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_eq_context *eq_context;
int *ceqc;
int ret;
int i = 0;
char *buff;
int ceqn = hr_dev->hr_stat.ceqn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_ceqc_buff;
}
eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL);
if (!eq_context) {
ret = -ENOMEM;
goto err_context;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, ceqn, 0,
HNS_ROCE_CMD_QUERY_CEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
goto err_mailbox;
memcpy(eq_context, mailbox->buf, sizeof(*eq_context));
ceqc = (int *)eq_context;
for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"CEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
ceqn, *ceqc, *(ceqc + 1), *(ceqc + 2),
*(ceqc + 3), *(ceqc + 4), *(ceqc + 5),
*(ceqc + 6), *(ceqc + 7));
ceqc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(eq_context);
err_context:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_ceqc_buff:
kfree(buff);
return ret;
}
int hns_roce_v2_query_cmd_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmq_desc desc_cnt;
struct hns_roce_query_mbdb_cnt *resp_cnt =
(struct hns_roce_query_mbdb_cnt *)desc_cnt.data;
struct hns_roce_cmq_desc desc_dfx;
int status;
char *buff;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
hns_roce_cmq_setup_basic_desc(&desc_cnt,
HNS_ROCE_OPC_QUEYR_MBDB_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cnt, 1);
if (status) {
kfree(buff);
return status;
}
hns_roce_cmq_setup_basic_desc(&desc_dfx,
HNS_ROCE_OPC_QUEYR_MDB_DFX, true);
status = hns_roce_cmq_send(hr_dev, &desc_dfx, 1);
if (status) {
kfree(buff);
return status;
}
*desc += sprintf(buff + *desc, "MB ISSUE CNT : 0x%08x\n",
resp_cnt->mailbox_issue_cnt);
*desc += sprintf(buff + *desc, "MB EXEC CNT : 0x%08x\n",
resp_cnt->mailbox_exe_cnt);
*desc += sprintf(buff + *desc, "DB ISSUE CNT : 0x%08x\n",
resp_cnt->doorbell_issue_cnt);
*desc += sprintf(buff + *desc, "DB EXEC CNT : 0x%08x\n",
resp_cnt->doorbell_exe_cnt);
*desc += sprintf(buff + *desc, "EQDB ISSUE CNT : 0x%08x\n",
resp_cnt->eq_doorbell_issue_cnt);
*desc += sprintf(buff + *desc, "EQDB EXEC CNT : 0x%08x\n",
resp_cnt->eq_doorbell_exe_cnt);
memcpy(buf, buff, *desc);
kfree(buff);
return status;
}
int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev,
u64 *bt0_ba, u64 *bt1_ba, int cqn,
struct hns_roce_v2_cq_context *cq_context)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_READ_CQC_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
pr_err("QUERY CQ bt0 cmd process error\n");
goto out;
}
memcpy(bt0_ba, mailbox->buf, sizeof(*bt0_ba));
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_READ_CQC_BT1,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
pr_err("QUERY CQ bt1 cmd process error\n");
goto out;
}
memcpy(bt1_ba, mailbox->buf, sizeof(*bt1_ba));
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_QUERY_CQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
pr_err("QUERY CQ context cmd process error\n");
goto out;
}
memcpy(cq_context, mailbox->buf, sizeof(*cq_context));
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
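
hns_roce_v2_query_cqc() separates the raw mailbox query from the sysfs formatting, so it can be reused elsewhere in the driver. A hedged usage sketch (the caller below is illustrative, not part of this patch):

static void dump_one_cqc(struct hns_roce_dev *hr_dev, int cqn)
{
	struct hns_roce_v2_cq_context ctx;
	u64 bt0 = 0;
	u64 bt1 = 0;

	if (hns_roce_v2_query_cqc(hr_dev, &bt0, &bt1, cqn, &ctx))
		return;	/* the callee already logged the failure */

	pr_info("CQC(0x%x) BT0: 0x%llx BT1: 0x%llx\n", cqn, bt0, bt1);
}
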
int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_v2_cq_context *cq_context;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *cqc;
int i, ret;
int cqn = hr_dev->hr_stat.cqn;
cq_context = kzalloc(sizeof(*cq_context), GFP_KERNEL);
if (!cq_context)
return -ENOMEM;
ret = hns_roce_v2_query_cqc(hr_dev, &bt0_ba, &bt1_ba, cqn, cq_context);
if (ret)
goto out;
*desc += sprintf(buf + *desc, "CQC(0x%x) BT0: 0x%llx\n", cqn, bt0_ba);
*desc += sprintf(buf + *desc, "CQC(0x%x) BT1: 0x%llx\n", cqn, bt1_ba);
cqc = (int *)cq_context;
for (i = 0; i < (sizeof(*cq_context) >> 2); i += 8) {
*desc += sprintf(buf + *desc,
"CQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
cqn, *cqc, *(cqc + 1), *(cqc + 2),
*(cqc + 3), *(cqc + 4), *(cqc + 5),
*(cqc + 6), *(cqc + 7));
cqc += 8;
}
out:
kfree(cq_context);
return ret;
}
int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev,
u16 eq_count, u16 eq_period, u16 type)
{
struct hns_roce_eq *eq = hr_dev->eq_table.eq;
struct hns_roce_eq_context *eqc;
struct hns_roce_eq_context *eqc_mask;
struct hns_roce_cmd_mailbox *mailbox;
unsigned int eq_cmd;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
eqc = mailbox->buf;
eqc_mask = (struct hns_roce_eq_context *)mailbox->buf + 1;
memset(eqc_mask, 0xff, sizeof(*eqc_mask));
if (type == HNS_ROCE_EQ_MAXCNT_MASK) {
roce_set_field(eqc->byte_12,
HNS_ROCE_EQC_MAX_CNT_M,
HNS_ROCE_EQC_MAX_CNT_S, eq_count);
roce_set_field(eqc_mask->byte_12,
HNS_ROCE_EQC_MAX_CNT_M,
HNS_ROCE_EQC_MAX_CNT_S, 0);
} else if (type == HNS_ROCE_EQ_PERIOD_MASK) {
roce_set_field(eqc->byte_12,
HNS_ROCE_EQC_PERIOD_M,
HNS_ROCE_EQC_PERIOD_S, eq_period);
roce_set_field(eqc_mask->byte_12,
HNS_ROCE_EQC_PERIOD_M,
HNS_ROCE_EQC_PERIOD_S, 0);
}
eq_cmd = HNS_ROCE_CMD_MODIFY_CEQC;
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 1,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
dev_err(hr_dev->dev, "MODIFY EQ failed to send cmd mailbox, ret = %d.\n", ret);
return ret;
}
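
hns_roce_v2_modify_eq() uses the context-plus-mask layout seen in other HiSilicon modify commands: the mailbox carries two contexts back to back, the mask half is preset to all ones, and only the field being changed is written in the context and cleared in the mask. Assuming that convention (mask bit 1 = keep, 0 = take the new value), the per-field step reduces to the sketch below:

/* Sketch only; assumes the modify-mask semantics described above. */
static void set_masked_field(struct hns_roce_eq_context *eqc,
			     struct hns_roce_eq_context *eqc_mask,
			     u32 mask, u32 shift, u32 val)
{
	roce_set_field(eqc->byte_12, mask, shift, val);		/* new value */
	roce_set_field(eqc_mask->byte_12, mask, shift, 0);	/* unmask the field */
}
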
...@@ -43,6 +43,8 @@ ...@@ -43,6 +43,8 @@
#include "hns_roce_hem.h" #include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h" #include "hns_roce_hw_v1.h"
static int loopback;
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg) static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{ {
dseg->lkey = cpu_to_le32(sg->lkey); dseg->lkey = cpu_to_le32(sg->lkey);
...@@ -58,9 +60,14 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr, ...@@ -58,9 +60,14 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
rseg->len = 0; rseg->len = 0;
} }
#ifdef CONFIG_KERNEL_419
static int hns_roce_v1_post_send(struct ib_qp *ibqp, static int hns_roce_v1_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr) const struct ib_send_wr **bad_wr)
#else
static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
#endif
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
...@@ -347,9 +354,14 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, ...@@ -347,9 +354,14 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
return ret; return ret;
} }
#ifdef CONFIG_KERNEL_419
static int hns_roce_v1_post_recv(struct ib_qp *ibqp, static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr) const struct ib_recv_wr **bad_wr)
#else
static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
#endif
{ {
int ret = 0; int ret = 0;
int nreq = 0; int nreq = 0;
...@@ -999,8 +1011,12 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp) ...@@ -999,8 +1011,12 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
struct device *dev = &hr_dev->pdev->dev; struct device *dev = &hr_dev->pdev->dev;
#ifdef CONFIG_KERNEL_419
struct ib_send_wr send_wr; struct ib_send_wr send_wr;
const struct ib_send_wr *bad_wr; const struct ib_send_wr *bad_wr;
#else
struct ib_send_wr send_wr, *bad_wr;
#endif
int ret; int ret;
memset(&send_wr, 0, sizeof(send_wr)); memset(&send_wr, 0, sizeof(send_wr));
...@@ -1398,8 +1414,8 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev) ...@@ -1398,8 +1414,8 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
if (!tptr_buf->buf) if (!tptr_buf->buf)
return -ENOMEM; return -ENOMEM;
hr_dev->tptr_dma_addr = tptr_buf->map; hr_dev->uar2_dma_addr = tptr_buf->map;
hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE; hr_dev->uar2_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
return 0; return 0;
} }
...@@ -1480,15 +1496,22 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) ...@@ -1480,15 +1496,22 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
} }
fwnode = &dsaf_node->fwnode; fwnode = &dsaf_node->fwnode;
} else if (is_acpi_device_node(dev->fwnode)) { } else if (is_acpi_device_node(dev->fwnode)) {
#ifdef CONFIG_KERNEL_419
struct fwnode_reference_args args; struct fwnode_reference_args args;
#else
struct acpi_reference_args args;
#endif
ret = acpi_node_get_property_reference(dev->fwnode, ret = acpi_node_get_property_reference(dev->fwnode,
"dsaf-handle", 0, &args); "dsaf-handle", 0, &args);
if (ret) { if (ret) {
dev_err(dev, "could not find dsaf-handle\n"); dev_err(dev, "could not find dsaf-handle\n");
return ret; return ret;
} }
#ifdef CONFIG_KERNEL_419
fwnode = args.fwnode; fwnode = args.fwnode;
#else
fwnode = acpi_fwnode_handle(args.adev);
#endif
} else { } else {
dev_err(dev, "cannot read data from DT or ACPI\n"); dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO; return -ENXIO;
...@@ -1776,9 +1799,15 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, ...@@ -1776,9 +1799,15 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
return 0; return 0;
} }
#ifdef CONFIG_KERNEL_419
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
int gid_index, const union ib_gid *gid, int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr) const struct ib_gid_attr *attr)
#else
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
int gid_index, union ib_gid *gid,
const struct ib_gid_attr *attr)
#endif
{ {
u32 *p = NULL; u32 *p = NULL;
u8 gid_idx = 0; u8 gid_idx = 0;
...@@ -4917,14 +4946,24 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) ...@@ -4917,14 +4946,24 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
continue; continue;
pdev = of_find_device_by_node(net_node); pdev = of_find_device_by_node(net_node);
} else if (is_acpi_device_node(dev->fwnode)) { } else if (is_acpi_device_node(dev->fwnode)) {
#ifdef CONFIG_KERNEL_419
struct fwnode_reference_args args; struct fwnode_reference_args args;
#else
struct acpi_reference_args args;
struct fwnode_handle *fwnode;
#endif
ret = acpi_node_get_property_reference(dev->fwnode, ret = acpi_node_get_property_reference(dev->fwnode,
"eth-handle", "eth-handle",
i, &args); i, &args);
if (ret) if (ret)
continue; continue;
#ifdef CONFIG_KERNEL_419
pdev = hns_roce_find_pdev(args.fwnode); pdev = hns_roce_find_pdev(args.fwnode);
#else
fwnode = acpi_fwnode_handle(args.adev);
pdev = hns_roce_find_pdev(fwnode);
#endif
} else { } else {
dev_err(dev, "cannot read data from DT or ACPI\n"); dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO; return -ENXIO;
...@@ -4954,7 +4993,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) ...@@ -4954,7 +4993,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* cmd issue mode: 0 is poll, 1 is event */ /* cmd issue mode: 0 is poll, 1 is event */
hr_dev->cmd_mod = 1; hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0; hr_dev->loop_idc = loopback;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG; hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
...@@ -5067,3 +5106,5 @@ MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); ...@@ -5067,3 +5106,5 @@ MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>"); MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver"); MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
module_param(loopback, int, 0444);
MODULE_PARM_DESC(loopback, "default: 0");
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <net/addrconf.h> #include <net/addrconf.h>
#include <rdma/ib_addr.h> #include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h> #include <rdma/ib_umem.h>
#include "hnae3.h" #include "hnae3.h"
...@@ -46,6 +47,10 @@ ...@@ -46,6 +47,10 @@
#include "hns_roce_hem.h" #include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h" #include "hns_roce_hw_v2.h"
static int loopback;
static int dcqcn;
static int is_d;
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg) struct ib_sge *sg)
{ {
...@@ -54,8 +59,77 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, ...@@ -54,8 +59,77 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length); dseg->len = cpu_to_le32(sg->length);
} }
#ifdef CONFIG_KERNEL_419
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
struct hns_roce_wqe_frmr_seg *fseg,
const struct ib_reg_wr *wr)
#else
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
struct hns_roce_wqe_frmr_seg *fseg,
struct ib_reg_wr *wr)
#endif
{
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
/* use ib_access_flags */
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_FRMR_WQE_BYTE_4_RR_S,
wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_FRMR_WQE_BYTE_4_RW_S,
wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_FRMR_WQE_BYTE_4_LW_S,
wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
/* The RC send WQE fields below are reused to carry the FRMR parameters */
rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
rc_sq_wqe->rkey = cpu_to_le32(wr->key);
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
fseg->pbl_size = cpu_to_le32(mr->pbl_size);
roce_set_field(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_bit(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
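
set_frmr_seg() consumes a standard ib_reg_wr, so the ULP side needs no driver-specific code. A sketch of the matching fast-register post using the stock verbs API (the function below is illustrative; it assumes the CONFIG_KERNEL_419-style const ib_post_send() signature and an MR already mapped with ib_map_mr_sg()):

static int post_reg_mr(struct ib_qp *qp, struct ib_mr *mr)
{
	struct ib_reg_wr wr = {};
	const struct ib_send_wr *bad_wr;

	wr.wr.opcode = IB_WR_REG_MR;
	wr.mr = mr;
	wr.key = mr->rkey;
	wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
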
#ifdef CONFIG_KERNEL_419
static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
const struct ib_atomic_wr *wr)
#else
static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
struct ib_atomic_wr *wr)
#endif
{
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
aseg->cmp_data = cpu_to_le64(wr->compare_add);
} else {
aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
aseg->cmp_data = 0;
}
}
#ifdef CONFIG_KERNEL_419
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind) unsigned int *sge_ind)
#else
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
unsigned int *sge_ind)
#endif
{ {
struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_v2_wqe_data_seg *dseg;
struct ib_sge *sg; struct ib_sge *sg;
...@@ -101,10 +175,17 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, ...@@ -101,10 +175,17 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
} }
} }
#ifdef CONFIG_KERNEL_419
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind, void *wqe, unsigned int *sge_ind,
const struct ib_send_wr **bad_wr) const struct ib_send_wr **bad_wr)
#else
static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
struct ib_send_wr **bad_wr)
#endif
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe; struct hns_roce_v2_wqe_data_seg *dseg = wqe;
...@@ -121,6 +202,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, ...@@ -121,6 +202,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
} }
if (wr->opcode == IB_WR_RDMA_READ) { if (wr->opcode == IB_WR_RDMA_READ) {
*bad_wr = wr;
dev_err(hr_dev->dev, "Not support inline data!\n"); dev_err(hr_dev->dev, "Not support inline data!\n");
return -EINVAL; return -EINVAL;
} }
...@@ -170,15 +252,22 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -170,15 +252,22 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
int attr_mask, enum ib_qp_state cur_state, int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state); enum ib_qp_state new_state);
#ifdef CONFIG_KERNEL_419
static int hns_roce_v2_post_send(struct ib_qp *ibqp, static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr) const struct ib_send_wr **bad_wr)
#else
static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
#endif
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe; struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe; struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp); struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_wqe_frmr_seg *fseg;
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db; struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr; struct ib_qp_attr attr;
...@@ -187,17 +276,19 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -187,17 +276,19 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
unsigned long flags; unsigned long flags;
unsigned int ind; unsigned int ind;
void *wqe = NULL; void *wqe = NULL;
bool loopback;
int attr_mask; int attr_mask;
u32 tmp_len; u32 tmp_len;
int ret = 0; int ret = 0;
u8 *smac; u32 hr_op;
int nreq; int nreq;
int i; int i;
if (unlikely(ibqp->qp_type != IB_QPT_RC && if (unlikely(ibqp->qp_type != IB_QPT_RC &&
ibqp->qp_type != IB_QPT_UC &&
ibqp->qp_type != IB_QPT_GSI && ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) { ibqp->qp_type != IB_QPT_UD) &&
(ibqp->qp_type != IB_QPT_XRC_INI) &&
(ibqp->qp_type != IB_QPT_XRC_TGT)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type); dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
*bad_wr = wr; *bad_wr = wr;
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -238,7 +329,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -238,7 +329,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
tmp_len = 0; tmp_len = 0;
/* Corresponding to the QP type, wqe process separately */ /* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_GSI) { if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) {
ud_sq_wqe = wqe; ud_sq_wqe = wqe;
memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe)); memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
...@@ -259,13 +350,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -259,13 +350,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
ah->av.mac[5]); ah->av.mac[5]);
/* MAC loopback */ /* When lbi is set, the roce port supports loopback */
smac = (u8 *)hr_dev->dev_addr[qp->port];
loopback = ether_addr_equal_unaligned(ah->av.mac,
smac) ? 1 : 0;
roce_set_bit(ud_sq_wqe->byte_40, roce_set_bit(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback); V2_UD_SEND_WQE_BYTE_40_LBI_S,
hr_dev->loop_idc);
roce_set_field(ud_sq_wqe->byte_4, roce_set_field(ud_sq_wqe->byte_4,
V2_UD_SEND_WQE_BYTE_4_OPCODE_M, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
...@@ -347,27 +435,30 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -347,27 +435,30 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
ah->av.sl_tclass_flowlabel & ah->av.sl_tclass_flowlabel &
HNS_ROCE_FLOW_LABEL_MASK); HNS_ROCE_FLOW_LABEL_MASK);
roce_set_field(ud_sq_wqe->byte_40, roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_SL_M, V2_UD_SEND_WQE_BYTE_40_SL_M,
V2_UD_SEND_WQE_BYTE_40_SL_S, V2_UD_SEND_WQE_BYTE_40_SL_S,
le32_to_cpu(ah->av.sl_tclass_flowlabel) >> (le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
HNS_ROCE_SL_SHIFT); HNS_ROCE_SL_SHIFT) & 0x7);
roce_set_field(ud_sq_wqe->byte_40, roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_PORTN_M, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
V2_UD_SEND_WQE_BYTE_40_PORTN_S, V2_UD_SEND_WQE_BYTE_40_PORTN_S,
qp->port); qp->port);
roce_set_bit(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
ah->av.vlan_en ? 1 : 0);
roce_set_field(ud_sq_wqe->byte_48, roce_set_field(ud_sq_wqe->byte_48,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
hns_get_gid_index(hr_dev, qp->phy_port, ah->av.gid_index);
ah->av.gid_index));
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
GID_LEN_V2); GID_LEN_V2);
set_extend_sge(qp, wr, &sge_ind); set_extend_sge(qp, wr, &sge_ind);
ind++; ind++;
} else if (ibqp->qp_type == IB_QPT_RC) { } else if (ibqp->qp_type == IB_QPT_RC ||
ibqp->qp_type == IB_QPT_UC) {
rc_sq_wqe = wqe; rc_sq_wqe = wqe;
memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe)); memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
for (i = 0; i < wr->num_sge; i++) for (i = 0; i < wr->num_sge; i++)
...@@ -406,99 +497,98 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -406,99 +497,98 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_bit(rc_sq_wqe->byte_4, roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit); V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
switch (wr->opcode) { switch (wr->opcode) {
case IB_WR_RDMA_READ: case IB_WR_RDMA_READ:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_READ);
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey); cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr); cpu_to_le64(rdma_wr(wr)->remote_addr);
break; break;
case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey); cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr); cpu_to_le64(rdma_wr(wr)->remote_addr);
break; break;
case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey); cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr); cpu_to_le64(rdma_wr(wr)->remote_addr);
break; break;
case IB_WR_SEND: case IB_WR_SEND:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_SEND;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND);
break; break;
case IB_WR_SEND_WITH_INV: case IB_WR_SEND_WITH_INV:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
break; break;
case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_IMM:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
break; break;
case IB_WR_LOCAL_INV: case IB_WR_LOCAL_INV:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M, roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
HNS_ROCE_V2_WQE_OP_LOCAL_INV); rc_sq_wqe->inv_key =
cpu_to_le32(wr->ex.invalidate_rkey);
break;
case IB_WR_REG_MR:
hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
fseg = wqe;
set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
break; break;
case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_CMP_AND_SWP:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M, rc_sq_wqe->rkey =
V2_RC_SEND_WQE_BYTE_4_OPCODE_S, cpu_to_le32(atomic_wr(wr)->rkey);
HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP); rc_sq_wqe->va =
cpu_to_le64(atomic_wr(wr)->remote_addr);
break; break;
case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_ATOMIC_FETCH_AND_ADD:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M, rc_sq_wqe->rkey =
V2_RC_SEND_WQE_BYTE_4_OPCODE_S, cpu_to_le32(atomic_wr(wr)->rkey);
HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD); rc_sq_wqe->va =
cpu_to_le64(atomic_wr(wr)->remote_addr);
break; break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
roce_set_field(rc_sq_wqe->byte_4, hr_op =
V2_RC_SEND_WQE_BYTE_4_OPCODE_M, HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
break; break;
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
roce_set_field(rc_sq_wqe->byte_4, hr_op =
V2_RC_SEND_WQE_BYTE_4_OPCODE_M, HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
break; break;
default: default:
roce_set_field(rc_sq_wqe->byte_4, hr_op = HNS_ROCE_V2_WQE_OP_MASK;
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_MASK);
break; break;
} }
wqe += sizeof(struct hns_roce_v2_rc_send_wqe); roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
dseg = wqe;
set_data_seg_v2(dseg, wr->sg_list);
wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
set_atomic_seg(wqe, atomic_wr(wr));
roce_set_field(rc_sq_wqe->byte_16,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
wr->num_sge);
} else if (wr->opcode != IB_WR_REG_MR) {
ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
wqe, &sge_ind, bad_wr);
if (ret)
goto out;
}
ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
&sge_ind, bad_wr);
if (ret)
goto out;
ind++; ind++;
} else { } else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
...@@ -527,7 +617,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -527,7 +617,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M, roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl); V2_DB_PARAMETER_SL_S, qp->sl);
hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l); hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
qp->sq_next_wqe = ind; qp->sq_next_wqe = ind;
qp->next_sge = sge_ind; qp->next_sge = sge_ind;
...@@ -551,9 +641,14 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -551,9 +641,14 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
return ret; return ret;
} }
#ifdef CONFIG_KERNEL_419
static int hns_roce_v2_post_recv(struct ib_qp *ibqp, static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr) const struct ib_recv_wr **bad_wr)
#else
static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
#endif
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
...@@ -652,6 +747,127 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, ...@@ -652,6 +747,127 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
return ret; return ret;
} }
static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
/* When a hardware reset has completed at least once, we should stop
* sending mailbox, cmq and doorbell commands to hardware. If we are
* currently in the .init_instance() function we should exit with
* error. If we are at the HNAE3_INIT_CLIENT stage of the soft reset
* process we should also exit with error, so that the
* HNAE3_INIT_CLIENT related process can roll back operations such as
* notifying hardware to free resources, and then exit with error to
* make the NIC driver reschedule the soft reset process once again.
*/
hr_dev->is_reset = true;
hr_dev->dis_db = true;
if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
instance_stage == HNS_ROCE_STATE_INIT)
return CMD_RST_PRC_EBUSY;
return CMD_RST_PRC_SUCCESS;
}
static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long end;
/* When a hardware reset is detected, we should stop sending mailbox,
* cmq and doorbell commands to hardware and wait until the reset
* finishes. If we are currently in the .init_instance() function we
* should exit with error. If we are at the HNAE3_INIT_CLIENT stage of
* the soft reset process we should also exit with error, so that the
* HNAE3_INIT_CLIENT related process can roll back operations such as
* notifying hardware to free resources, and then exit with error to
* make the NIC driver reschedule the soft reset process once again.
*/
hr_dev->dis_db = true;
end = msecs_to_jiffies(HNS_ROCE_V2_HW_RST_TIMEOUT) + jiffies;
while (ops->get_hw_reset_stat(handle) && time_before(jiffies, end))
udelay(1);
if (!ops->get_hw_reset_stat(handle))
hr_dev->is_reset = true;
else
dev_warn(hr_dev->dev, "hw_resetting!\n");
if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
instance_stage == HNS_ROCE_STATE_INIT)
return CMD_RST_PRC_EBUSY;
return CMD_RST_PRC_SUCCESS;
}
static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long end;
/* When a software reset is detected in the .init_instance() function,
* we should stop sending mailbox, cmq and doorbell commands to
* hardware, wait until the hardware reset finishes, and then exit
* with error.
*/
hr_dev->dis_db = true;
end = msecs_to_jiffies(HNS_ROCE_V2_HW_RST_TIMEOUT) + jiffies;
while (ops->ae_dev_reset_cnt(handle) == hr_dev->reset_cnt &&
time_before(jiffies, end))
udelay(1);
if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
hr_dev->is_reset = true;
else
dev_warn(hr_dev->dev, "reset_cnt no change!\n");
return CMD_RST_PRC_EBUSY;
}
static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage; /* the current instance stage */
unsigned long reset_stage; /* the current reset stage */
unsigned long reset_cnt;
bool sw_resetting;
bool hw_resetting;
if (hr_dev->is_reset)
return CMD_RST_PRC_SUCCESS;
/* Get reset-related information from the NIC driver or from the RoCE
* driver itself; the meaning of the following variables provided by
* the NIC driver is described below:
* reset_cnt -- The count value of completed hardware reset.
* hw_resetting -- Whether hardware device is resetting now.
* sw_resetting -- Whether NIC's software reset process is running now.
*/
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
hw_resetting = ops->get_hw_reset_stat(handle);
sw_resetting = ops->ae_dev_resetting(handle);
if (reset_cnt != hr_dev->reset_cnt)
return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
reset_stage);
else if (hw_resetting)
return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
reset_stage);
else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
return hns_roce_v2_cmd_sw_resetting(hr_dev);
return 0;
}
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring) static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{ {
int ntu = ring->next_to_use; int ntu = ring->next_to_use;
...@@ -786,7 +1002,7 @@ static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev) ...@@ -786,7 +1002,7 @@ static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq); hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
} }
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
enum hns_roce_opcode_type opcode, enum hns_roce_opcode_type opcode,
bool is_read) bool is_read)
{ {
...@@ -832,8 +1048,8 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev) ...@@ -832,8 +1048,8 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
return clean; return clean;
} }
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num) struct hns_roce_cmq_desc *desc, int num)
{ {
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
...@@ -845,9 +1061,6 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, ...@@ -845,9 +1061,6 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret = 0; int ret = 0;
int ntc; int ntc;
if (hr_dev->is_reset)
return 0;
spin_lock_bh(&csq->lock); spin_lock_bh(&csq->lock);
if (num > hns_roce_cmq_space(csq)) { if (num > hns_roce_cmq_space(csq)) {
...@@ -922,6 +1135,30 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, ...@@ -922,6 +1135,30 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
return ret; return ret;
} }
int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
int retval;
int ret;
ret = hns_roce_v2_rst_process_cmd(hr_dev);
if (ret == CMD_RST_PRC_SUCCESS)
return 0;
if (ret == CMD_RST_PRC_EBUSY)
return ret;
ret = __hns_roce_cmq_send(hr_dev, desc, num);
if (ret) {
retval = hns_roce_v2_rst_process_cmd(hr_dev);
if (retval == CMD_RST_PRC_SUCCESS)
return 0;
else if (retval == CMD_RST_PRC_EBUSY)
return retval;
}
return ret;
}
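
With the reset check folded into hns_roce_cmq_send(), a typical single-descriptor query reduces to the sketch below (illustrative helper, not part of this patch; the caller must keep len within sizeof(desc.data)):

static int query_one(struct hns_roce_dev *hr_dev,
		     enum hns_roce_opcode_type opcode,
		     void *out, size_t len)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, opcode, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	memcpy(out, desc.data, len);	/* desc.data holds the response */
	return 0;
}
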
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_query_version *resp; struct hns_roce_query_version *resp;
...@@ -935,7 +1172,189 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) ...@@ -935,7 +1172,189 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
resp = (struct hns_roce_query_version *)desc.data; resp = (struct hns_roce_query_version *)desc.data;
hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version); hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id); hr_dev->vendor_id = hr_dev->pci_dev->vendor;
return 0;
}
static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long reset_cnt;
bool sw_resetting;
bool hw_resetting;
reset_cnt = ops->ae_dev_reset_cnt(handle);
hw_resetting = ops->get_hw_reset_stat(handle);
sw_resetting = ops->ae_dev_resetting(handle);
if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
return true;
return false;
}
static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
int flag)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage;
unsigned long reset_cnt;
unsigned long end;
bool sw_resetting;
bool hw_resetting;
instance_stage = handle->rinfo.instance_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
hw_resetting = ops->get_hw_reset_stat(handle);
sw_resetting = ops->ae_dev_resetting(handle);
if (reset_cnt != hr_dev->reset_cnt) {
hr_dev->dis_db = true;
hr_dev->is_reset = true;
dev_warn(hr_dev->dev, "Func clear success after reset.\n");
} else if (hw_resetting) {
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
"Func clear is pending, device in resetting state.\n");
end = msecs_to_jiffies(HNS_ROCE_V2_HW_RST_TIMEOUT) + jiffies;
while (time_before(jiffies, end)) {
if (!ops->get_hw_reset_stat(handle)) {
hr_dev->is_reset = true;
dev_warn(hr_dev->dev,
"Func clear success after reset.\n");
return;
}
msleep(20);
}
dev_warn(hr_dev->dev, "Func clear failed.\n");
} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
"Func clear is pending, device in resetting state.\n");
end = msecs_to_jiffies(HNS_ROCE_V2_HW_RST_TIMEOUT) + jiffies;
while (time_before(jiffies, end)) {
if (ops->ae_dev_reset_cnt(handle) !=
hr_dev->reset_cnt) {
hr_dev->is_reset = true;
dev_warn(hr_dev->dev,
"Func clear success after reset.\n");
return;
}
msleep(20);
}
dev_warn(hr_dev->dev, "Func clear failed.\n");
} else {
if (retval && !flag)
dev_warn(hr_dev->dev,
"Func clear read failed, ret = %d.\n", retval);
dev_warn(hr_dev->dev, "Func clear failed.\n");
}
}
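
Both branches above poll a hardware condition against the same jiffies deadline. Factored out, the idiom is the sketch below (illustrative helper; the 20 ms sleep mirrors the loops above):

static bool poll_until(bool (*done)(struct hnae3_handle *handle),
		       struct hnae3_handle *handle, unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, end)) {
		if (done(handle))
			return true;
		msleep(20);
	}

	return false;	/* deadline expired */
}
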
static void hns_roce_query_func_num(struct hns_roce_dev *hr_dev)
{
struct hns_roce_pf_func_num *resp;
struct hns_roce_cmq_desc desc;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_VF_NUM, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
dev_err(hr_dev->dev, "Query vf count fail, ret = %d.\n",
ret);
return;
}
resp = (struct hns_roce_pf_func_num *)desc.data;
hr_dev->func_num = resp->pf_own_func_num;
}
static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id)
{
struct hns_roce_func_clear *resp;
struct hns_roce_cmq_desc desc;
unsigned long end;
bool fclr_write_fail_flag = false;
int ret = 0;
if (hns_roce_func_clr_chk_rst(hr_dev))
goto out;
resp = (struct hns_roce_func_clear *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
resp->rst_funcid_en = vf_id;
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
fclr_write_fail_flag = true;
dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
ret);
goto out;
}
end = msecs_to_jiffies(HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS) + jiffies;
msleep(40);
while (time_before(jiffies, end)) {
if (hns_roce_func_clr_chk_rst(hr_dev))
goto out;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
true);
resp->rst_funcid_en = vf_id;
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
msleep(20);
continue;
}
if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
if (vf_id == 0)
hr_dev->is_reset = true;
return;
}
}
out:
dev_err(hr_dev->dev, "Func clear read vf_id %d fail.\n", vf_id);
hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
int i;
int vf_num = hr_dev->func_num - 1;
/* Clear vf first, then clear pf */
for (i = vf_num; i >= 0; i--)
hns_roce_clear_func(hr_dev, i);
}
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_fw_info *resp;
struct hns_roce_cmq_desc desc;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
resp = (struct hns_roce_query_fw_info *)desc.data;
hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
return 0; return 0;
} }
...@@ -1001,17 +1420,85 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) ...@@ -1001,17 +1420,85 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num, hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
PF_RES_DATA_3_PF_SL_NUM_M, PF_RES_DATA_3_PF_SL_NUM_M,
PF_RES_DATA_3_PF_SL_NUM_S); PF_RES_DATA_3_PF_SL_NUM_S);
hr_dev->caps.scc_ctx_bt_num = roce_get_field(req_b->scc_ctx_bt_idx_num,
PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_M,
PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_S);
return 0;
}
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[2];
struct hns_roce_pf_timer_res_a *req_a;
int ret;
int i;
for (i = 0; i < 2; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
true);
if (i == 0)
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
}
ret = hns_roce_cmq_send(hr_dev, desc, 2);
if (ret)
return ret;
req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
hr_dev->caps.qpc_timer_bt_num =
roce_get_field(req_a->qpc_timer_bt_idx_num,
PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
hr_dev->caps.cqc_timer_bt_num =
roce_get_field(req_a->cqc_timer_bt_idx_num,
PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
return 0; return 0;
} }
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
int vf_id)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_vf_switch *swt;
int ret;
swt = (struct hns_roce_vf_switch *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
roce_set_field(swt->fun_id,
VF_SWITCH_DATA_FUN_ID_VF_ID_M,
VF_SWITCH_DATA_FUN_ID_VF_ID_S,
vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
desc.flag =
cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_cmq_desc desc[2]; struct hns_roce_cmq_desc desc[2];
struct hns_roce_vf_res_a *req_a; struct hns_roce_vf_res_a *req_a;
struct hns_roce_vf_res_b *req_b; struct hns_roce_vf_res_b *req_b;
int d;
int i; int i;
d = is_d;
req_a = (struct hns_roce_vf_res_a *)desc[0].data; req_a = (struct hns_roce_vf_res_a *)desc[0].data;
req_b = (struct hns_roce_vf_res_b *)desc[1].data; req_b = (struct hns_roce_vf_res_b *)desc[1].data;
memset(req_a, 0, sizeof(*req_a)); memset(req_a, 0, sizeof(*req_a));
...@@ -1032,7 +1519,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1032,7 +1519,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
roce_set_field(req_a->vf_qpc_bt_idx_num, roce_set_field(req_a->vf_qpc_bt_idx_num,
VF_RES_A_DATA_1_VF_QPC_BT_NUM_M, VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
HNS_ROCE_VF_QPC_BT_NUM); HNS_ROCE_VF_QPC_BT_NUM(d));
roce_set_field(req_a->vf_srqc_bt_idx_num, roce_set_field(req_a->vf_srqc_bt_idx_num,
VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M, VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
...@@ -1040,7 +1527,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1040,7 +1527,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
roce_set_field(req_a->vf_srqc_bt_idx_num, roce_set_field(req_a->vf_srqc_bt_idx_num,
VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M, VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S, VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
HNS_ROCE_VF_SRQC_BT_NUM); HNS_ROCE_VF_SRQC_BT_NUM(d));
roce_set_field(req_a->vf_cqc_bt_idx_num, roce_set_field(req_a->vf_cqc_bt_idx_num,
VF_RES_A_DATA_3_VF_CQC_BT_IDX_M, VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
...@@ -1048,7 +1535,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1048,7 +1535,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
roce_set_field(req_a->vf_cqc_bt_idx_num, roce_set_field(req_a->vf_cqc_bt_idx_num,
VF_RES_A_DATA_3_VF_CQC_BT_NUM_M, VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
HNS_ROCE_VF_CQC_BT_NUM); HNS_ROCE_VF_CQC_BT_NUM(d));
roce_set_field(req_a->vf_mpt_bt_idx_num, roce_set_field(req_a->vf_mpt_bt_idx_num,
VF_RES_A_DATA_4_VF_MPT_BT_IDX_M, VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
...@@ -1056,7 +1543,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1056,7 +1543,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
roce_set_field(req_a->vf_mpt_bt_idx_num, roce_set_field(req_a->vf_mpt_bt_idx_num,
VF_RES_A_DATA_4_VF_MPT_BT_NUM_M, VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
HNS_ROCE_VF_MPT_BT_NUM); HNS_ROCE_VF_MPT_BT_NUM(d));
roce_set_field(req_a->vf_eqc_bt_idx_num, roce_set_field(req_a->vf_eqc_bt_idx_num,
VF_RES_A_DATA_5_VF_EQC_IDX_M, VF_RES_A_DATA_5_VF_EQC_IDX_M,
...@@ -1064,7 +1551,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1064,7 +1551,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
roce_set_field(req_a->vf_eqc_bt_idx_num, roce_set_field(req_a->vf_eqc_bt_idx_num,
VF_RES_A_DATA_5_VF_EQC_NUM_M, VF_RES_A_DATA_5_VF_EQC_NUM_M,
VF_RES_A_DATA_5_VF_EQC_NUM_S, VF_RES_A_DATA_5_VF_EQC_NUM_S,
HNS_ROCE_VF_EQC_NUM); HNS_ROCE_VF_EQC_NUM(d));
} else { } else {
roce_set_field(req_b->vf_smac_idx_num, roce_set_field(req_b->vf_smac_idx_num,
VF_RES_B_DATA_1_VF_SMAC_IDX_M, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
...@@ -1072,7 +1559,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1072,7 +1559,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
roce_set_field(req_b->vf_smac_idx_num, roce_set_field(req_b->vf_smac_idx_num,
VF_RES_B_DATA_1_VF_SMAC_NUM_M, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
VF_RES_B_DATA_1_VF_SMAC_NUM_S, VF_RES_B_DATA_1_VF_SMAC_NUM_S,
HNS_ROCE_VF_SMAC_NUM); HNS_ROCE_VF_SMAC_NUM(d));
roce_set_field(req_b->vf_sgid_idx_num, roce_set_field(req_b->vf_sgid_idx_num,
VF_RES_B_DATA_2_VF_SGID_IDX_M, VF_RES_B_DATA_2_VF_SGID_IDX_M,
...@@ -1080,7 +1567,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1080,7 +1567,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
roce_set_field(req_b->vf_sgid_idx_num, roce_set_field(req_b->vf_sgid_idx_num,
VF_RES_B_DATA_2_VF_SGID_NUM_M, VF_RES_B_DATA_2_VF_SGID_NUM_M,
VF_RES_B_DATA_2_VF_SGID_NUM_S, VF_RES_B_DATA_2_VF_SGID_NUM_S,
HNS_ROCE_VF_SGID_NUM); HNS_ROCE_VF_SGID_NUM(d));
roce_set_field(req_b->vf_qid_idx_sl_num, roce_set_field(req_b->vf_qid_idx_sl_num,
VF_RES_B_DATA_3_VF_QID_IDX_M, VF_RES_B_DATA_3_VF_QID_IDX_M,
...@@ -1101,6 +1588,7 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) ...@@ -1101,6 +1588,7 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
u8 qpc_hop_num = hr_dev->caps.qpc_hop_num; u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
u8 cqc_hop_num = hr_dev->caps.cqc_hop_num; u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
u8 mpt_hop_num = hr_dev->caps.mpt_hop_num; u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
u8 scc_ctx_hop_num = hr_dev->caps.scc_ctx_hop_num;
struct hns_roce_cfg_bt_attr *req; struct hns_roce_cfg_bt_attr *req;
struct hns_roce_cmq_desc desc; struct hns_roce_cmq_desc desc;
...@@ -1148,15 +1636,37 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) ...@@ -1148,15 +1636,37 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num); mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
roce_set_field(req->vf_scc_ctx_cfg,
CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_M,
CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_S,
hr_dev->caps.scc_ctx_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_scc_ctx_cfg,
CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_S,
hr_dev->caps.scc_ctx_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_scc_ctx_cfg,
CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_M,
CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_S,
scc_ctx_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : scc_ctx_hop_num);
return hns_roce_cmq_send(hr_dev, &desc, 1); return hns_roce_cmq_send(hr_dev, &desc, 1);
} }
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_caps *caps = &hr_dev->caps; struct hns_roce_caps *caps = &hr_dev->caps;
int d = is_d;
int ret; int ret;
ret = hns_roce_cmq_query_hw_info(hr_dev); ret = hns_roce_cmq_query_hw_info(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
ret);
return ret;
}
ret = hns_roce_query_fw_ver(hr_dev);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n", dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
ret); ret);
...@@ -1178,6 +1688,18 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1178,6 +1688,18 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret; return ret;
} }
hns_roce_query_func_num(hr_dev);
if (hr_dev->pci_dev->revision == 0x21) {
ret = hns_roce_query_pf_timer_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
"Query pf timer resource fail, ret = %d.\n",
ret);
return ret;
}
}
ret = hns_roce_alloc_vf_resource(hr_dev); ret = hns_roce_alloc_vf_resource(hr_dev);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n", dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
...@@ -1185,25 +1707,42 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1185,25 +1707,42 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret; return ret;
} }
hr_dev->vendor_part_id = 0; if (hr_dev->pci_dev->revision == 0x21) {
hr_dev->sys_image_guid = 0; ret = hns_roce_set_vf_switch_param(hr_dev, 0);
if (ret) {
dev_err(hr_dev->dev,
"Set function switch param fail, ret = %d.\n",
ret);
return ret;
}
}
hr_dev->vendor_part_id = hr_dev->pci_dev->device;
hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->max_srq_sg = HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM(d);
caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
@@ -1213,15 +1752,20 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->idx_entry_sz = 4;
caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
caps->reserved_xrcds = 0;
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
caps->reserved_srqs = 0;
caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
caps->qpc_ba_pg_sz = 0;
caps->qpc_buf_pg_sz = 0;
@@ -1235,7 +1779,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->mpt_ba_pg_sz = 0;
caps->mpt_buf_pg_sz = 0;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->pbl_ba_pg_sz = 0;
caps->pbl_ba_pg_sz = 2;
caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->mtt_ba_pg_sz = 0;
@@ -1244,6 +1788,12 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->cqe_ba_pg_sz = 0;
caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
caps->srqwqe_ba_pg_sz = 0;
caps->srqwqe_buf_pg_sz = 0;
caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
caps->idx_ba_pg_sz = 0;
caps->idx_buf_pg_sz = 0;
caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
caps->eqe_ba_pg_sz = 0;
caps->eqe_buf_pg_sz = 0;
caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
@@ -1252,7 +1802,6 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
HNS_ROCE_CAP_FLAG_RQ_INLINE |
HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
caps->pkey_table_len[0] = 1;
@@ -1262,6 +1811,35 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
caps->max_srqs = HNS_ROCE_V2_MAX_SRQ;
caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
if (hr_dev->pci_dev->revision == 0x21) {
caps->flags |= (HNS_ROCE_CAP_FLAG_XRC | HNS_ROCE_CAP_FLAG_SRQ |
HNS_ROCE_CAP_FLAG_MW |
HNS_ROCE_CAP_FLAG_FRMR |
HNS_ROCE_CAP_FLAG_ATOMIC);
caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
caps->qpc_timer_ba_pg_sz = 0;
caps->qpc_timer_buf_pg_sz = 0;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_ba_pg_sz = 0;
caps->cqc_timer_buf_pg_sz = 0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
if (dcqcn == 1) {
caps->flags |= HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
caps->scc_ctx_entry_sz = HNS_ROCE_V2_SCC_CTX_ENTRY_SZ;
caps->scc_ctx_ba_pg_sz = 0;
caps->scc_ctx_buf_pg_sz = 0;
caps->scc_ctx_hop_num = HNS_ROCE_SCC_CTX_HOP_NUM;
}
}
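Everything inside the revision check above is enabled for revision-0x21 hardware only, and QP-level flow control is further gated on the dcqcn parameter. Below is a standalone sketch (not driver code) of how such conditional capability flags compose; the flag values are invented stand-ins for the real HNS_ROCE_CAP_FLAG_* defines.
#include <stdio.h>
#include <stdint.h>
#define CAP_FLAG_XRC          (1U << 0) /* stand-ins, not the real defines */
#define CAP_FLAG_SRQ          (1U << 1)
#define CAP_FLAG_MW           (1U << 2)
#define CAP_FLAG_FRMR         (1U << 3)
#define CAP_FLAG_ATOMIC       (1U << 4)
#define CAP_FLAG_QP_FLOW_CTRL (1U << 5)
int main(void)
{
	uint32_t flags = 0;
	int revision = 0x21; /* PCI revision probed by the driver */
	int dcqcn = 1;       /* module parameter enabling DCQCN */
	if (revision == 0x21) {
		flags |= CAP_FLAG_XRC | CAP_FLAG_SRQ | CAP_FLAG_MW |
			 CAP_FLAG_FRMR | CAP_FLAG_ATOMIC;
		if (dcqcn == 1)
			flags |= CAP_FLAG_QP_FLOW_CTRL;
	}
	printf("caps->flags |= 0x%x\n", (unsigned)flags);
	return 0;
}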
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
@@ -1458,16 +2036,51 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
link_tbl->table.map);
}
static int hns_roce_v2_uar_init(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_buf_list *uar = &priv->uar;
struct device *dev = &hr_dev->pci_dev->dev;
uar->buf = dma_zalloc_coherent(dev, HNS_ROCE_V2_UAR_BUF_SIZE, &uar->map,
GFP_KERNEL);
if (!uar->buf)
return -ENOMEM;
hr_dev->uar2_dma_addr = uar->map;
hr_dev->uar2_size = HNS_ROCE_V2_UAR_BUF_SIZE;
return 0;
}
static void hns_roce_v2_uar_free(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_buf_list *uar = &priv->uar;
struct device *dev = &hr_dev->pci_dev->dev;
dma_free_coherent(dev, HNS_ROCE_V2_UAR_BUF_SIZE, uar->buf, uar->map);
}
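The two helpers above are a strict pair: init allocates one zeroed DMA-coherent buffer and records its bus address and size on the device, and free must release exactly that allocation. A standalone sketch of the pairing, with calloc/free standing in for dma_zalloc_coherent/dma_free_coherent and an invented struct:
#include <stdlib.h>
#define UAR_BUF_SIZE 4096 /* stand-in for HNS_ROCE_V2_UAR_BUF_SIZE */
struct uar_buf {
	void *buf;         /* CPU address of the buffer */
	unsigned long map; /* bus address handed to the device */
};
static int uar_init(struct uar_buf *uar)
{
	uar->buf = calloc(1, UAR_BUF_SIZE); /* zeroed, like dma_zalloc_coherent */
	if (!uar->buf)
		return -1;
	uar->map = (unsigned long)uar->buf; /* stand-in for the DMA handle */
	return 0;
}
static void uar_free(struct uar_buf *uar)
{
	free(uar->buf); /* must mirror the size/handle used at init */
}
int main(void)
{
	struct uar_buf uar;
	if (uar_init(&uar))
		return 1;
	uar_free(&uar);
	return 0;
}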
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int qpc_count;
int cqc_count;
int ret;
int i;
ret = hns_roce_v2_uar_init(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "uar init failed, ret = %d.\n", ret);
return ret;
}
/* TSQ includes SQ doorbell and ack doorbell */
ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
if (ret) {
dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
return ret;
goto err_tsq_init_failed;
}
ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
@@ -1476,11 +2089,46 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
goto err_tpq_init_failed;
}
/* Alloc memory for QPC Timer buffer space chunk*/
for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
qpc_count++) {
ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table.table,
qpc_count);
if (ret) {
dev_err(hr_dev->dev, "QPC Timer get failed\n");
goto err_qpc_timer_failed;
}
}
/* Alloc memory for CQC Timer buffer space chunk*/
for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
cqc_count++) {
ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table.table,
cqc_count);
if (ret) {
dev_err(hr_dev->dev, "CQC Timer get failed\n");
goto err_cqc_timer_failed;
}
}
return 0;
err_cqc_timer_failed:
for (i = 0; i < cqc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table.table, i);
err_qpc_timer_failed:
for (i = 0; i < qpc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table.table, i);
hns_roce_free_link_table(hr_dev, &priv->tpq);
err_tpq_init_failed:
hns_roce_free_link_table(hr_dev, &priv->tsq);
err_tsq_init_failed:
hns_roce_v2_uar_free(hr_dev);
return ret;
}
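hns_roce_v2_init() now unwinds in strict reverse order: each acquisition adds a label to the error ladder, and a failure jumps to the label that releases everything obtained so far, ending with the UAR buffer. A compilable sketch of the same goto pattern with invented step names:
#include <stdio.h>
static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; } /* simulate failure */
static void fini_b(void) { puts("fini b"); }
static void fini_a(void) { puts("fini a"); }
int main(void)
{
	int ret;
	ret = init_a();
	if (ret)
		return ret;
	ret = init_b();
	if (ret)
		goto err_a;
	ret = init_c();
	if (ret)
		goto err_b;
	return 0;
err_b:
	fini_b(); /* undo init_b */
err_a:
	fini_a(); /* undo init_a, like err_tsq_init_failed above */
	return ret;
}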
@@ -1488,34 +2136,70 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
if (hr_dev->pci_dev->revision == 0x21)
hns_roce_function_clear(hr_dev);
hns_roce_free_link_table(hr_dev, &priv->tpq);
hns_roce_free_link_table(hr_dev, &priv->tsq);
hns_roce_v2_uar_free(hr_dev);
}
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}
static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
struct hns_roce_cmq_desc desc;
struct hns_roce_mbox_status *mb_st =
(struct hns_roce_mbox_status *)desc.data;
enum hns_roce_cmd_return_status status;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
status = hns_roce_cmq_send(hr_dev, &desc, 1);
if (status)
return status;
return mb_st->mb_status_hw_run;
}
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = hns_roce_query_mbox_status(hr_dev);
return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}
static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
u32 status = hns_roce_query_mbox_status(hr_dev);
return status & HNS_ROCE_HW_MB_STATUS_MASK;
}
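Both predicates decode the one status word returned by hns_roce_query_mbox_status(): cmd_pending reads the hardware-run bit and cmd_complete masks out the status code (0x1 meaning success in hns_roce_v2_chk_mbox() below). A standalone sketch with illustrative shift/mask values rather than the real HNS_ROCE_HW_* defines:
#include <stdio.h>
#include <stdint.h>
#define HW_RUN_BIT_SHIFT  31 /* illustrative values */
#define HW_MB_STATUS_MASK 0xff
static int cmd_pending(uint32_t status)
{
	return status >> HW_RUN_BIT_SHIFT; /* hardware still busy? */
}
static int cmd_complete(uint32_t status)
{
	return status & HW_MB_STATUS_MASK; /* 0x1 would mean success */
}
int main(void)
{
	uint32_t status = (0u << HW_RUN_BIT_SHIFT) | 0x1;
	printf("pending=%d complete=0x%x\n",
	       cmd_pending(status), (unsigned)cmd_complete(status));
	return 0;
}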
static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier,
u16 op, u16 token, int event)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
mb->in_param_l = cpu_to_le64(in_param);
mb->in_param_h = cpu_to_le64(in_param) >> 32;
mb->out_param_l = cpu_to_le64(out_param);
mb->out_param_h = cpu_to_le64(out_param) >> 32;
mb->cmd_tag = in_modifier << 8 | op;
mb->token_event_en = event << 16 | token;
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
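hns_roce_mbox_post() packs the whole mailbox into one CMQ descriptor: each 64-bit parameter is split into low/high words, and the tag and token words are composed by shift-or. A user-space sketch of that packing; the struct here is invented to mirror the layout, not the real hns_roce_post_mbox definition:
#include <stdio.h>
#include <stdint.h>
struct post_mbox { /* invented mirror of the descriptor */
	uint32_t in_param_l, in_param_h;
	uint32_t out_param_l, out_param_h;
	uint32_t cmd_tag;        /* in_modifier << 8 | op */
	uint32_t token_event_en; /* event << 16 | token */
};
int main(void)
{
	uint64_t in_param = 0x1122334455667788ULL;
	uint64_t out_param = 0;
	uint32_t in_modifier = 0x10;
	uint16_t op = 0x23, token = 0x42;
	int event = 1;
	struct post_mbox mb;
	mb.in_param_l = (uint32_t)in_param; /* low 32 bits */
	mb.in_param_h = (uint32_t)(in_param >> 32);
	mb.out_param_l = (uint32_t)out_param;
	mb.out_param_h = (uint32_t)(out_param >> 32);
	mb.cmd_tag = in_modifier << 8 | op;
	mb.token_event_en = (uint32_t)event << 16 | token;
	printf("cmd_tag=0x%x token_event_en=0x%x\n",
	       (unsigned)mb.cmd_tag, (unsigned)mb.token_event_en);
	return 0;
}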
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier,
u16 op, u16 token, int event)
{
struct device *dev = hr_dev->dev;
u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
ROCEE_VF_MB_CFG0_REG);
unsigned long end;
u32 val0 = 0;
u32 val1 = 0;
int ret;
end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
while (hns_roce_v2_cmd_pending(hr_dev)) {
@@ -1527,27 +2211,12 @@ static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
cond_resched();
}
roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
HNS_ROCE_VF_MB4_CMD_SHIFT, op);
roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
writeq(in_param, hcr + 0);
writeq(out_param, hcr + 2);
/* Memory barrier */
wmb();
writel(val0, hcr + 4);
writel(val1, hcr + 5);
mmiowb();
return 0;
ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
op_modifier, op, token, event);
if (ret)
dev_err(dev, "Post mailbox fail(%d)\n", ret);
return ret;
}
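The rewritten hns_roce_v2_post_mbox() keeps the old go-bit wait but hands the actual post to the CMQ helper: poll the pending predicate until it clears or a jiffies deadline passes. A standalone sketch of the poll-until-deadline pattern, with clock() standing in for jiffies and a counter for the hardware bit:
#include <stdio.h>
#include <time.h>
static int busy_polls = 3;
static int cmd_pending(void)
{
	return busy_polls-- > 0; /* pretend hardware finishes after 3 polls */
}
int main(void)
{
	clock_t end = clock() + CLOCKS_PER_SEC / 100; /* ~10 ms budget */
	while (cmd_pending()) {
		if (clock() > end) {
			puts("wait mbox go-bit timeout");
			return -1;
		}
	}
	puts("go bit clear, mailbox can be posted");
	return 0;
}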
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
@@ -1568,6 +2237,9 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
status = hns_roce_v2_cmd_complete(hr_dev);
if (status != 0x1) {
if (status == CMD_RST_PRC_EBUSY)
return status;
dev_err(dev, "mailbox status 0x%x!\n", status);
return -EBUSY;
}
@@ -1608,9 +2280,15 @@ static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
#ifdef CONFIG_KERNEL_419
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
int gid_index, const union ib_gid *gid, int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr) const struct ib_gid_attr *attr)
#else
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
int gid_index, union ib_gid *gid,
const struct ib_gid_attr *attr)
#endif
{
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
int ret;
@@ -1688,7 +2366,7 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
page_addr = sg_dma_address(sg) +
(j << mr->umem->page_shift);
pages[i] = page_addr >> 6;
/* Record the first 2 entry directly to MTPT table */
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
goto found;
i++;
@@ -1735,10 +2413,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
(mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
(mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
@@ -1809,6 +2488,87 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
return ret;
}
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
V2_MPT_BYTE_48_PBL_BA_H_S,
upper_32_bits(mr->pbl_ba >> 3));
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
return 0;
}
static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mw->pdn);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
V2_MPT_BYTE_4_PBL_HOP_NUM_S, mw->pbl_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : mw->pbl_hop_num);
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
mpt_entry->lkey = cpu_to_le32(mw->rkey);
return 0;
}
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
@@ -1829,6 +2589,27 @@ static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
}
static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
{
return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
}
static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
{
u32 bitmap_num;
int bit_num;
/* always called with interrupts disabled. */
spin_lock(&srq->lock);
bitmap_num = wqe_index / (sizeof(u64) * 8);
bit_num = wqe_index % (sizeof(u64) * 8);
srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
srq->tail++;
spin_unlock(&srq->lock);
}
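hns_roce_free_srq_wqe() returns a WQE index to the SRQ's index queue by setting one bit in an array of 64-bit words and bumping the tail under the lock. A worked sketch of the word/bit arithmetic:
#include <stdio.h>
#include <stdint.h>
int main(void)
{
	uint64_t bitmap[2] = { 0 }; /* room for 128 WQE indexes */
	int wqe_index = 70;
	uint32_t bitmap_num = wqe_index / (sizeof(uint64_t) * 8); /* word 1 */
	int bit_num = wqe_index % (sizeof(uint64_t) * 8);         /* bit 6 */
	bitmap[bitmap_num] |= 1ULL << bit_num; /* index 70 is free again */
	printf("word %u = 0x%llx\n", (unsigned)bitmap_num,
	       (unsigned long long)bitmap[bitmap_num]);
	return 0;
}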
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
*hr_cq->set_ci_db = cons_index & 0xffffff;
@@ -1840,6 +2621,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
struct hns_roce_v2_cqe *cqe, *dest;
u32 prod_index;
int nfreed = 0;
int wqe_index;
u8 owner_bit;
for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
@@ -1857,7 +2639,13 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
V2_CQE_BYTE_16_LCL_QPN_S) &
HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
/* In v1 engine, not support SRQ */
if (srq &&
roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
wqe_index = roce_get_field(cqe->byte_4,
V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S);
hns_roce_free_srq_wqe(srq, wqe_index);
}
++nfreed;
} else if (nfreed) {
dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
@@ -1967,6 +2755,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
enum ib_cq_notify_flags flags)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
u32 doorbell[2];
@@ -1992,7 +2781,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
notification_flag);
hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
return 0;
}
@@ -2034,6 +2823,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
struct hns_roce_srq *srq = NULL;
struct hns_roce_dev *hr_dev;
struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp;
@@ -2076,6 +2866,37 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
if (is_send) {
wq = &(*cur_qp)->sq;
if ((*cur_qp)->sq_signal_bits) {
/*
* If sq_signal_bits is 1, the tail pointer is first updated to
* the wqe that the current cqe corresponds to.
*/
wqe_ctr = (u16)roce_get_field(cqe->byte_4,
V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S);
wq->tail += (wqe_ctr - (u16)wq->tail) &
(wq->wqe_cnt - 1);
}
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
} else if ((*cur_qp)->ibqp.srq) {
srq = to_hr_srq((*cur_qp)->ibqp.srq);
wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4,
V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S));
wc->wr_id = srq->wrid[wqe_ctr];
hns_roce_free_srq_wqe(srq, wqe_ctr);
} else {
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
}
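For signalled send queues the tail jumps forward by the masked distance between the CQE's WQE index and the current tail; because wqe_cnt is a power of two, the 16-bit subtraction plus mask lands on the right slot even across wrap-around, skipping WQEs that never generated a CQE. A worked standalone example:
#include <stdio.h>
#include <stdint.h>
int main(void)
{
	unsigned int wqe_cnt = 64; /* ring size, always a power of two */
	unsigned int tail = 60;    /* current consumer index */
	uint16_t wqe_ctr = 3;      /* index reported by the CQE, wrapped */
	/* (3 - 60) as u16 is 65479; masked by 63 it becomes 7 */
	tail += (uint16_t)(wqe_ctr - (uint16_t)tail) & (wqe_cnt - 1);
	printf("tail advanced to %u, slot %u\n", tail, tail & (wqe_cnt - 1));
	return 0;
}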
status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
V2_CQE_BYTE_4_STATUS_S);
switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
@@ -2195,23 +3016,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->status = IB_WC_GENERAL_ERR;
break;
}
wq = &(*cur_qp)->sq;
if ((*cur_qp)->sq_signal_bits) {
/*
* If sg_signal_bit is 1,
* firstly tail pointer updated to wqe
* which current cqe correspond to
*/
wqe_ctr = (u16)roce_get_field(cqe->byte_4,
V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S);
wq->tail += (wqe_ctr - (u16)wq->tail) &
(wq->wqe_cnt - 1);
}
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
} else {
/* RQ correspond to CQE */
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
@@ -2256,16 +3060,12 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
return -EAGAIN;
}
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
V2_CQE_BYTE_32_SL_S);
wc->src_qp = (u8)roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_RMT_QPN_M,
V2_CQE_BYTE_32_RMT_QPN_S);
wc->slid = 0;
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
@@ -2279,7 +3079,14 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->smac[5] = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_SMAC_5_M,
V2_CQE_BYTE_28_SMAC_5_S);
wc->vlan_id = 0xffff;
if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_VID_M,
V2_CQE_BYTE_28_VID_S);
} else {
wc->vlan_id = 0xffff;
}
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
wc->network_hdr_type = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_PORT_TYPE_M,
@@ -2367,6 +3174,19 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
break;
case HEM_TYPE_SCC_CTX:
if (step_idx) {
/* No need to notify Hardware when step_idx is 1 or 2*/
return 0;
}
op = HNS_ROCE_CMD_WRITE_SCC_CTX_BT0;
break;
case HEM_TYPE_QPC_TIMER:
op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
break;
case HEM_TYPE_CQC_TIMER:
op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
break;
default:
dev_warn(dev, "Table %d not to be written by mailbox!\n",
table->type);
@@ -2426,6 +3246,11 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
case HEM_TYPE_CQC:
op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
break;
case HEM_TYPE_SCC_CTX:
case HEM_TYPE_QPC_TIMER:
case HEM_TYPE_CQC_TIMER:
/* there is no need to destroy these ctx */
return 0;
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
break;
@@ -2503,6 +3328,55 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
}
static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
switch (state) {
case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
case HNS_ROCE_QP_ST_SQ_DRAINING:
case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
default: return -1;
}
}
static inline enum hns_roce_v2_qp_state to_hns_roce_qp_st(
enum ib_qp_state state)
{
switch (state) {
case IB_QPS_RESET: return HNS_ROCE_QP_ST_RST;
case IB_QPS_INIT: return HNS_ROCE_QP_ST_INIT;
case IB_QPS_RTR: return HNS_ROCE_QP_ST_RTR;
case IB_QPS_RTS: return HNS_ROCE_QP_ST_RTS;
case IB_QPS_SQD: return HNS_ROCE_QP_ST_SQD;
case IB_QPS_SQE: return HNS_ROCE_QP_ST_SQER;
case IB_QPS_ERR: return HNS_ROCE_QP_ST_ERR;
default: return -1;
}
}
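The two mappers above convert between IB and hardware QP states; the only asymmetry is that both SQD and SQ_DRAINING fold to IB_QPS_SQD on the way back, so a round trip is not always identity. A compilable sketch with local stand-in enumerators:
#include <stdio.h>
enum ib_st { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_SQD, QPS_SQE,
	     QPS_ERR };
enum hw_st { ST_RST, ST_INIT, ST_RTR, ST_RTS, ST_SQD, ST_SQ_DRAINING,
	     ST_SQER, ST_ERR };
static enum ib_st to_ib(enum hw_st s)
{
	switch (s) {
	case ST_RST: return QPS_RESET;
	case ST_INIT: return QPS_INIT;
	case ST_RTR: return QPS_RTR;
	case ST_RTS: return QPS_RTS;
	case ST_SQ_DRAINING: /* fall through: both map to SQD */
	case ST_SQD: return QPS_SQD;
	case ST_SQER: return QPS_SQE;
	default: return QPS_ERR;
	}
}
int main(void)
{
	printf("draining -> %d, sqd -> %d\n",
	       (int)to_ib(ST_SQ_DRAINING), (int)to_ib(ST_SQD));
	return 0;
}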
static void hns_roce_get_cqs(struct ib_qp *ibqp, struct hns_roce_cq **send_cq,
struct hns_roce_cq **recv_cq)
{
switch (ibqp->qp_type) {
case IB_QPT_XRC_TGT:
*send_cq = to_hr_cq(to_hr_xrcd(ibqp->xrcd)->cq);
*recv_cq = *send_cq;
break;
case IB_QPT_XRC_INI:
*send_cq = to_hr_cq(ibqp->send_cq);
*recv_cq = *send_cq;
break;
default:
*send_cq = to_hr_cq(ibqp->send_cq);
*recv_cq = to_hr_cq(ibqp->recv_cq);
break;
}
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -2511,7 +3385,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_cq *send_cq, *recv_cq;
hns_roce_get_cqs(ibqp, &send_cq, &recv_cq);
/*
* In v2 engine, software pass context and context mask to hardware
* when modifying qp. If software need modify some fields in context,
@@ -2523,7 +3399,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
if (ibqp->qp_type == IB_QPT_GSI)
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
roce_set_field(context->byte_4_sqpn_tst,
V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S,
@@ -2544,12 +3420,18 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_4_SQPN_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
V2_QPC_BYTE_16_PD_S,
(hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) ?
to_hr_pd(to_hr_xrcd(ibqp->xrcd)->pd)->pdn :
to_hr_pd(ibqp->pd)->pdn);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
V2_QPC_BYTE_20_RQWS_S,
(hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
ilog2(hr_qp->rq.max_gs));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, 0);
@@ -2561,6 +3443,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
(hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
ilog2((unsigned int)hr_qp->rq.wqe_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
@@ -2570,6 +3454,14 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQ_VLAN_EN_S,
0);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQ_VLAN_EN_S,
0);
roce_set_bit(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_VLAN_EN_S,
0);
roce_set_bit(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_VLAN_EN_S,
0);
/*
* Set some fields in context to zero, Because the default values
@@ -2581,28 +3473,22 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
V2_QPC_BYTE_60_MAPID_S, 0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid,
V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
0);
roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
V2_QPC_BYTE_60_TEMPID_S, 0);
roce_set_field(qpc_mask->byte_60_qpst_tempid,
V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_tempid,
V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
roce_set_bit(qpc_mask->byte_60_qpst_tempid,
V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
if (attr_mask & IB_QP_QKEY) {
context->qkey_xrcd = attr->qkey;
qpc_mask->qkey_xrcd = 0;
hr_qp->qkey = attr->qkey;
if (to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC) {
context->qkey_xrcd = hr_qp->xrcdn;
qpc_mask->qkey_xrcd = 0;
}
if (hr_qp->rdb_en) {
@@ -2627,7 +3513,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
V2_QPC_BYTE_80_RX_CQN_S, recv_cq->cqn);
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, 0);
if (ibqp->srq) {
@@ -2677,7 +3563,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
@@ -2686,8 +3573,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_144_raq,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
0);
roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
@@ -2713,14 +3598,12 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
@@ -2738,6 +3621,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
0);
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
roce_set_field(qpc_mask->byte_176_msg_pktn,
V2_QPC_BYTE_176_MSG_USE_PKTN_M,
V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
@@ -2782,6 +3668,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
0);
roce_set_bit(qpc_mask->byte_232_irrl_sge,
V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
0);
qpc_mask->irrl_cur_sge_offset = 0;
roce_set_field(qpc_mask->byte_240_irrl_tail,
@@ -2794,6 +3687,10 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_240_RX_ACK_MSN_M,
V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
roce_set_bit(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_LCL_OP_FLG_S,
0);
roce_set_bit(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_IRRL_RD_FLG_S, 0);
roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
V2_QPC_BYTE_248_IRRL_PSN_S, 0);
roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
@@ -2809,9 +3706,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
0);
hr_qp->access_flags = attr->qp_access_flags;
hr_qp->pkey_index = attr->pkey_index;
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
V2_QPC_BYTE_252_TX_CQN_S, send_cq->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -2832,7 +3728,9 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_cq *send_cq, *recv_cq;
hns_roce_get_cqs(ibqp, &send_cq, &recv_cq);
/*
* In v2 engine, software pass context and context mask to hardware
* when modifying qp. If software need modify some fields in context,
@@ -2892,30 +3790,21 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
0);
}
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
ilog2((unsigned int)hr_qp->rq.wqe_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
V2_QPC_BYTE_16_PD_S,
(hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) ?
to_hr_pd(to_hr_xrcd(ibqp->xrcd)->pd)->pdn :
to_hr_pd(ibqp->pd)->pdn);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
V2_QPC_BYTE_80_RX_CQN_S, recv_cq->cqn);
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
V2_QPC_BYTE_252_TX_CQN_S, send_cq->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -2931,11 +3820,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
}
if (attr_mask & IB_QP_QKEY) {
context->qkey_xrcd = attr->qkey;
qpc_mask->qkey_xrcd = 0;
}
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
@@ -2947,13 +3831,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_56_dqpn_err,
V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
}
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
}
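Both modify_qp_* helpers follow the convention their comments describe: hardware receives a context plus a context mask, and a field only takes effect when it is written into the context while the matching mask bits are cleared. A minimal sketch of that convention, with a simplified stand-in for the driver's roce_set_field():
#include <stdio.h>
#include <stdint.h>
/* simplified stand-in for the driver helper */
static void roce_set_field(uint32_t *word, uint32_t mask, int shift,
			   uint32_t val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}
int main(void)
{
	uint32_t context = 0;
	uint32_t qpc_mask = ~0u; /* all ones: nothing modified yet */
	uint32_t PD_M = 0xfffff, PD_S = 0;
	roce_set_field(&context, PD_M, PD_S, 42); /* new PD number */
	roce_set_field(&qpc_mask, PD_M, PD_S, 0); /* cleared: field valid */
	printf("context=0x%x mask=0x%x\n",
	       (unsigned)context, (unsigned)qpc_mask);
	return 0;
}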
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
@@ -2974,7 +3851,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
u64 *mtts_2;
u64 *mtts;
u8 *dmac;
u8 *smac;
int port;
/* Search qp buf's mtts */
@@ -3031,7 +3907,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
(((ibqp->qp_type == IB_QPT_GSI) ||
ibqp->qp_type == IB_QPT_UD) || hr_qp->sq.max_gs > 2) ?
hr_dev->caps.mtt_hop_num : 0);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
@@ -3062,13 +3939,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
>> PAGE_ADDR_SHIFT);
@@ -3096,13 +3966,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
roce_set_field(context->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
roce_set_field(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
@@ -3133,23 +3996,15 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
smac = (u8 *)hr_dev->dev_addr[port];
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1) {
roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
/* when loop_idc is 1, it should loopback */
if (ibqp->qp_type == IB_QPT_UC || ibqp->qp_type == IB_QPT_RC ||
ibqp->qp_type == IB_QPT_XRC_INI ||
ibqp->qp_type == IB_QPT_XRC_TGT) {
roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S,
hr_dev->loop_idc);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
attr->max_dest_rd_atomic) {
roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S,
fls(attr->max_dest_rd_atomic - 1));
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S, 0);
}
if (attr_mask & IB_QP_DEST_QPN) {
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
@@ -3210,11 +4065,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
context->rq_rnr_timer = 0;
qpc_mask->rq_rnr_timer = 0;
roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
V2_QPC_BYTE_152_RAQ_PSN_S, 0);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
@@ -3263,13 +4113,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
roce_set_field(context->byte_60_qpst_mapid,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
roce_set_field(qpc_mask->byte_60_qpst_mapid,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
@@ -3281,14 +4124,16 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
context->sq_cur_sge_blk_addr =
((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI ||
ibqp->qp_type == IB_QPT_UD) ||
hr_qp->sq.max_gs > 2) ?
((u32)(mtts[hr_qp->sge.offset / page_size]
>> PAGE_ADDR_SHIFT)) : 0;
roce_set_field(context->byte_184_irrl_idx,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
((ibqp->qp_type == IB_QPT_GSI ||
ibqp->qp_type == IB_QPT_UD) || hr_qp->sq.max_gs > 2) ?
(mtts[hr_qp->sge.offset / page_size] >>
(32 + PAGE_ADDR_SHIFT)) : 0);
qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -3319,13 +4164,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_240_RX_ACK_MSN_M,
V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
roce_set_field(context->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RX_ACK_EPSN_M,
V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RX_ACK_EPSN_M,
V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
roce_set_field(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
@@ -3339,27 +4177,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
roce_set_field(context->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
roce_set_field(context->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
roce_set_field(qpc_mask->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
roce_set_field(context->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
roce_set_field(qpc_mask->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
@@ -3370,61 +4187,391 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
V2_QPC_BYTE_212_CHECK_FLG_S, 0);
roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S, 0);
roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
roce_set_field(context->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
roce_set_field(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
V2_QPC_BYTE_244_RNR_CNT_S, 0);
roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
V2_QPC_BYTE_212_LSN_S, 0x100);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
V2_QPC_BYTE_212_LSN_S, 0);
if (attr_mask & IB_QP_TIMEOUT) {
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S, attr->timeout);
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S, 0);
}
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
return 0;
}
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
#ifdef CONFIG_KERNEL_419
const struct ib_gid_attr *gid_attr = NULL;
#else
struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_ROCE};
union ib_gid zgid = { {0} };
union ib_gid gid;
int status = 0;
#endif
int is_roce_protocol;
u16 vlan = 0xffff;
u8 ib_port;
u8 hr_port;
ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
hr_port = ib_port - 1;
is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
#ifdef CONFIG_KERNEL_419
if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
if (is_vlan_dev(gid_attr->ndev)) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
roce_set_bit(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
}
}
#else
if (is_roce_protocol) {
int index = grh->sgid_index;
status = ib_get_cached_gid(ibqp->device, ib_port, index, &gid,
&gid_attr);
if (!status && !memcmp(&gid, &zgid, sizeof(gid)))
status = -ENOENT;
if (!status && gid_attr.ndev) {
vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev);
if (is_vlan_dev(gid_attr.ndev)) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
roce_set_bit(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
}
dev_put(gid_attr.ndev);
}
}
if (status) {
dev_err(hr_dev->dev, "get gid during modifing QP failed\n");
return -EAGAIN;
}
#endif
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, vlan);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
dev_err(hr_dev->dev,
"sgid_index(%u) too large. max is %d\n",
grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
return -EINVAL;
}
if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
return -EINVAL;
}
#ifdef CONFIG_KERNEL_419
roce_set_field(context->byte_52_udpspn_dmac,
V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
(gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
0 : 0x12b7);
#else
roce_set_field(context->byte_52_udpspn_dmac,
V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
(gid_attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
0 : 0x12b7);
#endif
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
V2_QPC_BYTE_52_UDPSPN_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
grh->sgid_index);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
#ifdef CONFIG_KERNEL_419
if (hr_dev->pci_dev->revision == 0x21 &&
gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#else
if (hr_dev->pci_dev->revision == 0x21 &&
gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#endif
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
else
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S, grh->flow_label);
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S, 0);
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, 0);
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
return 0;
}
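/*
 * Everything above follows one convention: hns_roce_v2_modify_qp() first
 * memsets qpc_mask to 0xff, and a field is then programmed by writing the
 * value into context and clearing the same bits in qpc_mask, so hardware
 * only applies fields whose mask bits are 0. A minimal sketch of that idea,
 * assuming the roce_set_field() mask/shift semantics from hns_roce_common.h:
 */
static inline void qpc_write_field(u32 *ctx, u32 *mask,
                                   u32 fld_mask, u32 fld_shift, u32 val)
{
        *ctx &= ~fld_mask;
        *ctx |= (val << fld_shift) & fld_mask;  /* value into the context */
        *mask &= ~fld_mask;                     /* cleared mask = field valid */
}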
static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (V2_QP_SUPPORT_STATE(cur_state, new_state)) {
/* Nothing */
;
} else {
dev_err(hr_dev->dev, "Illegal state for QP!\n");
ret = -EAGAIN;
goto out;
}
out:
return ret;
}
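/*
 * V2_QP_SUPPORT_STATE() is not defined in this hunk; judging from the
 * transition list the old inline check in hns_roce_v2_modify_qp used
 * (still visible as removed code below), a sketch consistent with that
 * list would be:
 */
static inline bool v2_qp_support_state(enum ib_qp_state cur,
                                       enum ib_qp_state new)
{
        return (new == IB_QPS_RESET &&
                (cur == IB_QPS_INIT || cur == IB_QPS_RTR ||
                 cur == IB_QPS_RTS || cur == IB_QPS_ERR)) ||
               (new == IB_QPS_ERR &&
                (cur == IB_QPS_INIT || cur == IB_QPS_RTR ||
                 cur == IB_QPS_RTS || cur == IB_QPS_SQD ||
                 cur == IB_QPS_SQE || cur == IB_QPS_ERR)) ||
               (cur == IB_QPS_RTS && new == IB_QPS_RTS) ||
               (cur == IB_QPS_SQE && new == IB_QPS_RTS) ||
               (cur == IB_QPS_RTS && new == IB_QPS_SQD) ||
               (cur == IB_QPS_SQD && new == IB_QPS_SQD) ||
               (cur == IB_QPS_SQD && new == IB_QPS_RTS);
}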
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = hr_dev->dev;
int ret = 0;
/* The AV component shall be modified for RoCEv2 */
if (attr_mask & IB_QP_AV) {
ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
return ret;
}
if (attr_mask & IB_QP_TIMEOUT) {
if (attr->timeout < 31) {
roce_set_field(context->byte_28_at_fl,
V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
attr->timeout);
roce_set_field(qpc_mask->byte_28_at_fl,
V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
0);
} else
dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
}
if (attr_mask & IB_QP_RETRY_CNT) {
roce_set_field(context->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
attr->retry_cnt);
roce_set_field(qpc_mask->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
roce_set_field(context->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S,
attr->retry_cnt);
roce_set_field(qpc_mask->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S, 0);
}
if (attr_mask & IB_QP_RNR_RETRY) {
roce_set_field(context->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
roce_set_field(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
roce_set_field(context->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_CNT_M,
V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
roce_set_field(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_CNT_M,
V2_QPC_BYTE_244_RNR_CNT_S, 0);
}
/* RC&UC&UD required attr */
if (attr_mask & IB_QP_SQ_PSN) {
roce_set_field(context->byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
roce_set_field(context->byte_196_sq_psn,
V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_196_sq_psn,
V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
roce_set_field(context->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
roce_set_field(context->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
attr->sq_psn >> 16);
roce_set_field(qpc_mask->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
roce_set_field(context->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
attr->sq_psn);
roce_set_field(qpc_mask->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
roce_set_field(context->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RX_ACK_EPSN_M,
V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RX_ACK_EPSN_M,
V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
}
if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
attr->max_dest_rd_atomic) {
roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S,
fls(attr->max_dest_rd_atomic - 1));
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S, 0);
}
if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
V2_QPC_BYTE_208_SR_MAX_S,
fls(attr->max_rd_atomic - 1));
roce_set_field(qpc_mask->byte_208_irrl,
V2_QPC_BYTE_208_SR_MAX_M,
V2_QPC_BYTE_208_SR_MAX_S, 0);
}
return 0;
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
roce_set_field(context->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S,
attr->min_rnr_timer);
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
}
/* RC&UC required attr */
if (attr_mask & IB_QP_RQ_PSN) {
roce_set_field(context->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
roce_set_field(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
roce_set_field(qpc_mask->byte_152_raq,
V2_QPC_BYTE_152_RAQ_PSN_M,
V2_QPC_BYTE_152_RAQ_PSN_S, 0);
}
if (attr_mask & IB_QP_QKEY) {
context->qkey_xrcd = attr->qkey;
qpc_mask->qkey_xrcd = 0;
hr_qp->qkey = attr->qkey;
}
return ret;
}
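/*
 * The IB_QP_SQ_PSN branch above fans one PSN out to several QPC fields:
 * SQ_CUR_PSN, SQ_MAX_PSN, RX_ACK_EPSN and the two RETRY_MSG fields, where
 * byte_220 takes the PSN's low bits and byte_224 takes sq_psn >> 16.
 * A sketch of that split, assuming a 16-bit RETRY_MSG_PSN field in
 * byte_220 and the remainder in byte_224:
 */
static inline void split_retry_psn(u32 sq_psn, u16 *psn_lo, u8 *psn_hi)
{
        *psn_lo = sq_psn & 0xffff;        /* -> V2_QPC_BYTE_220_RETRY_MSG_PSN */
        *psn_hi = (sq_psn >> 16) & 0xff;  /* -> V2_QPC_BYTE_224_RETRY_MSG_PSN */
}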
static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
if (attr_mask & IB_QP_ACCESS_FLAGS)
hr_qp->atomic_rd_en = attr->qp_access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
hr_qp->port = attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
}
} }
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
@@ -3451,44 +4598,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
* 0 at the same time, else set them to 0x1.
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
(cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
(cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
(cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
(cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
/* Nothing */
;
} else {
dev_err(dev, "Illegal state for QP!\n");
ret = -EINVAL;
goto out;
}
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
new_state, context, qpc_mask);
if (ret)
goto out;
/* When QP state is err, SQ and RQ WQE should be flushed */
if (new_state == IB_QPS_ERR) {
@@ -3499,112 +4612,41 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
if (!ibqp->srq) {
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
hr_qp->rq.head);
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
}
if (attr_mask & IB_QP_AV) {
const struct ib_global_route *grh =
rdma_ah_read_grh(&attr->ah_attr);
const struct ib_gid_attr *gid_attr = NULL;
u8 src_mac[ETH_ALEN];
int is_roce_protocol;
u16 vlan = 0xffff;
u8 ib_port;
u8 hr_port;
ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
hr_qp->port + 1;
hr_port = ib_port - 1;
is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
}
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, vlan);
roce_set_field(qpc_mask->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
dev_err(hr_dev->dev,
"sgid_index(%u) too large. max is %d\n",
grh->sgid_index,
hr_dev->caps.gid_table_len[hr_port]);
ret = -EINVAL;
goto out;
}
if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
ret = -EINVAL;
goto out;
}
roce_set_field(context->byte_52_udpspn_dmac,
V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
(gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
0 : 0x12b7);
roce_set_field(qpc_mask->byte_52_udpspn_dmac,
V2_QPC_BYTE_52_UDPSPN_M,
V2_QPC_BYTE_52_UDPSPN_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S, 0);
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
roce_set_field(qpc_mask->byte_24_mtu_tc,
V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S, grh->flow_label);
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S, 0);
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S,
rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, 0);
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
}
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
/* Configure the optional fields */
ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
((ibqp->srq ||
(to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC)) ?
1 : 0));
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_INV_CREDIT_S, 0);
/* Every status migrate must change state */
roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, to_hns_roce_qp_st(new_state));
roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW pass context to HW */
ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt,
to_hns_roce_qp_st(cur_state),
to_hns_roce_qp_st(new_state),
context, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
@@ -3613,22 +4655,16 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
struct hns_roce_cq *send_cq, *recv_cq;
hns_roce_get_cqs(ibqp, &send_cq, &recv_cq);
hns_roce_v2_cq_clean(send_cq, hr_qp->qpn,
ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
if (send_cq != recv_cq)
hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
hr_qp->rq.head = 0;
hr_qp->rq.tail = 0;
@@ -3645,21 +4681,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
return ret;
}
static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
switch (state) {
case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
case HNS_ROCE_QP_ST_SQ_DRAINING:
case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
default: return -1;
}
}
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *hr_context)
@@ -3720,7 +4741,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto out;
}
state = roce_get_field(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
@@ -3779,7 +4800,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
}
qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = (state == HNS_ROCE_QP_ST_SQ_DRAINING);
qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
V2_QPC_BYTE_208_SR_MAX_M,
V2_QPC_BYTE_208_SR_MAX_S);
@@ -3837,8 +4858,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
}
}
hns_roce_get_cqs(&hr_qp->ibqp, &send_cq, &recv_cq);
hns_roce_lock_cqs(send_cq, recv_cq);
@@ -3882,7 +4902,8 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_free_db(hr_dev, &hr_qp->rdb);
}
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hr_qp->rq.wqe_cnt) {
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
kfree(hr_qp->rq_inl_buf.wqe_list);
}
@@ -3910,6 +4931,61 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
return 0;
}
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
struct hns_roce_scc_ctx_clr *scc_cxt_clr;
struct hns_roce_scc_ctx_clr_done *resp;
struct hns_roce_scc_ctx_clr_done *rst;
struct hns_roce_cmq_desc desc;
int ret;
int i;
/* set scc ctx clear done flag */
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCC_CTX,
false);
rst = (struct hns_roce_scc_ctx_clr_done *)desc.data;
memset(rst, 0, sizeof(*rst));
roce_set_bit(rst->rocee_scc_ctx_clr_done,
HNS_ROCE_V2_SCC_CTX_DONE_S,
0);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
/* clear scc context */
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_SCC_CTX_CLR,
false);
scc_cxt_clr = (struct hns_roce_scc_ctx_clr *)desc.data;
memset(scc_cxt_clr, 0, sizeof(*scc_cxt_clr));
scc_cxt_clr->rocee_scc_ctx_clr_qpn = hr_qp->qpn;
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
/* query scc context clear is done or not */
for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
hns_roce_cmq_setup_basic_desc(&desc,
HNS_ROCE_OPC_QUERY_SCC_CTX, true);
resp = (struct hns_roce_scc_ctx_clr_done *)desc.data;
memset(resp, 0, sizeof(*resp));
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
if (resp->rocee_scc_ctx_clr_done == 1)
return 0;
}
dev_err(hr_dev->dev, "clear scc ctx failure!");
return -EINVAL;
}
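/*
 * The function above is a plain "kick, then poll for done" sequence over
 * the command queue. The same control flow in isolation, with a
 * hypothetical predicate standing in for the HNS_ROCE_OPC_QUERY_SCC_CTX
 * query and HNS_ROCE_CMQ_SCC_CLR_DONE_CNT bounding the retries:
 */
static int poll_until_done(bool (*done)(void *arg), void *arg, int max_tries)
{
        int i;

        for (i = 0; i <= max_tries; i++)
                if (done(arg))          /* e.g. rocee_scc_ctx_clr_done == 1 */
                        return 0;

        return -EINVAL;                 /* hardware never reported done */
}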
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
@@ -3953,21 +5029,23 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
{
struct hns_roce_qp *qp;
struct ib_qp_attr attr;
int attr_mask;
int ret;
qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (!qp) {
dev_warn(hr_dev->dev, "no qp can be found!\n");
return;
}
if (qp->ibqp.pd->uobject) {
if (qp->sdb_en == 1) {
qp->sq.head = *(int *)(qp->sdb.virt_addr);
if (qp->rdb_en == 1)
qp->rq.head = *(int *)(qp->rdb.virt_addr);
} else {
dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
return;
@@ -3976,8 +5054,8 @@ static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
attr_mask = IB_QP_STATE;
attr.qp_state = IB_QPS_ERR;
ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
qp->state, IB_QPS_ERR);
if (ret)
dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
qpn);
@@ -3987,14 +5065,58 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work);
struct device *dev = irq_work->hr_dev->dev;
u32 qpn = irq_work->qpn;
u32 cqn = irq_work->cqn;
switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
dev_info(dev, "Path migrated succeeded.\n");
break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
dev_warn(dev, "Path migration failed.\n");
break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
dev_info(dev, "Communication established.\n");
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
dev_warn(dev, "Send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
dev_err(dev, "Local work queue catastrophic error, sub_event type is: %d\n",
irq_work->sub_type);
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
dev_err(dev, "Invalid request local work queue error.\n");
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
dev_err(dev, "Local access violation work queue error, sub_event type is: %d\n",
irq_work->sub_type);
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
dev_warn(dev, "SRQ limit reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
dev_warn(dev, "SRQ last wqe reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
dev_err(dev, "SRQ catas error.\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
dev_err(dev, "CQ 0x%x access err.\n", cqn);
break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
dev_warn(dev, "CQ 0x%x overflow\n", cqn);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
dev_warn(dev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
dev_warn(dev, "Function level reset.\n");
break;
default:
break;
}
@@ -4003,7 +5125,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
}
static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq,
u32 qpn, u32 cqn)
{
struct hns_roce_work *irq_work;
@@ -4014,6 +5137,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
irq_work->qpn = qpn;
irq_work->cqn = cqn;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
queue_work(hr_dev->irq_workq, &(irq_work->work));
@@ -4021,6 +5145,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
u32 doorbell[2];
doorbell[0] = 0;
@@ -4047,125 +5172,7 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
HNS_ROCE_V2_EQ_DB_PARA_S,
(eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe,
u32 qpn)
{
struct device *dev = hr_dev->dev;
int sub_type;
dev_warn(dev, "Local work queue catastrophic error.\n");
sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
HNS_ROCE_V2_AEQE_SUB_TYPE_S);
switch (sub_type) {
case HNS_ROCE_LWQCE_QPC_ERROR:
dev_warn(dev, "QP %d, QPC error.\n", qpn);
break;
case HNS_ROCE_LWQCE_MTU_ERROR:
dev_warn(dev, "QP %d, MTU error.\n", qpn);
break;
case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
break;
case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
break;
case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
break;
default:
dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
break;
}
}
static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe, u32 qpn)
{
struct device *dev = hr_dev->dev;
int sub_type;
dev_warn(dev, "Local access violation work queue error.\n");
sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
HNS_ROCE_V2_AEQE_SUB_TYPE_S);
switch (sub_type) {
case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
dev_warn(dev, "QP %d, R_key violation.\n", qpn);
break;
case HNS_ROCE_LAVWQE_LENGTH_ERROR:
dev_warn(dev, "QP %d, length error.\n", qpn);
break;
case HNS_ROCE_LAVWQE_VA_ERROR:
dev_warn(dev, "QP %d, VA error.\n", qpn);
break;
case HNS_ROCE_LAVWQE_PD_ERROR:
dev_err(dev, "QP %d, PD error.\n", qpn);
break;
case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
dev_warn(dev, "QP %d, rw acc error.\n", qpn);
break;
case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
dev_warn(dev, "QP %d, key state error.\n", qpn);
break;
case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
dev_warn(dev, "QP %d, MR operation error.\n", qpn);
break;
default:
dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
break;
}
}
static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe,
int event_type, u32 qpn)
{
struct device *dev = hr_dev->dev;
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_COMM_EST:
dev_warn(dev, "Communication established.\n");
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
dev_warn(dev, "Send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
dev_warn(dev, "Invalid request local work queue error.\n");
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
break;
default:
break;
}
hns_roce_qp_event(hr_dev, qpn, event_type);
}
static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe,
int event_type, u32 cqn)
{
struct device *dev = hr_dev->dev;
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
dev_warn(dev, "CQ 0x%x access err.\n", cqn);
break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
dev_warn(dev, "CQ 0x%x overflow\n", cqn);
break;
default:
break;
}
hns_roce_cq_event(hr_dev, cqn, event_type);
} }
static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
@@ -4218,6 +5225,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
int aeqe_found = 0;
int event_type;
int sub_type;
u32 srqn;
u32 qpn;
u32 cqn;
@@ -4240,34 +5248,30 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
cqn = roce_get_field(aeqe->event.cq_event.cq,
HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
srqn = roce_get_field(aeqe->event.srq_event.srq,
HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
dev_warn(dev, "Path migrated succeeded.\n");
break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
dev_warn(dev, "Path migration failed.\n");
break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
hns_roce_qp_event(hr_dev, qpn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
hns_roce_srq_event(hr_dev, srqn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
hns_roce_cq_event(hr_dev, cqn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
break;
case HNS_ROCE_EVENT_TYPE_MB:
hns_roce_cmd_event(hr_dev,
@@ -4276,10 +5280,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
le64_to_cpu(aeqe->event.cmd.out_param));
break;
case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
break;
case HNS_ROCE_EVENT_TYPE_FLR:
break;
default:
dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
@@ -4296,7 +5298,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
}
hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
}
set_eq_cons_index_v2(eq);
@@ -4409,11 +5411,23 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
struct pci_dev *pdev = hr_dev->pci_dev;
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
const struct hnae3_ae_ops *ops = ae_dev->ops;
dev_err(dev, "AEQ overflow!\n"); dev_err(dev, "AEQ overflow!\n");
roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1); roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
/* Set reset level for the following reset_event() call */
if (ops->set_default_reset_request)
ops->set_default_reset_request(ae_dev,
HNAE3_FUNC_RESET);
if (ops->reset_event)
ops->reset_event(pdev, NULL);
roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
@@ -5117,6 +6131,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
create_singlethread_workqueue("hns_roce_irq_workqueue");
if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n");
ret = -ENOMEM;
goto err_request_irq_fail;
}
@@ -5175,6 +6190,303 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
destroy_workqueue(hr_dev->irq_workq);
}
static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
u32 cqn, void *mb_buf, u64 *mtts_wqe,
u64 *mtts_idx, dma_addr_t dma_handle_wqe,
dma_addr_t dma_handle_idx)
{
struct hns_roce_srq_context *srq_context;
srq_context = mb_buf;
memset(srq_context, 0, sizeof(*srq_context));
roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
SRQC_BYTE_4_SRQ_ST_S, 1);
roce_set_field(srq_context->byte_4_srqn_srqst,
SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
(hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
hr_dev->caps.srqwqe_hop_num));
roce_set_field(srq_context->byte_4_srqn_srqst,
SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
ilog2(srq->max));
roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
SRQC_BYTE_4_SRQN_S, srq->srqn);
roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
roce_set_field(srq_context->byte_24_wqe_bt_ba,
SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
cpu_to_le32(dma_handle_wqe >> 35));
roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
SRQC_BYTE_28_PD_S, pdn);
roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
fls(srq->max_gs - 1));
srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3);
srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba);
roce_set_field(srq_context->rsv_idx_bt_ba,
SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
cpu_to_le32(dma_handle_idx >> 35));
srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT);
srq_context->idx_cur_blk_addr =
cpu_to_le32(srq_context->idx_cur_blk_addr);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT)));
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
hr_dev->caps.idx_hop_num);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
hr_dev->caps.idx_ba_pg_sz);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
hr_dev->caps.idx_buf_pg_sz);
srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT);
srq_context->idx_nxt_blk_addr =
cpu_to_le32(srq_context->idx_nxt_blk_addr);
roce_set_field(srq_context->rsv_idxnxtblkaddr,
SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT)));
roce_set_field(srq_context->byte_56_xrc_cqn,
SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
cqn);
roce_set_field(srq_context->byte_56_xrc_cqn,
SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
hr_dev->caps.srqwqe_ba_pg_sz);
roce_set_field(srq_context->byte_56_xrc_cqn,
SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
hr_dev->caps.srqwqe_buf_pg_sz);
roce_set_bit(srq_context->db_record_addr_record_en,
SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}
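/*
 * write_srqc() splits each 64-bit base address across a 32-bit word and a
 * small high-bits field: the table is 8-byte aligned, so ba >> 3 keeps the
 * low 32 bits of the aligned address and ba >> 35 keeps the rest. A sketch
 * of the split used for both wqe_bt_ba and idx_bt_ba above:
 */
static inline void split_bt_ba(u64 dma_handle, u32 *ba_lo, u32 *ba_hi)
{
        *ba_lo = (u32)(dma_handle >> 3);        /* bits [34:3] */
        *ba_hi = (u32)(dma_handle >> 35);       /* bits [63:35] */
}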
static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
struct hns_roce_srq_context *srq_context;
struct hns_roce_srq_context *srqc_mask;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
if (srq_attr_mask & IB_SRQ_LIMIT) {
if (srq_attr->srq_limit >= srq->max)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
srq_context = mailbox->buf;
srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
memset(srqc_mask, 0xff, sizeof(*srqc_mask));
roce_set_field(srq_context->byte_8_limit_wl,
SRQC_BYTE_8_SRQ_LIMIT_WL_M,
SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
roce_set_field(srqc_mask->byte_8_limit_wl,
SRQC_BYTE_8_SRQ_LIMIT_WL_M,
SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
HNS_ROCE_CMD_MODIFY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
dev_err(hr_dev->dev,
"MODIFY SRQ Failed to cmd mailbox.\n");
return ret;
}
}
return 0;
}
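/*
 * This handler is reached through the core ib_modify_srq() verb. A minimal
 * kernel-side usage sketch (the srq pointer is assumed to come from
 * ib_create_srq() elsewhere) that arms the limit event at 16 WQEs, which
 * must stay below srq->max per the check above:
 */
static int arm_srq_limit(struct ib_srq *srq)
{
        struct ib_srq_attr attr = {
                .srq_limit = 16,
        };

        return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}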
int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
struct hns_roce_srq_context *srq_context;
struct hns_roce_cmd_mailbox *mailbox;
int limit_wl;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
srq_context = mailbox->buf;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
HNS_ROCE_CMD_QUERY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
goto out;
}
limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
SRQC_BYTE_8_SRQ_LIMIT_WL_M,
SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl;
attr->max_wr = srq->max - 1;
attr->max_sge = srq->max_gs;
memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
static int find_empty_entry(struct hns_roce_idx_que *idx_que)
{
int bit_num;
int i;
/* bitmap[i] is zero once all of its index bits are allocated */
for (i = 0; idx_que->bitmap[i] == 0; ++i)
;
bit_num = ffs(idx_que->bitmap[i]);
idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
return i * sizeof(u64) * 8 + (bit_num - 1);
}
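/*
 * find_empty_entry() treats each u64 bitmap word as 64 free-index flags;
 * a set bit means "free". Note that ffs() takes an int, so only the low
 * 32 bits of each word are scanned above; an equivalent sketch with the
 * 64-bit helper from <linux/bitops.h> would be:
 */
static int find_empty_entry64(u64 *bitmap)
{
        int i = 0;
        int bit;

        while (bitmap[i] == 0)          /* word is 0 when fully allocated */
                ++i;

        bit = __ffs64(bitmap[i]);       /* first set (free) bit, 0-based */
        bitmap[i] &= ~(1ULL << bit);    /* mark the index as allocated */

        return i * 64 + bit;
}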
static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
int cur_idx, int wqe_idx)
{
unsigned int *addr;
addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
cur_idx * idx_que->entry_sz);
*addr = wqe_idx;
}
#ifdef CONFIG_KERNEL_419
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
#else
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
#endif
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_v2_db srq_db;
unsigned long flags;
int ret = 0;
int wqe_idx;
void *wqe;
int nreq;
int ind;
int i;
spin_lock_irqsave(&srq->lock, flags);
ind = srq->head & (srq->max - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->max_gs)) {
ret = -EINVAL;
*bad_wr = wr;
break;
}
if (unlikely(srq->head == srq->tail)) {
ret = -ENOMEM;
*bad_wr = wr;
break;
}
wqe_idx = find_empty_entry(&srq->idx_que);
fill_idx_queue(&srq->idx_que, ind, wqe_idx);
wqe = get_srq_wqe(srq, wqe_idx);
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
for (i = 0; i < wr->num_sge; ++i) {
dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
}
if (i < srq->max_gs) {
dseg[i].len = 0;
dseg[i].lkey = cpu_to_le32(0x100);
dseg[i].addr = 0;
}
srq->wrid[wqe_idx] = wr->wr_id;
ind = (ind + 1) & (srq->max - 1);
}
if (likely(nreq)) {
srq->head += nreq;
/*
* Make sure that descriptors are written before
* doorbell record.
*/
wmb();
srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
srq_db.parameter = srq->head;
hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
}
spin_unlock_irqrestore(&srq->lock, flags);
return ret;
}
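/*
 * The SRQ doorbell written above is two 32-bit words: byte_4 carries the
 * doorbell command in bits [31:24] with the SRQN below it, and parameter
 * carries the new producer index. A sketch of the encoding, assuming the
 * SRQN fits in the low 24 bits:
 */
static inline void make_srq_db(struct hns_roce_v2_db *db, u32 srqn, u32 head)
{
        db->byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | (srqn & 0xffffff);
        db->parameter = head;
}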
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
.query_cqc_stat = hns_roce_v2_query_cqc_stat,
.query_cmd_stat = hns_roce_v2_query_cmd_stat,
.query_ceqc_stat = hns_roce_v2_query_ceqc_stat,
.query_aeqc_stat = hns_roce_v2_query_aeqc_stat,
.query_qpc_stat = hns_roce_v2_query_qpc_stat,
.query_srqc_stat = hns_roce_v2_query_srqc_stat,
.query_mpt_stat = hns_roce_v2_query_mpt_stat,
.query_pkt_stat = hns_roce_v2_query_pkt_stat,
.modify_eq = hns_roce_v2_modify_eq,
};
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
@@ -5183,16 +6495,20 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.hw_exit = hns_roce_v2_exit,
.post_mbox = hns_roce_v2_post_mbox,
.chk_mbox = hns_roce_v2_chk_mbox,
.rst_prc_mbox = hns_roce_v2_rst_process_cmd,
.set_gid = hns_roce_v2_set_gid,
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
.modify_qp = hns_roce_v2_modify_qp,
.query_qp = hns_roce_v2_query_qp,
.destroy_qp = hns_roce_v2_destroy_qp,
.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
.modify_cq = hns_roce_v2_modify_cq,
.post_send = hns_roce_v2_post_send,
.post_recv = hns_roce_v2_post_recv,
@@ -5200,6 +6516,10 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.poll_cq = hns_roce_v2_poll_cq,
.init_eq = hns_roce_v2_init_eq_table,
.cleanup_eq = hns_roce_v2_cleanup_eq_table,
.write_srqc = hns_roce_v2_write_srqc,
.modify_srq = hns_roce_v2_modify_srq,
.query_srq = hns_roce_v2_query_srq,
.post_srq_recv = hns_roce_v2_post_srq_recv,
};
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
@@ -5217,7 +6537,9 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
const struct pci_device_id *id;
int d;
int i;
id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
@@ -5227,6 +6549,7 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
}
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = hr_dev->sdb_offset;
@@ -5235,22 +6558,29 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->caps.num_ports = 1;
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
d = is_d;
addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
hr_dev->iboe.netdevs[0]->dev_addr);
for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM(d); i++)
hr_dev->irq[i] = pci_irq_vector(handle->pdev,
i + handle->rinfo.base_vector);
snprintf(hr_dev->ib_dev.name, IB_DEVICE_NAME_MAX, "hns%s",
handle->rinfo.netdev->name);
/* cmd issue mode: 0 is poll, 1 is event */
hr_dev->cmd_mod = 1;
hr_dev->loop_idc = loopback;
hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
priv->handle = handle;
return 0;
}
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
struct hns_roce_dev *hr_dev;
int ret;
@@ -5267,7 +6597,6 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
hr_dev->pci_dev = handle->pdev;
hr_dev->dev = &handle->pdev->dev;
ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
if (ret) {
@@ -5281,6 +6610,8 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_get_cfg;
}
handle->priv = hr_dev;
return 0;
error_failed_get_cfg:
@@ -5292,7 +6623,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
return ret;
}
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
@@ -5300,24 +6631,148 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
if (!hr_dev)
return;
handle->priv = NULL;
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
}
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
struct hns_roce_dev *hr_dev;
unsigned long end;
int ret;
handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
goto head_chk_err;
}
ret = __hns_roce_hw_v2_init_instance(handle);
if (ret) {
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
dev_err(&handle->pdev->dev,
"RoCE instance init failed! ret = %d\n", ret);
if (ops->ae_dev_resetting(handle) ||
ops->get_hw_reset_stat(handle))
goto head_chk_err;
else
return ret;
}
handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
hr_dev = (struct hns_roce_dev *)handle->priv;
if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle) ||
hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle)) {
handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
goto tail_chk_err;
}
return 0;
tail_chk_err:
/* Wait until software reset process finished, in order to ensure that
* reset process and this function will not call
* __hns_roce_hw_v2_uninit_instance at the same time.
* If a timeout occurs, it indicates that the network subsystem has
* encountered a serious error and cannot be recovered from the reset
* processing.
*/
end = msecs_to_jiffies(HNS_ROCE_V2_RST_PRC_MAX_TIME) + jiffies;
while (ops->ae_dev_resetting(handle) && time_before(jiffies, end))
msleep(20);
if (!ops->ae_dev_resetting(handle))
dev_warn(&handle->pdev->dev, "Device completed reset.\n");
else {
dev_warn(&handle->pdev->dev,
"Device is still resetting! timeout!\n");
WARN_ON(1);
}
__hns_roce_hw_v2_uninit_instance(handle, false);
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
head_chk_err:
	dev_err(&handle->pdev->dev,
		"Device is busy in resetting state, please retry later.\n");
return -EBUSY;
}
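For reference, the bounded wait that both this path and the uninit path rely on distills to the following pattern; the helper name is invented for illustration, while the timeout macro, the polling cadence and the hnae3 ops come straight from this patch:

static bool hns_roce_wait_reset_done(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	/* Poll every 20 ms until the NIC reset ends or the budget runs out */
	end = msecs_to_jiffies(HNS_ROCE_V2_RST_PRC_MAX_TIME) + jiffies;
	while (ops->ae_dev_resetting(handle) && time_before(jiffies, end))
		msleep(20);

	return !ops->ae_dev_resetting(handle);
}

A false return here corresponds to the WARN_ON(1) branch above: the NIC subsystem is treated as unrecoverable and the uninit proceeds regardless.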
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long end;
if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
return;
handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
/* Check the status of the current software reset process, if in
* software reset process, wait until software reset process finished,
* in order to ensure that reset process and this function will not call
* __hns_roce_hw_v2_uninit_instance at the same time.
* If a timeout occurs, it indicates that the network subsystem has
* encountered a serious error and cannot be recovered from the reset
* processing.
*/
if (ops->ae_dev_resetting(handle)) {
dev_warn(&handle->pdev->dev,
"Device is busy in resetting state. waiting.\n");
end = msecs_to_jiffies(HNS_ROCE_V2_RST_PRC_MAX_TIME) + jiffies;
while (ops->ae_dev_resetting(handle) &&
time_before(jiffies, end))
msleep(20);
if (!ops->ae_dev_resetting(handle))
dev_warn(&handle->pdev->dev,
"Device completed reset.\n");
else {
dev_warn(&handle->pdev->dev,
"Device is still resetting! timeout!\n");
WARN_ON(1);
}
}
__hns_roce_hw_v2_uninit_instance(handle, reset);
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}
static void hns_roce_hw_v2_reset_notify_usr(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_uar *uar = (struct hns_roce_v2_uar *)priv->uar.buf;
uar->dis_db = HNS_ROCE_DISABLE_DB;
}
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
	struct ib_event event;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	if (!hr_dev)
		return 0;

	hr_dev->active = false;
	hr_dev->dis_db = true;

	hns_roce_hw_v2_reset_notify_usr(hr_dev);

	event.event = IB_EVENT_DEVICE_FATAL;
	event.device = &hr_dev->ib_dev;
@@ -5331,7 +6786,16 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	int ret;

	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) {
		clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* when reset notify type is HNAE3_INIT_CLIENT In reset notify
		 * callback function, RoCE Engine reinitialize. If RoCE reinit
@@ -5340,6 +6804,10 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
		handle->priv = NULL;
		dev_err(&handle->pdev->dev,
			"In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(&handle->pdev->dev,
			 "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
@@ -5347,8 +6815,13 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;

	msleep(100);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}

@@ -5404,3 +6877,9 @@ MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
module_param(loopback, int, 0444);
MODULE_PARM_DESC(loopback, "default: 0");
module_param(dcqcn, int, 0444);
MODULE_PARM_DESC(dcqcn, "default: 0");
module_param(is_d, int, 0444);
MODULE_PARM_DESC(is_d, "default: 0");
@@ -35,32 +35,44 @@
#include <linux/bitops.h>

#define HNS_ROCE_VF_QPC_BT_NUM(d) (d ? (256) : (8))
#define HNS_ROCE_VF_SRQC_BT_NUM(d) (d ? (64) : (8))
#define HNS_ROCE_VF_CQC_BT_NUM(d) (d ? (64) : (8))
#define HNS_ROCE_VF_MPT_BT_NUM(d) (d ? (64) : (8))
#define HNS_ROCE_VF_EQC_NUM(d) (d ? (64) : (8))
#define HNS_ROCE_VF_SMAC_NUM(d) (d ? (32) : (8))
#define HNS_ROCE_VF_SGID_NUM(d) (d ? (32) : (8))
#define HNS_ROCE_VF_SL_NUM 8

#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ 0x100000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
#define HNS_ROCE_V2_MAX_SRQ_SGE_NUM 0x100
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
#define HNS_ROCE_V2_MAX_IRQ_NUM(d) (d ? (65) : (3))
#define HNS_ROCE_V2_COMP_VEC_NUM(d) (d ? (63) : (1))
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
@@ -70,20 +82,37 @@
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_SCC_CTX_ENTRY_SZ 32
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
/* Time out for hardware to complete reset */
#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
/* The longest time for software reset process in NIC subsystem, if a timeout
* occurs, it indicates that the network subsystem has encountered a serious
* error and cannot be recovered from the reset processing.
*/
#define HNS_ROCE_V2_RST_PRC_MAX_TIME 300000
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_SCC_CTX_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_SRQWQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
#define HNS_ROCE_EQE_HOP_NUM 2
#define HNS_ROCE_IDX_HOP_NUM 1

#define HNS_ROCE_V2_GID_INDEX_NUM 256

@@ -107,13 +136,38 @@
#define HNS_ROCE_CMQ_EN_B 16
#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)

#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 100

#define check_whether_last_step(hop_num, step_idx) \
	((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
	(step_idx == 1 && hop_num == 1) || \
	(step_idx == 2 && hop_num == 2))

#define V2_QP_SUPPORT_STATE(cur_state, new_state) \
	((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) || \
(cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) || \
(cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) || \
(cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) || \
(cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) || \
(cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || \
(cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) || \
(cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) || \
(cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) || \
(cur_state == IB_QPS_SQD && new_state == IB_QPS_RESET) || \
(cur_state == IB_QPS_SQE && new_state == IB_QPS_RESET) || \
(cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) || \
(cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || \
(cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || \
(cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) || \
(cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) || \
(cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))
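V2_QP_SUPPORT_STATE enumerates the extra transitions the HIP08 state machine accepts beyond the generic verbs-layer checks. A minimal sketch of how a modify-QP path could gate on it; the wrapper function is hypothetical, the macro and the IB_QPS_* values are from this patch:

static bool hns_roce_v2_transition_supported(enum ib_qp_state cur_state,
					     enum ib_qp_state new_state)
{
	/* e.g. SQD -> RTS is accepted, RESET -> RTS is not */
	return V2_QP_SUPPORT_STATE(cur_state, new_state);
}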
#define HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT 0
#define HNS_ICL_SWITCH_CMD_ROCEE_SEL BIT(HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT)
#define CMD_CSQ_DESC_NUM (1024)
#define CMD_CRQ_DESC_NUM (1024)
enum {
	NO_ARMED = 0x0,
@@ -208,9 +262,28 @@ enum hns_roce_opcode_type {
	HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
	HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
	HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
	HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
	HNS_ROCE_OPC_QUERY_VF_NUM = 0x8407,
	HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
	HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
	HNS_ROCE_OPC_POST_MB = 0x8504,
	HNS_ROCE_OPC_QUERY_MB_ST = 0x8505,
	HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
HNS_ROCE_OPC_FUNC_CLEAR = 0x8508,
HNS_ROCE_OPC_SCC_CTX_CLR = 0x8509,
HNS_ROCE_OPC_QUERY_SCC_CTX = 0x850a,
HNS_ROCE_OPC_RESET_SCC_CTX = 0x850b,
HNS_QUERY_FW_VER = 0x0001,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
/* DFx command */
HNS_ROCE_OPC_CNT_SNAP = 0x8006,
HNS_ROCE_OPC_QUEYR_PKT_CNT = 0x8200,
HNS_ROCE_OPC_QUEYR_CQE_CNT = 0x8201,
HNS_ROCE_OPC_QUEYR_MBDB_CNT = 0x8202,
HNS_ROCE_OPC_QUEYR_CNP_RX_CNT = 0x8203,
HNS_ROCE_OPC_QUEYR_CNP_TX_CNT = 0x8204,
HNS_ROCE_OPC_QUEYR_MDB_DFX = 0x8300,
};

enum {
@@ -322,8 +395,93 @@ struct hns_roce_v2_cq_context {
#define V2_CQC_BYTE_64_SE_CQE_IDX_S 0
#define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
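The new opcodes above are issued through the CMQ helpers declared later in this header (hns_roce_cmq_setup_basic_desc() and hns_roce_cmq_send()). A minimal read-type query might look like the sketch below; the wrapper function is invented, and it assumes the descriptor's data words carry the response payload, as elsewhere in this driver:

static int hns_roce_query_fw_version(struct hns_roce_dev *hr_dev, u32 *fw_ver)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	/* Response payload overlays the descriptor data words */
	resp = (struct hns_roce_query_fw_info *)desc.data;
	*fw_ver = le32_to_cpu(resp->fw_ver);
	return 0;
}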
struct hns_roce_srq_context {
__le32 byte_4_srqn_srqst;
__le32 byte_8_limit_wl;
__le32 byte_12_xrcd;
__le32 byte_16_pi_ci;
__le32 wqe_bt_ba;
__le32 byte_24_wqe_bt_ba;
__le32 byte_28_rqws_pd;
__le32 idx_bt_ba;
__le32 rsv_idx_bt_ba;
__le32 idx_cur_blk_addr;
__le32 byte_44_idxbufpgsz_addr;
__le32 idx_nxt_blk_addr;
__le32 rsv_idxnxtblkaddr;
__le32 byte_56_xrc_cqn;
__le32 db_record_addr_record_en;
__le32 db_record_addr;
};
#define SRQC_BYTE_4_SRQ_ST_S 0
#define SRQC_BYTE_4_SRQ_ST_M GENMASK(1, 0)
#define SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S 2
#define SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M GENMASK(3, 2)
#define SRQC_BYTE_4_SRQ_SHIFT_S 4
#define SRQC_BYTE_4_SRQ_SHIFT_M GENMASK(7, 4)
#define SRQC_BYTE_4_SRQN_S 8
#define SRQC_BYTE_4_SRQN_M GENMASK(31, 8)
#define SRQC_BYTE_8_SRQ_LIMIT_WL_S 0
#define SRQC_BYTE_8_SRQ_LIMIT_WL_M GENMASK(15, 0)
#define SRQC_BYTE_12_SRQ_XRCD_S 0
#define SRQC_BYTE_12_SRQ_XRCD_M GENMASK(23, 0)
#define SRQC_BYTE_16_SRQ_PRODUCER_IDX_S 0
#define SRQC_BYTE_16_SRQ_PRODUCER_IDX_M GENMASK(15, 0)
#define SRQC_BYTE_16_SRQ_CONSUMER_IDX_S 0
#define SRQC_BYTE_16_SRQ_CONSUMER_IDX_M GENMASK(31, 16)
#define SRQC_BYTE_24_SRQ_WQE_BT_BA_S 0
#define SRQC_BYTE_24_SRQ_WQE_BT_BA_M GENMASK(28, 0)
#define SRQC_BYTE_28_PD_S 0
#define SRQC_BYTE_28_PD_M GENMASK(23, 0)
#define SRQC_BYTE_28_RQWS_S 24
#define SRQC_BYTE_28_RQWS_M GENMASK(27, 24)
#define SRQC_BYTE_36_SRQ_IDX_BT_BA_S 0
#define SRQC_BYTE_36_SRQ_IDX_BT_BA_M GENMASK(28, 0)
#define SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S 0
#define SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M GENMASK(19, 0)
#define SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S 22
#define SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M GENMASK(23, 22)
#define SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S 24
#define SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M GENMASK(27, 24)
#define SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S 28
#define SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M GENMASK(31, 28)
#define SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S 0
#define SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M GENMASK(19, 0)
#define SRQC_BYTE_56_SRQ_XRC_CQN_S 0
#define SRQC_BYTE_56_SRQ_XRC_CQN_M GENMASK(23, 0)
#define SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S 24
#define SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M GENMASK(27, 24)
#define SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S 28
#define SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M GENMASK(31, 28)
#define SRQC_BYTE_60_SRQ_RECORD_EN_S 0
#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_S 1
#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_M GENMASK(31, 1)
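These mask/shift pairs are meant to be applied with the driver's existing roce_set_field() helper when the SRQ context is written out. A fragment showing the idiom for the first two words; the surrounding function is illustrative only, the masks are from this header:

static void srqc_fill_example(struct hns_roce_srq_context *ctx,
			      u32 srqn, u16 limit_wl)
{
	memset(ctx, 0, sizeof(*ctx));
	roce_set_field(ctx->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
		       SRQC_BYTE_4_SRQN_S, srqn);
	roce_set_field(ctx->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, limit_wl);
}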
enum {
	V2_MPT_ST_VALID = 0x1,
	V2_MPT_ST_FREE = 0x2,
};

enum hns_roce_v2_qp_state {
@@ -331,8 +489,8 @@ enum hns_roce_v2_qp_state {
	HNS_ROCE_QP_ST_INIT,
	HNS_ROCE_QP_ST_RTR,
	HNS_ROCE_QP_ST_RTS,
	HNS_ROCE_QP_ST_SQD,
	HNS_ROCE_QP_ST_SQER,
	HNS_ROCE_QP_ST_ERR,
	HNS_ROCE_QP_ST_SQ_DRAINING,
	HNS_ROCE_QP_NUM_ST
@@ -350,7 +508,7 @@ struct hns_roce_v2_qp_context {
	__le32 dmac;
	__le32 byte_52_udpspn_dmac;
	__le32 byte_56_dqpn_err;
	__le32 byte_60_qpst_tempid;
	__le32 qkey_xrcd;
	__le32 byte_68_rq_db;
	__le32 rq_db_record_addr;

@@ -492,26 +650,15 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)

#define V2_QPC_BYTE_60_TEMPID_S 0
#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0)

#define V2_QPC_BYTE_60_SCC_TOKEN_S 8
#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)

#define V2_QPC_BYTE_60_SQ_DB_DOING_S 27

#define V2_QPC_BYTE_60_RQ_DB_DOING_S 28

#define V2_QPC_BYTE_60_QP_ST_S 29
#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
@@ -534,6 +681,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_76_RQIE_S 28

#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30

#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
@@ -588,7 +736,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_140_RR_MAX_S 12
#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)

#define V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S 15

#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
@@ -599,8 +747,6 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)

#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
@@ -612,8 +758,8 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)

#define V2_QPC_BYTE_152_RAQ_PSN_S 0
#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0)

#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
@@ -637,9 +783,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_168_LP_SGEN_INI_S 22
#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22)

#define V2_QPC_BYTE_168_SQ_VLAN_EN_S 24
#define V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S 25
#define V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S 26
#define V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S 27
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)

@@ -725,6 +872,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
#define V2_QPC_BYTE_232_SO_LP_VLD_S 29
#define V2_QPC_BYTE_232_FENCE_LP_VLD_S 30
#define V2_QPC_BYTE_232_IRRL_LP_VLD_S 31
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)

@@ -743,6 +894,9 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_244_RNR_CNT_S 27
#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
#define V2_QPC_BYTE_244_LCL_OP_FLG_S 30
#define V2_QPC_BYTE_244_IRRL_RD_FLG_S 31
#define V2_QPC_BYTE_248_IRRL_PSN_S 0
#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)

@@ -818,6 +972,11 @@ struct hns_roce_v2_cqe {
#define V2_CQE_BYTE_28_PORT_TYPE_S 16
#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
#define V2_CQE_BYTE_28_VID_S 18
#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18)
#define V2_CQE_BYTE_28_VID_VLD_S 30
#define V2_CQE_BYTE_32_RMT_QPN_S 0
#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)

@@ -878,8 +1037,19 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_8_LW_EN_S 7
#define V2_MPT_BYTE_8_MW_CNT_S 8
#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8)
#define V2_MPT_BYTE_12_FRE_S 0
#define V2_MPT_BYTE_12_PA_S 1
#define V2_MPT_BYTE_12_MR_MW_S 4
#define V2_MPT_BYTE_12_BPD_S 5
#define V2_MPT_BYTE_12_BQP_S 6
#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7

#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8

@@ -988,6 +1158,8 @@ struct hns_roce_v2_ud_send_wqe {
#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30
#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31

#define V2_UD_SEND_WQE_DMAC_0_S 0

@@ -1042,6 +1214,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19
#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20
#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21
#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22
#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)

@@ -1051,6 +1233,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
struct hns_roce_wqe_frmr_seg {
__le32 pbl_size;
__le32 mode_buf_pg_sz;
};
#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4)
#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8
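The FRMR segment rides in the RC send WQE; a sketch of how a post-send path might populate it. The field choices follow the masks above, while the exact page-shift handling is an assumption:

static void frmr_seg_fill_example(struct hns_roce_wqe_frmr_seg *fseg,
				  u32 pbl_size, u32 pbl_buf_pg_shift)
{
	fseg->pbl_size = cpu_to_le32(pbl_size);
	fseg->mode_buf_pg_sz = 0;
	roce_set_field(fseg->mode_buf_pg_sz,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
		       pbl_buf_pg_shift);
}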
struct hns_roce_v2_wqe_data_seg {
	__le32 len;
	__le32 lkey;
@@ -1068,6 +1260,29 @@ struct hns_roce_query_version {
	__le32 rsv[5];
};
struct hns_roce_query_fw_info {
__le32 fw_ver;
__le32 rsv[5];
};
struct hns_roce_func_clear {
__le32 rst_funcid_en;
__le32 func_done;
__le32 rsv[4];
};
struct hns_roce_pf_func_num {
__le32 pf_own_func_num;
__le32 func_done;
__le32 rsv[4];
};
#define FUNC_CLEAR_RST_FUN_EN_S 8
#define FUNC_CLEAR_RST_FUN_DONE_S 0
#define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (512 * 100)
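Taken together, the struct and the two bit positions suggest a kick-then-poll handshake: write rst_funcid_en with the enable bit set, then re-read until func_done reports completion or the 51.2 s budget expires. A sketch under those assumptions; the loop cadence and helper name are invented:

static int func_clear_example(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;
	roce_set_bit(resp->rst_funcid_en, FUNC_CLEAR_RST_FUN_EN_S, 1);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	end = jiffies + msecs_to_jiffies(HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS);
	while (time_before(jiffies, end)) {
		msleep(20);
		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;
		resp = (struct hns_roce_func_clear *)desc.data;
		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S))
			return 0;
	}

	return -ETIMEDOUT;
}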
struct hns_roce_cfg_llm_a {
	__le32 base_addr_l;
	__le32 base_addr_h;
@@ -1157,7 +1372,8 @@ struct hns_roce_pf_res_b {
	__le32 smac_idx_num;
	__le32 sgid_idx_num;
	__le32 qid_idx_sl_num;
	__le32 scc_ctx_bt_idx_num;
	__le32 rsv;
};

#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
@@ -1178,6 +1394,31 @@ struct hns_roce_pf_res_b {
#define PF_RES_DATA_3_PF_SL_NUM_S 16
#define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
#define PF_RES_DATA_4_PF_SCC_CTX_BT_IDX_S 0
#define PF_RES_DATA_4_PF_SCC_CTX_BT_IDX_M GENMASK(8, 0)
#define PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_S 9
#define PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_M GENMASK(17, 9)
struct hns_roce_pf_timer_res_a {
__le32 rsv0;
__le32 qpc_timer_bt_idx_num;
__le32 cqc_timer_bt_idx_num;
__le32 rsv[3];
};
#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
struct hns_roce_vf_res_a {
	__le32 vf_id;
	__le32 vf_qpc_bt_idx_num;
@@ -1246,12 +1487,43 @@ struct hns_roce_vf_res_b {
#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
struct hns_roce_vf_switch {
__le32 rocee_sel;
__le32 fun_id;
__le32 cfg;
__le32 resv1;
__le32 resv2;
__le32 resv3;
};
#define VF_SWITCH_DATA_FUN_ID_VF_ID_S 3
#define VF_SWITCH_DATA_FUN_ID_VF_ID_M GENMASK(10, 3)
#define VF_SWITCH_DATA_CFG_ALW_LPBK_S 1
#define VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S 2
#define VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S 3
struct hns_roce_post_mbox {
__le32 in_param_l;
__le32 in_param_h;
__le32 out_param_l;
__le32 out_param_h;
__le32 cmd_tag;
__le32 token_event_en;
};
struct hns_roce_mbox_status {
__le32 mb_status_hw_run;
__le32 rsv[5];
};
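hns_roce_post_mbox mirrors the legacy mailbox registers as a CMQ payload. A sketch of the packing, assuming the usual hns convention of modifier-in-the-high-bits plus opcode-in-the-low-byte for cmd_tag, and event-flag-plus-token for token_event_en:

static void post_mbox_fill_example(struct hns_roce_post_mbox *mb,
				   u64 in_param, u64 out_param,
				   u32 in_modifier, u8 op,
				   u16 token, int event)
{
	mb->in_param_l = cpu_to_le32(lower_32_bits(in_param));
	mb->in_param_h = cpu_to_le32(upper_32_bits(in_param));
	mb->out_param_l = cpu_to_le32(lower_32_bits(out_param));
	mb->out_param_h = cpu_to_le32(upper_32_bits(out_param));
	mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
	mb->token_event_en = cpu_to_le32(event << 16 | token);
}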
struct hns_roce_cfg_bt_attr {
	__le32 vf_qpc_cfg;
	__le32 vf_srqc_cfg;
	__le32 vf_cqc_cfg;
	__le32 vf_mpt_cfg;
	__le32 vf_scc_ctx_cfg;
	__le32 rsv;
};

#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
@@ -1290,6 +1562,15 @@ struct hns_roce_cfg_bt_attr {
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_S 0
#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_M GENMASK(3, 0)
#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_S 4
#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_M GENMASK(7, 4)
#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_M GENMASK(9, 8)
struct hns_roce_cfg_sgid_tb {
	__le32 table_idx_rsv;
	__le32 vf_sgid_l;
@@ -1329,18 +1610,6 @@ struct hns_roce_cmq_desc {
#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF

struct hns_roce_v2_cmq_ring {
	dma_addr_t desc_dma_addr;
	struct hns_roce_cmq_desc *desc;
@@ -1384,10 +1653,18 @@ struct hns_roce_link_table_entry {
#define HNS_ROCE_LINK_TABLE_NXT_PTR_S 20
#define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
#define HNS_ROCE_V2_UAR_BUF_SIZE 4096
struct hns_roce_v2_uar {
u32 dis_db;
};
struct hns_roce_v2_priv {
	struct hnae3_handle *handle;
	struct hns_roce_v2_cmq cmq;
	struct hns_roce_link_table tsq;
	struct hns_roce_link_table tpq;
	struct hns_roce_buf_list uar;
};

struct hns_roce_eq_context {
@@ -1435,8 +1712,8 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31
#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31

#define HNS_ROCE_V2_COMP_EQE_NUM 0x100000
#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x100000

#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
struct hns_roce_wqe_atomic_seg {
__le64 fetchadd_swap_data;
__le64 cmp_data;
};
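The atomic segment carries the operands for the two RC atomics; a sketch of how a post-send path might fill it from an ib_atomic_wr. Both fields are used for compare-and-swap, only the first for fetch-and-add; the helper itself is illustrative:

static void atomic_seg_fill_example(struct hns_roce_wqe_atomic_seg *aseg,
				    const struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
		aseg->cmp_data = cpu_to_le64(wr->compare_add);
	} else {
		aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
		aseg->cmp_data = 0;
	}
}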
struct hns_roce_query_mbdb_cnt {
__le32 mailbox_issue_cnt;
__le32 mailbox_exe_cnt;
__le32 doorbell_issue_cnt;
__le32 doorbell_exe_cnt;
__le32 eq_doorbell_issue_cnt;
__le32 eq_doorbell_exe_cnt;
};
struct rdfx_cnt_snap {
__le32 data_0;
__le32 rsv[5];
};
struct rdfx_query_pkt_cnt {
__le32 rc_pkt_num;
__le32 uc_pkt_num;
__le32 ud_pkt_num;
__le32 xrc_pkt_num;
__le32 total_pkt_num;
__le32 error_pkt_num;
};
struct rdfx_query_cqe_cnt {
__le32 port0_cqe;
__le32 port1_cqe;
__le32 port2_cqe;
__le32 port3_cqe;
__le32 rsv[2];
};
struct rdfx_query_cnp_rx_cnt {
__le32 port0_cnp_rx;
__le32 port1_cnp_rx;
__le32 port2_cnp_rx;
__le32 port3_cnp_rx;
__le32 rsv[2];
};
struct rdfx_query_cnp_tx_cnt {
__le32 port0_cnp_tx;
__le32 port1_cnp_tx;
__le32 port2_cnp_tx;
__le32 port3_cnp_tx;
__le32 rsv[2];
};
int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev,
char *buf, int *buff_size);
int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int hns_roce_v2_query_cmd_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev,
u16 eq_count, u16 eq_period, u16 type);
void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
enum hns_roce_opcode_type opcode,
bool is_read);
int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num);
#define HNS_ROCE_V2_SCC_CTX_DONE_S 0
struct hns_roce_scc_ctx_clr {
__le32 rocee_scc_ctx_clr_qpn;
__le32 rsv[5];
};
struct hns_roce_scc_ctx_clr_done {
__le32 rocee_scc_ctx_clr_done;
__le32 rsv[5];
};
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
hns_roce_write64_k(val, dest);
}
#endif
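hns_roce_write64() above is the reset-aware doorbell writer: it silently drops the write while dis_db is set or the hardware reports a reset in progress. Typical use, with an illustrative doorbell word layout:

static void ring_db_example(struct hns_roce_dev *hr_dev,
			    void __iomem *db_reg, u32 word0, u32 word1)
{
	__le32 db[2];

	db[0] = cpu_to_le32(word0);
	db[1] = cpu_to_le32(word1);

	/* No-op during reset, so verbs paths cannot touch a dead BAR */
	hns_roce_write64(hr_dev, db, db_reg);
}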
@@ -30,9 +30,22 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "roce_k_compat.h"

#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/sched.h>
#ifdef HAVE_LINUX_MM_H
#include <linux/mm.h>
#else
#include <linux/sched/mm.h>
#endif
#ifdef HAVE_LINUX_SCHED_H
#include <linux/sched.h>
#else
#include <linux/sched/task.h>
#endif
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -74,20 +87,33 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

#ifdef CONFIG_NEW_KERNEL
#ifdef CONFIG_KERNEL_419
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
#else
static int hns_roce_add_gid(const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context)
#endif
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	unsigned long flags;
	int ret;

	if (port >= hr_dev->caps.num_ports ||
	    attr->index >= hr_dev->caps.gid_table_len[port]) {
		dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n",
			port, attr->index);
		return -EINVAL;
	}

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
#ifdef CONFIG_KERNEL_419
	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
#else
	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index,
				  (union ib_gid *)gid, attr);
#endif
	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return ret;
@@ -112,6 +138,55 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
	return ret;
}
#else
static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(device);
u8 port = port_num - 1;
unsigned long flags;
int ret;
if (port >= hr_dev->caps.num_ports ||
	    index >= hr_dev->caps.gid_table_len[port]) {
dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n",
port, index);
return -EINVAL;
}
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
attr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
return ret;
}
static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
unsigned int index, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(device);
struct ib_gid_attr zattr = { };
union ib_gid zgid = { {0} };
u8 port = port_num - 1;
unsigned long flags;
int ret;
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, &zattr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
return ret;
}
#endif
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
@@ -196,6 +271,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
@@ -206,8 +282,14 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
props->device_cap_flags |= IB_DEVICE_XRC;
#ifdef CONFIG_KERNEL_419
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
#else
props->max_sge = min(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
#endif
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
@@ -215,10 +297,26 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
props->max_srq = hr_dev->caps.max_srqs;
props->max_srq_wr = hr_dev->caps.max_srq_wrs;
props->max_srq_sge = hr_dev->caps.max_srq_sges;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
props->max_mw = hr_dev->caps.num_mtpts;
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_WINDOW_TYPE_2B;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	return 0;
}

@@ -292,6 +390,12 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
	return IB_LINK_LAYER_ETHERNET;
}
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
union ib_gid *gid)
{
return 0;
}
static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
@@ -434,12 +538,11 @@ static int hns_roce_mmap(struct ib_ucontext *context,
			       to_hr_ucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && hr_dev->uar2_dma_addr &&
		   hr_dev->uar2_size) {
		if (io_remap_pfn_range(vma, vma->vm_start,
				       hr_dev->uar2_dma_addr >> PAGE_SHIFT,
				       hr_dev->uar2_size,
				       vma->vm_page_prot))
			return -EAGAIN;
	} else
@@ -508,7 +611,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;
	if (!strlen(ib_dev->name))
		strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);

	ib_dev->owner = THIS_MODULE;
	ib_dev->node_type = RDMA_NODE_IB_CA;
@@ -532,11 +636,18 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV) |
(1ULL << IB_USER_VERBS_CMD_CREATE_XSRQ);
#ifdef MODIFY_CQ_MASK
	ib_dev->uverbs_ex_cmd_mask |=
		(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
#endif
	/* HCA||device||port */
	ib_dev->modify_device = hns_roce_modify_device;
	ib_dev->query_device = hns_roce_query_device;
@@ -544,6 +655,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
	ib_dev->modify_port = hns_roce_modify_port;
	ib_dev->get_link_layer = hns_roce_get_link_layer;
	ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
	ib_dev->add_gid = hns_roce_add_gid;
	ib_dev->del_gid = hns_roce_del_gid;
	ib_dev->query_pkey = hns_roce_query_pkey;
@@ -559,6 +671,12 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
	ib_dev->create_ah = hns_roce_create_ah;
	ib_dev->query_ah = hns_roce_query_ah;
	ib_dev->destroy_ah = hns_roce_destroy_ah;
/* SRQ */
ib_dev->create_srq = hns_roce_create_srq;
ib_dev->modify_srq = hr_dev->hw->modify_srq;
ib_dev->query_srq = hr_dev->hw->query_srq;
ib_dev->destroy_srq = hns_roce_destroy_srq;
ib_dev->post_srq_recv = hr_dev->hw->post_srq_recv;
	/* QP */
	ib_dev->create_qp = hns_roce_create_qp;
@@ -584,11 +702,36 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
	}
/* MW */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
ib_dev->alloc_mw = hns_roce_alloc_mw;
ib_dev->dealloc_mw = hns_roce_dealloc_mw;
ib_dev->uverbs_cmd_mask |=
(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
}
/* FRMR */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
ib_dev->alloc_mr = hns_roce_alloc_mr;
ib_dev->map_mr_sg = hns_roce_map_mr_sg;
}
	/* OTHERS */
	ib_dev->get_port_immutable = hns_roce_port_immutable;
	ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ib_dev->alloc_xrcd = hns_roce_ib_alloc_xrcd;
ib_dev->dealloc_xrcd = hns_roce_ib_dealloc_xrcd;
ib_dev->uverbs_cmd_mask |=
(1ULL << IB_USER_VERBS_CMD_OPEN_XRCD) |
(1ULL << IB_USER_VERBS_CMD_CLOSE_XRCD);
}
#ifdef CONFIG_NEW_KERNEL
	ib_dev->driver_id = RDMA_DRIVER_HNS;
#endif
	ret = ib_register_device(ib_dev, NULL);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
@@ -689,8 +832,111 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
		goto err_unmap_trrl;
	}
if (hr_dev->caps.scc_ctx_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qp_table.scc_ctx_table,
HEM_TYPE_SCC_CTX,
hr_dev->caps.scc_ctx_entry_sz,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev,
"Failed to init SCC context memory, aborting.\n");
goto err_unmap_cq;
}
}
if (hr_dev->caps.qpc_timer_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qpc_timer_table.table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
hr_dev->caps.num_qpc_timer, 1);
if (ret) {
dev_err(dev,
"Failed to init QPC timer memory, aborting.\n");
goto err_unmap_ctx;
}
}
if (hr_dev->caps.cqc_timer_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->cqc_timer_table.table,
HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz,
hr_dev->caps.num_cqc_timer, 1);
if (ret) {
dev_err(dev,
"Failed to init CQC timer memory, aborting.\n");
goto err_unmap_qpc_timer;
}
}
if (hr_dev->caps.srqc_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
HEM_TYPE_SRQC,
hr_dev->caps.srqc_entry_sz,
hr_dev->caps.num_srqs, 1);
if (ret) {
dev_err(dev,
"Failed to init SRQ context memory, aborting.\n");
goto err_unmap_cqc_timer;
}
}
if (hr_dev->caps.num_srqwqe_segs) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->mr_table.mtt_srqwqe_table,
HEM_TYPE_SRQWQE,
hr_dev->caps.mtt_entry_sz,
hr_dev->caps.num_srqwqe_segs, 1);
if (ret) {
dev_err(dev,
"Failed to init MTT srqwqe memory, aborting.\n");
goto err_unmap_srq;
}
}
if (hr_dev->caps.num_idx_segs) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->mr_table.mtt_idx_table,
HEM_TYPE_IDX,
hr_dev->caps.idx_entry_sz,
hr_dev->caps.num_idx_segs, 1);
if (ret) {
dev_err(dev,
"Failed to init MTT idx memory, aborting.\n");
goto err_unmap_srqwqe;
}
}
	return 0;
err_unmap_srqwqe:
if (hr_dev->caps.num_srqwqe_segs)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->mr_table.mtt_srqwqe_table);
err_unmap_srq:
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
err_unmap_cqc_timer:
if (hr_dev->caps.cqc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->cqc_timer_table.table);
err_unmap_qpc_timer:
if (hr_dev->caps.qpc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qpc_timer_table.table);
err_unmap_ctx:
if (hr_dev->caps.scc_ctx_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.scc_ctx_table);
err_unmap_cq:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
@@ -752,10 +998,18 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
		goto err_uar_alloc_free;
	}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ret = hns_roce_init_xrcd_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to init protected domain table.\n");
goto err_pd_table_free;
}
}
	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_xrcd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
@@ -770,14 +1024,31 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
		goto err_cq_table_free;
	}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ret = hns_roce_init_srq_table(hr_dev);
if (ret) {
dev_err(dev,
"Failed to init share receive queue table.\n");
goto err_qp_table_free;
}
}
	return 0;
err_qp_table_free:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_qp_table(hr_dev);
err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);
err_xrcd_table_free:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
hns_roce_cleanup_xrcd_table(hr_dev);
err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);
@@ -861,6 +1132,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
	if (ret)
		goto error_failed_register_device;
(void)hns_roce_register_sysfs(hr_dev);
	return 0;

error_failed_register_device:
@@ -900,7 +1173,6 @@ EXPORT_SYMBOL_GPL(hns_roce_init);
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
...
@@ -30,6 +30,7 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "roce_k_compat.h"

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
@@ -184,12 +185,27 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
	struct hns_roce_buddy *buddy;
	int ret;

	switch (mtt_type) {
	case MTT_TYPE_WQE:
		buddy = &mr_table->mtt_buddy;
		table = &mr_table->mtt_table;
		break;
	case MTT_TYPE_CQE:
		buddy = &mr_table->mtt_cqe_buddy;
		table = &mr_table->mtt_cqe_table;
break;
case MTT_TYPE_SRQWQE:
buddy = &mr_table->mtt_srqwqe_buddy;
table = &mr_table->mtt_srqwqe_table;
break;
case MTT_TYPE_IDX:
buddy = &mr_table->mtt_idx_buddy;
table = &mr_table->mtt_idx_table;
break;
default:
dev_err(hr_dev->dev, "Unsupport MTT table type: %d\n",
mtt_type);
return -EINVAL;
	}

	ret = hns_roce_buddy_alloc(buddy, order, seg);
@@ -229,7 +245,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
	/* Allocate MTT entry */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
				       mtt->mtt_type);
	if (ret != 0)
		return -ENOMEM;

	return 0;
@@ -242,18 +258,40 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
	if (mtt->order < 0)
		return;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_CQE:
		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
break;
case MTT_TYPE_SRQWQE:
hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
break;
case MTT_TYPE_IDX:
hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
break;
default:
dev_err(hr_dev->dev,
"Unsupport mtt type %d, clean mtt failed\n",
mtt->mtt_type);
break;
	}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
@@ -329,7 +367,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
	u64 bt_idx;
	u64 size;

	mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
mr->pbl_size = npages; mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr; mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = mhop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0; return 0;
...@@ -511,7 +549,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, ...@@ -511,7 +549,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->key = hw_index_to_key(index); /* MR key */ mr->key = hw_index_to_key(index); /* MR key */
if (size == ~0ull) { if (size == ~0ull) {
mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL; mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0; mr->pbl_dma_addr = 0;
/* PBL multi-hop addressing parameters */ /* PBL multi-hop addressing parameters */
...@@ -522,7 +559,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, ...@@ -522,7 +559,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->pbl_l1_dma_addr = NULL; mr->pbl_l1_dma_addr = NULL;
mr->pbl_l0_dma_addr = 0; mr->pbl_l0_dma_addr = 0;
} else { } else {
mr->type = MR_TYPE_MR;
if (!hr_dev->caps.pbl_hop_num) { if (!hr_dev->caps.pbl_hop_num) {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr), &(mr->pbl_dma_addr),
...@@ -548,9 +584,9 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev, ...@@ -548,9 +584,9 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
u32 mhop_num; u32 mhop_num;
u64 bt_idx; u64 bt_idx;
npages = mr->pbl_size;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
if (mhop_num == HNS_ROCE_HOP_NUM_0) if (mhop_num == HNS_ROCE_HOP_NUM_0)
return; return;
...@@ -636,7 +672,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, ...@@ -636,7 +672,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
} }
if (mr->size != ~0ULL) { if (mr->size != ~0ULL) {
if (mr->type == MR_TYPE_MR)
npages = ib_umem_page_count(mr->umem);
if (!hr_dev->caps.pbl_hop_num) if (!hr_dev->caps.pbl_hop_num)
dma_free_coherent(dev, (unsigned int)(npages * 8), dma_free_coherent(dev, (unsigned int)(npages * 8),
...@@ -674,7 +711,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, ...@@ -674,7 +711,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_table; goto err_table;
} }
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
else
ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) { if (ret) {
dev_err(dev, "Write mtpt fail!\n"); dev_err(dev, "Write mtpt fail!\n");
goto err_page; goto err_page;
...@@ -711,10 +751,26 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, ...@@ -711,10 +751,26 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
u32 bt_page_size; u32 bt_page_size;
u32 i; u32 i;
switch (mtt->mtt_type) {
case MTT_TYPE_WQE:
table = &hr_dev->mr_table.mtt_table;
bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_CQE:
table = &hr_dev->mr_table.mtt_cqe_table;
bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_SRQWQE:
table = &hr_dev->mr_table.mtt_srqwqe_table;
bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_IDX:
table = &hr_dev->mr_table.mtt_idx_table;
bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
break;
default:
return -EINVAL;
}
/* All MTTs must fit in the same page */ /* All MTTs must fit in the same page */
if (start_index / (bt_page_size / sizeof(u64)) != if (start_index / (bt_page_size / sizeof(u64)) !=
...@@ -724,11 +780,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, ...@@ -724,11 +780,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1)) if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
return -EINVAL; return -EINVAL;
if (mtt->mtt_type == MTT_TYPE_WQE)
table = &hr_dev->mr_table.mtt_table;
else
table = &hr_dev->mr_table.mtt_cqe_table;
mtts = hns_roce_table_find(hr_dev, table, mtts = hns_roce_table_find(hr_dev, table,
mtt->first_seg + s / hr_dev->caps.mtt_entry_sz, mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
&dma_handle); &dma_handle);
...@@ -757,10 +808,25 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev, ...@@ -757,10 +808,25 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
if (mtt->order < 0) if (mtt->order < 0)
return -EINVAL; return -EINVAL;
switch (mtt->mtt_type) {
case MTT_TYPE_WQE:
bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_CQE:
bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_SRQWQE:
bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_IDX:
bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
break;
default:
dev_err(hr_dev->dev,
"Unsupport mtt type %d, write mtt failed\n",
mtt->mtt_type);
return -EINVAL;
}
while (npages > 0) { while (npages > 0) {
chunk = min_t(int, bt_page_size / sizeof(u64), npages); chunk = min_t(int, bt_page_size / sizeof(u64), npages);
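The loop above slices the page list so no single write crosses a base-address-table (BT) page, since hns_roce_write_mtt_chunk() requires all MTTs of one chunk to sit in the same page. A hedged userspace sketch of the same chunking (helper names and the 4 KB BT page are mine, illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	#define BT_PAGE_SIZE 4096

	static int write_mtt_chunk(int start, int n, const uint64_t *bufs)
	{
		printf("program MTT entries [%d, %d)\n", start, start + n);
		return 0;
	}

	static int write_mtt(int start_index, int npages, const uint64_t *page_list)
	{
		const int max_chunk = BT_PAGE_SIZE / sizeof(uint64_t);
		int chunk, ret;

		while (npages > 0) {
			/* never let one chunk span two BT pages */
			chunk = npages < max_chunk ? npages : max_chunk;
			ret = write_mtt_chunk(start_index, chunk, page_list);
			if (ret)
				return ret;
			npages -= chunk;
			start_index += chunk;
			page_list += chunk;
		}
		return 0;
	}

	int main(void)
	{
		uint64_t pages[1200] = {0};

		return write_mtt(0, 1200, pages);
	}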
...@@ -826,8 +892,26 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev) ...@@ -826,8 +892,26 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
if (ret) if (ret)
goto err_buddy_cqe; goto err_buddy_cqe;
} }
ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
ilog2(hr_dev->caps.num_srqwqe_segs));
if (ret)
goto err_buddy_srqwqe;
ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
ilog2(hr_dev->caps.num_idx_segs));
if (ret)
goto err_buddy_idx;
return 0; return 0;
err_buddy_idx:
hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
err_buddy_srqwqe:
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
err_buddy_cqe: err_buddy_cqe:
hns_roce_buddy_cleanup(&mr_table->mtt_buddy); hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
...@@ -840,6 +924,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev) ...@@ -840,6 +924,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
hns_roce_buddy_cleanup(&mr_table->mtt_buddy); hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy); hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
...@@ -855,6 +941,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) ...@@ -855,6 +941,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
if (mr == NULL) if (mr == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
/* Allocate memory region key */ /* Allocate memory region key */
ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0, ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
~0ULL, acc, 0, mr); ~0ULL, acc, 0, mr);
...@@ -893,8 +981,25 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev, ...@@ -893,8 +981,25 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
u32 bt_page_size; u32 bt_page_size;
u32 n; u32 n;
switch (mtt->mtt_type) {
case MTT_TYPE_WQE:
order = hr_dev->caps.mtt_ba_pg_sz;
break;
case MTT_TYPE_CQE:
order = hr_dev->caps.cqe_ba_pg_sz;
break;
case MTT_TYPE_SRQWQE:
order = hr_dev->caps.srqwqe_ba_pg_sz;
break;
case MTT_TYPE_IDX:
order = hr_dev->caps.idx_ba_pg_sz;
break;
default:
dev_err(dev, "Unsupport mtt type %d, write mtt failed\n",
mtt->mtt_type);
return -EINVAL;
}
bt_page_size = 1 << (order + PAGE_SHIFT); bt_page_size = 1 << (order + PAGE_SHIFT);
pages = (u64 *) __get_free_pages(GFP_KERNEL, order); pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
...@@ -1017,20 +1122,22 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1017,20 +1122,22 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_umem; goto err_umem;
} }
} else { } else {
u64 pbl_size = 1;
bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8; bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
for (i = 0; i < hr_dev->caps.pbl_hop_num; i++) for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
pbl_size *= bt_size; pbl_size *= bt_size;
if (n > pbl_size) { if (n > pbl_size) {
dev_err(dev, dev_err(dev,
" MR len %lld err. MR page num is limited to %d!\n", " MR len %lld err. MR page num is limited to %lld!\n",
length, pbl_size); length, pbl_size);
ret = -EINVAL; ret = -EINVAL;
goto err_umem; goto err_umem;
} }
} }
mr->type = MR_TYPE_MR;
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length, ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
access_flags, n, mr); access_flags, n, mr);
if (ret) if (ret)
...@@ -1201,3 +1308,194 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr) ...@@ -1201,3 +1308,194 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
return ret; return ret;
} }
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
u64 length;
u32 page_size;
int ret;
page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
length = max_num_sg * page_size;
if (mr_type != IB_MR_TYPE_MEM_REG)
return ERR_PTR(-EINVAL);
if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
dev_err(dev, "max_num_sg larger than %d\n",
HNS_ROCE_FRMR_MAX_PA);
return ERR_PTR(-EINVAL);
}
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_FRMR;
/* Allocate memory region key */
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
0, max_num_sg, mr);
if (ret)
goto err_free;
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
goto err_free_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
mr->umem = NULL;
return &mr->ibmr;
err_free_mr:
hns_roce_mr_free(to_hr_dev(pd->device), mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
}
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);
return 0;
}
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
mr->npages = 0;
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
}
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mw *mw)
{
struct device *dev = hr_dev->dev;
int ret;
if (mw->enabled) {
ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
& (hr_dev->caps.num_mtpts - 1));
if (ret)
dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
key_to_hw_index(mw->rkey));
}
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
struct hns_roce_mw *mw)
{
unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
int ret;
/* prepare HEM entry memory */
ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
if (ret)
return ret;
/* allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_table;
}
ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
if (ret) {
dev_err(dev, "MW write mtpt fail!\n");
goto err_page;
}
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
goto err_page;
}
mw->enabled = 1;
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return 0;
err_page:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_table:
hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
return ret;
}
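hns_roce_mw_enable() follows the same enable sequence every hns context object uses: reserve the HEM slot, build the context image in a mailbox, post SW2HW to firmware, and unwind in reverse on failure. A compact sketch of that pattern (every helper below is an illustrative stand-in, not the driver's API):

	#include <errno.h>
	#include <stdlib.h>

	struct mailbox { void *buf; };

	static int table_get(unsigned long idx) { (void)idx; return 0; }
	static void table_put(unsigned long idx) { (void)idx; }
	static struct mailbox *alloc_mailbox(void)
	{
		return calloc(1, sizeof(struct mailbox));
	}
	static void free_mailbox(struct mailbox *mb) { free(mb); }
	static int write_ctx(struct mailbox *mb) { (void)mb; return 0; }
	static int post_sw2hw(struct mailbox *mb, unsigned long idx)
	{
		(void)mb; (void)idx; return 0;
	}

	static int resource_enable(unsigned long idx)
	{
		struct mailbox *mb;
		int ret;

		ret = table_get(idx);		/* reserve HEM for the context */
		if (ret)
			return ret;

		mb = alloc_mailbox();		/* DMA-able staging buffer */
		if (!mb) {
			ret = -ENOMEM;
			goto err_table;
		}

		ret = write_ctx(mb);		/* fill the context image */
		if (ret)
			goto err_mbox;

		ret = post_sw2hw(mb, idx);	/* firmware takes ownership */
		if (ret)
			goto err_mbox;

		free_mailbox(mb);
		return 0;

	err_mbox:
		free_mailbox(mb);
	err_table:
		table_put(idx);
		return ret;
	}

	int main(void)
	{
		return resource_enable(42);
	}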
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
struct hns_roce_mw *mw;
unsigned long index = 0;
int ret;
mw = kmalloc(sizeof(*mw), GFP_KERNEL);
if (!mw)
return ERR_PTR(-ENOMEM);
/* Allocate a key for mw from bitmap */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
if (ret)
goto err_bitmap;
mw->rkey = hw_index_to_key(index);
mw->ibmw.rkey = mw->rkey;
mw->ibmw.type = type;
mw->pdn = to_hr_pd(ib_pd)->pdn;
mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
ret = hns_roce_mw_enable(hr_dev, mw);
if (ret)
goto err_mw;
return &mw->ibmw;
err_mw:
hns_roce_mw_free(hr_dev, mw);
err_bitmap:
kfree(mw);
return ERR_PTR(ret);
}
int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
struct hns_roce_mw *mw = to_hr_mw(ibmw);
hns_roce_mw_free(hr_dev, mw);
kfree(mw);
return 0;
}
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE. * SOFTWARE.
*/ */
#include "roce_k_compat.h"
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pci.h> #include <linux/pci.h>
...@@ -37,7 +38,7 @@ ...@@ -37,7 +38,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{ {
return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
} }
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
...@@ -45,6 +46,18 @@ static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) ...@@ -45,6 +46,18 @@ static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR); hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
} }
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev,
unsigned long *xrcdn)
{
return hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, xrcdn);
}
static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev,
unsigned long xrcdn)
{
hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn, BITMAP_NO_RR);
}
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev) int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{ {
return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds, return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
...@@ -57,6 +70,19 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) ...@@ -57,6 +70,19 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap); hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
} }
int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
{
return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap,
hr_dev->caps.num_xrcds,
hr_dev->caps.num_xrcds - 1,
hr_dev->caps.reserved_xrcds, 0);
}
void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
}
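hns_roce_init_xrcd_table(), like the PD and QP table inits, carves its number space out of a reservation-aware bitmap: indexes below the reserved count are never handed out. A minimal model of such an allocator, with both bottom and top reservations (illustrative, 64 IDs):

	#include <errno.h>
	#include <stdio.h>

	#define NBITS 64	/* toy resource count */

	struct bitmap {
		unsigned long bits;
		int lo, hi;	/* first/last allocatable index */
	};

	static void bitmap_init(struct bitmap *bm, int reserved_bot, int reserved_top)
	{
		bm->bits = 0;
		bm->lo = reserved_bot;
		bm->hi = NBITS - reserved_top - 1;
	}

	static int bitmap_alloc(struct bitmap *bm, unsigned long *idx)
	{
		for (int i = bm->lo; i <= bm->hi; i++) {
			if (!(bm->bits & (1UL << i))) {
				bm->bits |= 1UL << i;
				*idx = (unsigned long)i;
				return 0;
			}
		}
		return -ENOMEM;	/* exhausted, like the driver's bitmap */
	}

	int main(void)
	{
		struct bitmap bm;
		unsigned long id;

		bitmap_init(&bm, 2, 0);	/* e.g. two reserved low indexes */
		if (!bitmap_alloc(&bm, &id))
			printf("first usable index = %lu\n", id);	/* 2 */
		return 0;
	}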
struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_ucontext *context, struct ib_ucontext *context,
struct ib_udata *udata) struct ib_udata *udata)
...@@ -77,6 +103,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, ...@@ -77,6 +103,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
#ifdef CONFIG_NEW_KERNEL
if (context) { if (context) {
struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn}; struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};
...@@ -88,6 +115,17 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, ...@@ -88,6 +115,17 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
} }
} }
#else
if (context) {
if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
kfree(pd);
return ERR_PTR(-EFAULT);
}
}
#endif
return &pd->ibpd; return &pd->ibpd;
} }
EXPORT_SYMBOL_GPL(hns_roce_alloc_pd); EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
...@@ -101,6 +139,65 @@ int hns_roce_dealloc_pd(struct ib_pd *pd) ...@@ -101,6 +139,65 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)
} }
EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd); EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);
struct ib_xrcd *hns_roce_ib_alloc_xrcd(struct ib_device *ib_dev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct ib_cq_init_attr cq_attr = {};
struct hns_roce_xrcd *xrcd;
int ret;
if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
return ERR_PTR(-EINVAL);
xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
if (!xrcd)
return ERR_PTR(-ENOMEM);
ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
if (ret) {
kfree(xrcd);
dev_err(hr_dev->dev,
"[alloc_xrcd]hns_roce_xrcd_alloc failed!\n");
return ERR_PTR(ret);
}
xrcd->pd = ib_alloc_pd(ib_dev, 0);
if (IS_ERR_OR_NULL(xrcd->pd)) {
ret = PTR_ERR(xrcd->pd);
goto err_dealloc_xrcd;
}
cq_attr.cqe = 1;
xrcd->cq = ib_create_cq(ib_dev, NULL, NULL, xrcd, &cq_attr);
if (IS_ERR_OR_NULL(xrcd->cq)) {
ret = PTR_ERR(xrcd->cq);
goto err_dealloc_pd;
}
return &xrcd->ibxrcd;
err_dealloc_pd:
ib_dealloc_pd(xrcd->pd);
err_dealloc_xrcd:
hns_roce_xrcd_free(hr_dev, xrcd->xrcdn);
kfree(xrcd);
return ERR_PTR(ret);
}
int hns_roce_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
ib_destroy_cq(to_hr_xrcd(xrcd)->cq);
ib_dealloc_pd(to_hr_xrcd(xrcd)->pd);
hns_roce_xrcd_free(to_hr_dev(xrcd->device), to_hr_xrcd(xrcd)->xrcdn);
kfree(xrcd);
return 0;
}
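Per the XRC design, a domain must supply the PD and CQ that XRC target QPs use, which is why hns_roce_ib_alloc_xrcd() creates a private pair and the dealloc path destroys them in reverse order. A toy model of that ownership (illustrative types, not the ib_core objects):

	#include <stdio.h>
	#include <stdlib.h>

	struct pd { int pdn; };
	struct cq { int cqe; };
	struct xrcd { struct pd *pd; struct cq *cq; unsigned long xrcdn; };

	static struct xrcd *alloc_xrcd(unsigned long xrcdn)
	{
		struct xrcd *x = calloc(1, sizeof(*x));

		if (!x)
			return NULL;
		x->xrcdn = xrcdn;
		x->pd = calloc(1, sizeof(*x->pd));	/* private PD */
		x->cq = calloc(1, sizeof(*x->cq));	/* placeholder 1-entry CQ */
		if (!x->pd || !x->cq) {			/* unwind partial init */
			free(x->cq);
			free(x->pd);
			free(x);
			return NULL;
		}
		x->cq->cqe = 1;
		return x;
	}

	static void dealloc_xrcd(struct xrcd *x)
	{
		free(x->cq);	/* reverse order of creation */
		free(x->pd);
		free(x);
	}

	int main(void)
	{
		struct xrcd *x = alloc_xrcd(0);

		if (!x)
			return 1;
		printf("xrcd %lu owns a private pd and cq\n", x->xrcdn);
		dealloc_xrcd(x);
		return 0;
	}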
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{ {
struct resource *res; struct resource *res;
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
* SOFTWARE. * SOFTWARE.
*/ */
#include <linux/pci.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <rdma/ib_addr.h> #include <rdma/ib_addr.h>
#include <rdma/ib_umem.h> #include <rdma/ib_umem.h>
...@@ -115,10 +116,7 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, ...@@ -115,10 +116,7 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
{ {
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
...@@ -208,13 +206,23 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, ...@@ -208,13 +206,23 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
} }
} }
if (hr_dev->caps.scc_ctx_entry_sz) {
/* Alloc memory for SCC CTX */
ret = hns_roce_table_get(hr_dev, &qp_table->scc_ctx_table,
hr_qp->qpn);
if (ret) {
dev_err(dev, "SCC CTX table get failed\n");
goto err_put_trrl;
}
}
spin_lock_irq(&qp_table->lock); spin_lock_irq(&qp_table->lock);
ret = radix_tree_insert(&hr_dev->qp_table_tree, ret = radix_tree_insert(&hr_dev->qp_table_tree,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp); hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock); spin_unlock_irq(&qp_table->lock);
if (ret) { if (ret) {
dev_err(dev, "QPC radix_tree_insert failed\n"); dev_err(dev, "QPC radix_tree_insert failed\n");
goto err_put_scc_ctx;
} }
atomic_set(&hr_qp->refcount, 1); atomic_set(&hr_qp->refcount, 1);
...@@ -222,6 +230,11 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, ...@@ -222,6 +230,11 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
return 0; return 0;
err_put_scc_ctx:
if (hr_dev->caps.scc_ctx_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->scc_ctx_table,
hr_qp->qpn);
err_put_trrl: err_put_trrl:
if (hr_dev->caps.trrl_entry_sz) if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
...@@ -257,6 +270,9 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) ...@@ -257,6 +270,9 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
wait_for_completion(&hr_qp->free); wait_for_completion(&hr_qp->free);
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
if (hr_dev->caps.scc_ctx_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->scc_ctx_table,
hr_qp->qpn);
if (hr_dev->caps.trrl_entry_sz) if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hns_roce_table_put(hr_dev, &qp_table->trrl_table,
hr_qp->qpn); hr_qp->qpn);
...@@ -279,7 +295,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, ...@@ -279,7 +295,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp); EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, int is_user, int has_rq,
struct hns_roce_qp *hr_qp) struct hns_roce_qp *hr_qp)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
...@@ -293,14 +309,12 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, ...@@ -293,14 +309,12 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL; return -EINVAL;
} }
/* If the QP has no RQ (e.g. an SRQ is attached), zero the RQ fields */
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
} else {
if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) { if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n"); dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
...@@ -345,6 +359,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ...@@ -345,6 +359,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
u8 max_sq_stride = ilog2(roundup_sq_stride); u8 max_sq_stride = ilog2(roundup_sq_stride);
u32 page_size; u32 page_size;
u32 max_cnt; u32 max_cnt;
u32 ex_sge_num;
/* Sanity check SQ size before proceeding */ /* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
...@@ -372,7 +387,22 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ...@@ -372,7 +387,22 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
if (hr_qp->sq.max_gs > 2) if (hr_qp->sq.max_gs > 2)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2)); (hr_qp->sq.max_gs - 2));
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
hr_qp->sq.max_gs);
if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(hr_dev->dev,
"The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
hr_qp->sge.sge_shift = 4; hr_qp->sge.sge_shift = 4;
ex_sge_num = hr_qp->sge.sge_cnt;
/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) { if (hr_dev->caps.max_sq_sg <= 2) {
...@@ -386,6 +416,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ...@@ -386,6 +416,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), PAGE_SIZE); hr_qp->sq.wqe_shift), PAGE_SIZE);
} else { } else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt =
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) + hr_qp->rq.wqe_shift), page_size) +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt << HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
...@@ -394,7 +426,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ...@@ -394,7 +426,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), page_size); hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0; hr_qp->sq.offset = 0;
if (ex_sge_num) {
hr_qp->sge.offset = HNS_ROCE_ALOGN_UP( hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
(hr_qp->sq.wqe_cnt << (hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), hr_qp->sq.wqe_shift),
...@@ -465,6 +497,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, ...@@ -465,6 +497,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4; hr_qp->sge.sge_shift = 4;
} }
if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0; hr_qp->sq.offset = 0;
...@@ -472,6 +512,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, ...@@ -472,6 +512,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
page_size); page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) { if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size; hr_qp->sge.offset = size;
size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt << size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size); hr_qp->sge.sge_shift, page_size);
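A worked example of this extended-SGE sizing (the same formula is used in the user and kernel paths above; the numbers are illustrative):

	#include <stdio.h>

	static unsigned int roundup_pow_of_two(unsigned int v)
	{
		unsigned int r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned int wqe_cnt = 128, max_gs = 6, sge_shift = 4;
		/* two SGEs fit in the WQE itself; the rest are extended */
		unsigned int sge_cnt = roundup_pow_of_two(wqe_cnt * (max_gs - 2));

		printf("ext SGE cnt = %u, ext region = %u bytes\n",
		       sge_cnt, sge_cnt << sge_shift);	/* 512, 8192 */
		return 0;
	}

The extended region is then rounded up to the WQE buffer page size, and on revision 0x20 hardware the count is additionally capped at caps.max_extend_sg, as checked above.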
...@@ -503,7 +545,8 @@ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) ...@@ -503,7 +545,8 @@ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{ {
if (attr->qp_type == IB_QPT_XRC_INI || if (attr->qp_type == IB_QPT_XRC_INI ||
attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
!attr->cap.max_recv_wr)
return 0; return 0;
return 1; return 1;
...@@ -538,13 +581,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ...@@ -538,13 +581,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);
ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject, ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
hns_roce_qp_has_rq(init_attr), hr_qp);
if (ret) { if (ret) {
dev_err(dev, "hns_roce_set_rq_size failed\n"); dev_err(dev, "hns_roce_set_rq_size failed\n");
goto err_out; goto err_out;
} }
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hns_roce_qp_has_rq(init_attr)) {
/* allocate recv inline buf */ /* allocate recv inline buf */
hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
sizeof(struct hns_roce_rinl_wqe), sizeof(struct hns_roce_rinl_wqe),
...@@ -651,6 +695,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ...@@ -651,6 +695,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
dev_err(dev, "rq record doorbell map failed!\n"); dev_err(dev, "rq record doorbell map failed!\n");
goto err_sq_dbmap; goto err_sq_dbmap;
} }
/* indicate kernel supports rq record db */
resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
hr_qp->rdb_en = 1;
} }
} else { } else {
if (init_attr->create_flags & if (init_attr->create_flags &
...@@ -759,17 +807,20 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ...@@ -759,17 +807,20 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
else else
hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
if (ib_pd->uobject && (udata->outlen >= sizeof(resp))) {
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)
goto err_qp;

if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret) {
dev_err(hr_dev->dev, "qp flow control init failure!");
goto err_qp;
}
} }
hr_qp->event = hns_roce_ib_qp_event; hr_qp->event = hns_roce_ib_qp_event;
return 0; return 0;
...@@ -838,13 +889,27 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ...@@ -838,13 +889,27 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = pd ? to_hr_dev(pd->device) :
to_hr_dev(init_attr->xrcd->device);
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_sqp *hr_sqp; struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp; struct hns_roce_qp *hr_qp;
u16 xrcdn = 0;
int ret; int ret;
switch (init_attr->qp_type) { switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
return ERR_PTR(-EINVAL);
pd = to_hr_xrcd(init_attr->xrcd)->pd;
xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
init_attr->send_cq = to_hr_xrcd(init_attr->xrcd)->cq;
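/* fall through: XRC_TGT also takes the XRC_INI and RC setup below */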
case IB_QPT_XRC_INI:
if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
return ERR_PTR(-EINVAL);
init_attr->recv_cq = init_attr->send_cq;
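/* fall through */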
case IB_QPT_UD:
case IB_QPT_UC:
case IB_QPT_RC: { case IB_QPT_RC: {
hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
if (!hr_qp) if (!hr_qp)
...@@ -859,7 +924,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ...@@ -859,7 +924,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
} }
hr_qp->ibqp.qp_num = hr_qp->qpn; hr_qp->ibqp.qp_num = hr_qp->qpn;
hr_qp->xrcdn = xrcdn;
break; break;
} }
case IB_QPT_GSI: { case IB_QPT_GSI: {
...@@ -916,6 +981,8 @@ int to_hr_qp_type(int qp_type) ...@@ -916,6 +981,8 @@ int to_hr_qp_type(int qp_type)
transport_type = SERV_TYPE_UD; transport_type = SERV_TYPE_UD;
else if (qp_type == IB_QPT_GSI) else if (qp_type == IB_QPT_GSI)
transport_type = SERV_TYPE_UD; transport_type = SERV_TYPE_UD;
else if (qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT)
transport_type = SERV_TYPE_XRC;
else else
transport_type = -1; transport_type = -1;
...@@ -923,46 +990,42 @@ int to_hr_qp_type(int qp_type) ...@@ -923,46 +990,42 @@ int to_hr_qp_type(int qp_type)
} }
EXPORT_SYMBOL_GPL(to_hr_qp_type); EXPORT_SYMBOL_GPL(to_hr_qp_type);
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_qp_attr *attr, int attr_mask)
{
struct device *dev = hr_dev->dev;
enum ib_mtu active_mtu;
int p;

p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
attr->path_mtu > hr_dev->caps.max_mtu) ||
attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
dev_err(dev, "attr path_mtu(%d) invalid while modify qp",
attr->path_mtu);
return -EINVAL;
}

return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = hr_dev->dev;
int ret = 0;
int p;

if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
attr->port_num);
return -EINVAL;
}

if (attr_mask & IB_QP_PKEY_INDEX) {
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
attr->pkey_index);
return -EINVAL;
}
}

if (attr_mask & IB_QP_PATH_MTU) {
ret = check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
if (ret)
return ret;
}

if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
attr->max_rd_atomic);
return -EINVAL;
}

if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
attr->max_dest_rd_atomic);
return -EINVAL;
}

return ret;
}
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
mutex_lock(&hr_qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
new_state = attr_mask & IB_QP_STATE ?
attr->qp_state : cur_state;
if (ibqp->pd->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
if (hr_qp->sdb_en == 1) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
if (hr_qp->rdb_en == 1)
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
dev_warn(dev, "flush cqe is not supported in userspace!\n");
goto out;
}
}
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
goto out; goto out;
} }
ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
if (ret)
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) { if (cur_state == new_state && cur_state == IB_QPS_RESET) {
if (hr_dev->caps.min_wqes) { if (hr_dev->caps.min_wqes) {
ret = -EPERM; ret = -EPERM;
...@@ -1106,14 +1201,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) ...@@ -1106,14 +1201,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
int reserved_from_top = 0; int reserved_from_top = 0;
int reserved_from_bot;
int ret; int ret;
spin_lock_init(&qp_table->lock); spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC); INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
/* In hw v1, a port includes two SQPs; six ports total 12 */
if (hr_dev->caps.max_sq_sg <= 2)
reserved_from_bot = SQP_NUM;
else
reserved_from_bot = hr_dev->caps.reserved_qps;
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps, ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
hr_dev->caps.num_qps - 1, reserved_from_bot,
reserved_from_top); reserved_from_top);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n", dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
......
/*
* Copyright (c) 2018 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_srq *srq;
rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (hr_dev->caps.num_srqs - 1));
rcu_read_unlock();
if (srq) {
refcount_inc(&srq->refcount);
} else {
dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
return;
}
srq->event(srq, event_type);
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
}
EXPORT_SYMBOL_GPL(hns_roce_srq_event);
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
enum hns_roce_event event_type)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct ib_srq *ibsrq = &srq->ibsrq;
struct ib_event event;
if (ibsrq->event_handler) {
event.device = ibsrq->device;
event.element.srq = ibsrq;
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
event.event = IB_EVENT_SRQ_LIMIT_REACHED;
break;
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
event.event = IB_EVENT_SRQ_ERR;
break;
default:
dev_err(hr_dev->dev,
"hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
event_type, srq->srqn);
return;
}
ibsrq->event_handler(&event, ibsrq->srq_context);
}
}
static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
HNS_ROCE_CMD_SW2HW_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_cmd_mailbox *mailbox;
dma_addr_t dma_handle_wqe;
dma_addr_t dma_handle_idx;
u64 *mtts_wqe;
u64 *mtts_idx;
int ret;
/* Get the physical address of srq buf */
mtts_wqe = hns_roce_table_find(hr_dev,
&hr_dev->mr_table.mtt_srqwqe_table,
srq->mtt.first_seg,
&dma_handle_wqe);
if (!mtts_wqe) {
dev_err(hr_dev->dev,
"SRQ alloc.Failed to find srq buf addr.\n");
return -EINVAL;
}
/* Get physical address of idx que buf */
mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
srq->idx_que.mtt.first_seg,
&dma_handle_idx);
if (!mtts_idx) {
dev_err(hr_dev->dev,
"SRQ alloc.Failed to find idx que buf addr.\n");
return -EINVAL;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret) {
dev_err(hr_dev->dev, "SRQ alloc.Failed to alloc index.\n");
return -ENOMEM;
}
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
if (ret)
goto err_out;
spin_lock_irq(&srq_table->lock);
ret = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (ret)
goto err_put;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_radix;
}
hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
mtts_wqe, mtts_idx, dma_handle_wqe,
dma_handle_idx);
ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
goto err_radix;
refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
return ret;
err_radix:
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
err_out:
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
return ret;
}
void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
if (ret)
dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
ret, srq->srqn);
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
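The free path above is the usual refcount-plus-completion teardown: hns_roce_srq_event() takes a temporary reference, and hns_roce_srq_free() drops the initial one and then blocks until any in-flight event handler has finished. A single-threaded C11 sketch of that handshake (atomics stand in for refcount_t and struct completion; a real kernel sleeps instead of spinning):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct srq {
		atomic_int refcount;	/* stands in for refcount_t */
		atomic_bool freed;	/* stands in for struct completion */
	};

	static void srq_event(struct srq *srq)
	{
		atomic_fetch_add(&srq->refcount, 1);	/* pin across the handler */
		/* ... dispatch the SRQ event ... */
		if (atomic_fetch_sub(&srq->refcount, 1) == 1)
			atomic_store(&srq->freed, true);	/* complete() */
	}

	static void srq_destroy(struct srq *srq)
	{
		if (atomic_fetch_sub(&srq->refcount, 1) == 1)	/* drop initial ref */
			atomic_store(&srq->freed, true);
		while (!atomic_load(&srq->freed))
			;	/* wait_for_completion() */
		/* no handler can still touch *srq: free tables and the SRQN */
	}

	int main(void)
	{
		struct srq srq = { 1, false };

		srq_event(&srq);	/* an async user comes and goes */
		srq_destroy(&srq);
		puts("srq torn down");
		return 0;
	}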
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
u32 page_shift)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
u32 bitmap_num;
int i;
idx_que->entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
bitmap_num = bitmap_num / (8 * sizeof(u64));
idx_que->buf_size = srq->max * idx_que->entry_sz;
if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
&idx_que->idx_buf, page_shift)) {
kfree(idx_que->bitmap);
return -ENOMEM;
}
for (i = 0; i < bitmap_num; i++)
idx_que->bitmap[i] = ~(0UL);
return 0;
}
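The bitmap sizing above rounds srq->max up to a whole number of 64-bit words before allocating bitmap_num / 8 bytes and setting every bit (all index entries start free). A worked, runnable version of the same arithmetic, where ALIGN_UP is an illustrative stand-in for HNS_ROCE_ALOGN_UP:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		size_t srq_max = 100;	/* srq->max, illustrative */
		size_t bits = ALIGN_UP(srq_max, 8 * sizeof(uint64_t));
		size_t words = bits / (8 * sizeof(uint64_t));
		uint64_t *bitmap = calloc(1, bits / 8);

		if (!bitmap)
			return 1;
		for (size_t i = 0; i < words; i++)
			bitmap[i] = ~(uint64_t)0;	/* every index entry free */
		printf("%zu entries -> %zu bits, %zu words\n",
		       srq_max, bits, words);	/* 100 -> 128 bits, 2 words */
		free(bitmap);
		return 0;
	}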
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_srq *srq;
int srq_desc_size;
int srq_buf_size;
u32 page_shift;
int ret = 0;
u32 npages;
u16 xrcdn;
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return ERR_PTR(-EINVAL);
srq = kzalloc(sizeof(*srq), GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
srq->max_gs = srq_init_attr->attr.max_sge;
srq_desc_size = max(16, 16 * srq->max_gs);
srq->wqe_shift = ilog2(srq_desc_size);
srq_buf_size = srq->max * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
if (udata) {
struct hns_roce_ib_create_srq ucmd;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_srq;
}
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
srq_buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
}
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift,
&srq->mtt);
} else
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->umem),
srq->umem->page_shift,
&srq->mtt);
if (ret)
goto err_buf;
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret)
goto err_srq_mtt;
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(pd->uobject->context,
ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev,
"ib_umem_get error for index queue\n");
goto err_srq_mtt;
}
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
if (hr_dev->caps.idx_buf_pg_sz) {
npages = (ib_umem_page_count(srq->idx_que.umem) +
(1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
(1 << hr_dev->caps.idx_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift, &srq->idx_que.mtt);
} else {
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->idx_que.umem),
srq->idx_que.umem->page_shift,
&srq->idx_que.mtt);
}
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_mtt_init error for idx que\n");
goto err_idx_mtt;
}
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
srq->idx_que.umem);
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_idx_buf;
}
} else {
u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
(1 << page_shift) * 2,
&srq->buf, page_shift)) {
ret = -ENOMEM;
goto err_buf;
}
srq->head = 0;
srq->tail = srq->max - 1;
srq->wqe_ctr = 0;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
srq->buf.page_shift, &srq->mtt);
if (ret)
goto err_buf;
ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret)
goto err_srq_mtt;
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(pd, srq, page_shift);
if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
ret);
goto err_srq_mtt;
}
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
/* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret)
goto err_create_idx;
/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret)
goto err_idx_buf;
srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_idx_buf;
}
}
cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
xrcdn = (srq_init_attr->srq_type == IB_SRQT_XRC) ?
to_hr_xrcd(srq_init_attr->ext.xrc.xrcd)->xrcdn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, xrcdn,
&srq->mtt, 0, srq);
if (ret)
goto err_wrid;
srq->event = hns_roce_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->srqn;
if (pd->uobject) {
if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
ret = -EFAULT;
goto err_wrid;
}
}
return &srq->ibsrq;
err_wrid:
kvfree(srq->wrid);
err_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_idx_mtt:
if (udata)
ib_umem_release(srq->idx_que.umem);
err_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
err_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_buf:
if (udata)
ib_umem_release(srq->umem);
else
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
err_srq:
kfree(srq);
return ERR_PTR(ret);
}
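A worked example of the sizing done near the top of hns_roce_create_srq(), with illustrative numbers; the 4-byte idx-queue entry size is an assumption, since HNS_ROCE_IDX_QUE_ENTRY_SZ's value is not shown in this excerpt:

	#include <stdio.h>

	static unsigned int roundup_pow_of_two(unsigned int v)
	{
		unsigned int r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned int max_wr = 100, max_sge = 4;
		unsigned int idx_entry_sz = 4;	/* assumed, see lead-in */
		unsigned int max = roundup_pow_of_two(max_wr + 1);	/* 128 */
		unsigned int desc_sz = 16 * max_sge > 16 ? 16 * max_sge : 16;
		unsigned int wqe_shift = 0;

		while ((1u << wqe_shift) < desc_sz)
			wqe_shift++;	/* ilog2(desc_sz) = 6 for 64 B */
		printf("wqe_shift=%u, wqe buf=%u B, idx que=%u B\n",
		       wqe_shift, max * desc_sz, max * idx_entry_sz);
		return 0;
	}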
int hns_roce_destroy_srq(struct ib_srq *ibsrq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
hns_roce_srq_free(hr_dev, srq);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
if (ibsrq->uobject) {
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
ib_umem_release(srq->idx_que.umem);
ib_umem_release(srq->umem);
} else {
kvfree(srq->wrid);
hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
&srq->buf);
}
kfree(srq);
return 0;
}
struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_srq *srq;
rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (hr_dev->caps.max_srqs - 1));
rcu_read_unlock();
return srq;
}
EXPORT_SYMBOL_GPL(hns_roce_srq_lookup);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
hr_dev->caps.num_srqs - 1,
hr_dev->caps.reserved_srqs, 0);
}
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2016-2017 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
static ssize_t cqc_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
ret = kstrtou32(buf, 10, &hr_dev->hr_stat.cqn);
if (ret) {
dev_err(dev, "Input params format unmatch\n");
return -EINVAL;
}
return strnlen(buf, count);
};
static ssize_t cqc_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
int count = 0;
ret = hr_dev->dfx->query_cqc_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "pkt query failed");
return -EBUSY;
}
return count;
};
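The cqc/ceqc/aeqc/qpc/srqc/mpt attributes above are device attributes on the IB device; the show()/store() pairing itself is the standard sysfs pattern. As a self-contained illustration, the hedged module below exposes a single u32 under /sys/kernel/hns_dbg_demo using the same kstrtou32()/scnprintf() idiom. The kobject name and the file are mine, not the driver's; the driver attaches its attributes to ib_dev.dev instead:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/kobject.h>
	#include <linux/module.h>
	#include <linux/sysfs.h>

	static u32 cqn;
	static struct kobject *demo_kobj;

	static ssize_t cqn_show(struct kobject *kobj, struct kobj_attribute *attr,
				char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%u\n", cqn);
	}

	static ssize_t cqn_store(struct kobject *kobj, struct kobj_attribute *attr,
				 const char *buf, size_t count)
	{
		if (kstrtou32(buf, 10, &cqn))
			return -EINVAL;
		return count;	/* consumed the whole write */
	}

	static struct kobj_attribute cqn_attr = __ATTR_RW(cqn);

	static int __init demo_init(void)
	{
		int ret;

		demo_kobj = kobject_create_and_add("hns_dbg_demo", kernel_kobj);
		if (!demo_kobj)
			return -ENOMEM;
		ret = sysfs_create_file(demo_kobj, &cqn_attr.attr);
		if (ret)
			kobject_put(demo_kobj);
		return ret;
	}

	static void __exit demo_exit(void)
	{
		kobject_put(demo_kobj);	/* removes the directory and file */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Returning count from store() is the conventional contract; the driver's strnlen(buf, count) is equivalent for NUL-terminated sysfs input.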
static ssize_t cmd_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
int count = 0;
ret = hr_dev->dfx->query_cmd_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "cmd query failed");
return -EBUSY;
}
return count;
}
static ssize_t pkt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
int count = 0;
ret = hr_dev->dfx->query_pkt_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "cmd query failed");
return -EBUSY;
}
return count;
}
static ssize_t ceqc_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
ret = kstrtou32(buf, 10, &hr_dev->hr_stat.ceqn);
if (ret) {
dev_err(dev, "Input params format unmatch\n");
return -EINVAL;
}
return strnlen(buf, count);
};
static ssize_t ceqc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
int count = 0;
ret = hr_dev->dfx->query_ceqc_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "ceqc query failed");
return -EBUSY;
}
return count;
}
static ssize_t aeqc_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
ret = kstrtou32(buf, 10, &hr_dev->hr_stat.aeqn);
if (ret) {
dev_err(dev, "Input params format unmatch\n");
return -EINVAL;
}
return strnlen(buf, count);
};
static ssize_t aeqc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hns_roce_dev *hr_dev =
container_of(dev, struct hns_roce_dev, ib_dev.dev);
int ret;
int count = 0;
ret = hr_dev->dfx->query_aeqc_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "ceqc query failed");
return -EBUSY;
}
return count;
}
static ssize_t qpc_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct hns_roce_dev *hr_dev =
		container_of(dev, struct hns_roce_dev, ib_dev.dev);
	int ret;

	ret = kstrtou32(buf, 10, &hr_dev->hr_stat.qpn);
	if (ret) {
		dev_err(dev, "Invalid qpn input format\n");
		return -EINVAL;
	}

	return count;
}

static ssize_t qpc_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct hns_roce_dev *hr_dev =
		container_of(dev, struct hns_roce_dev, ib_dev.dev);
	int ret;
	int count = 0;

	ret = hr_dev->dfx->query_qpc_stat(hr_dev, buf, &count);
	if (ret) {
		dev_err(dev, "qpc query failed\n");
		return -EBUSY;
	}

	return count;
}
static ssize_t srqc_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct hns_roce_dev *hr_dev =
		container_of(dev, struct hns_roce_dev, ib_dev.dev);
	int ret;

	ret = kstrtou32(buf, 10, &hr_dev->hr_stat.srqn);
	if (ret) {
		dev_err(dev, "Invalid srqn input format\n");
		return -EINVAL;
	}

	return count;
}

static ssize_t srqc_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct hns_roce_dev *hr_dev =
		container_of(dev, struct hns_roce_dev, ib_dev.dev);
	int ret;
	int count = 0;

	ret = hr_dev->dfx->query_srqc_stat(hr_dev, buf, &count);
	if (ret) {
		dev_err(dev, "srqc query failed\n");
		return -EBUSY;
	}

	return count;
}
static ssize_t mpt_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct hns_roce_dev *hr_dev =
		container_of(dev, struct hns_roce_dev, ib_dev.dev);
	int ret;

	ret = kstrtou32(buf, 10, &hr_dev->hr_stat.key);
	if (ret) {
		dev_err(dev, "Invalid key input format\n");
		return -EINVAL;
	}

	return count;
}

static ssize_t mpt_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct hns_roce_dev *hr_dev =
		container_of(dev, struct hns_roce_dev, ib_dev.dev);
	int ret;
	int count = 0;

	ret = hr_dev->dfx->query_mpt_stat(hr_dev, buf, &count);
	if (ret) {
		dev_err(dev, "mpt query failed\n");
		return -EBUSY;
	}

	return count;
}
static ssize_t coalesce_maxcnt_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev,
						   ib_dev.dev);
	struct hns_roce_eq *eq = hr_dev->eq_table.eq;

	return scnprintf(buf, PAGE_SIZE, "%u\n", eq->eq_max_cnt);
}

static ssize_t coalesce_maxcnt_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev,
						   ib_dev.dev);
	struct hns_roce_eq *eq = hr_dev->eq_table.eq;
	u32 int_maxcnt;
	int ceq_num;
	int i;
	int ret;

	ceq_num = hr_dev->caps.num_comp_vectors;
	ret = kstrtou32(buf, 10, &int_maxcnt);
	if (ret) {
		dev_err(dev, "Invalid irq coalesce maxcnt input format\n");
		return -EINVAL;
	}

	if (int_maxcnt > HNS_ROCE_CEQ_MAX_BURST_NUM) {
		dev_err(dev, "int_maxcnt must not exceed HNS_ROCE_CEQ_MAX_BURST_NUM\n");
		return -EINVAL;
	}

	eq->eq_max_cnt = int_maxcnt;
	for (i = 0; i < ceq_num; i++) {
		eq->eqn = i;
		ret = hr_dev->dfx->modify_eq(hr_dev, eq->eq_max_cnt, 0,
					     HNS_ROCE_EQ_MAXCNT_MASK);
		if (ret) {
			dev_err(dev, "eqc modify failed, eq_num=%d\n", eq->eqn);
			return -EBUSY;
		}
	}

	return count;
}
static ssize_t coalesce_period_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev,
						   ib_dev.dev);
	struct hns_roce_eq *eq = hr_dev->eq_table.eq;

	return scnprintf(buf, PAGE_SIZE, "%u\n", eq->eq_period);
}

static ssize_t coalesce_period_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev,
						   ib_dev.dev);
	struct hns_roce_eq *eq = hr_dev->eq_table.eq;
	u32 int_period;
	int ceq_num;
	int i;
	int ret;

	ceq_num = hr_dev->caps.num_comp_vectors;
	ret = kstrtou32(buf, 10, &int_period);
	if (ret) {
		dev_err(dev, "Invalid irq coalesce period input format\n");
		return -EINVAL;
	}

	if (int_period > HNS_ROCE_CEQ_MAX_INTERVAL) {
		dev_err(dev, "int_period must not exceed HNS_ROCE_CEQ_MAX_INTERVAL\n");
		return -EINVAL;
	}

	eq->eq_period = int_period;
	for (i = 0; i < ceq_num; i++) {
		eq->eqn = i;
		ret = hr_dev->dfx->modify_eq(hr_dev, 0, eq->eq_period,
					     HNS_ROCE_EQ_PERIOD_MASK);
		if (ret) {
			dev_err(dev, "eqc modify failed, eq_num=%d\n", eq->eqn);
			return -EBUSY;
		}
	}

	return count;
}
static DEVICE_ATTR_RW(aeqc);
static DEVICE_ATTR_RW(qpc);
static DEVICE_ATTR_RW(srqc);
static DEVICE_ATTR_RW(mpt);
static DEVICE_ATTR_RW(ceqc);
static DEVICE_ATTR_RO(pkt);
static DEVICE_ATTR_RO(cmd);
static DEVICE_ATTR_RW(cqc);
static DEVICE_ATTR_RW(coalesce_maxcnt);
static DEVICE_ATTR_RW(coalesce_period);

static struct device_attribute *hns_roce_hw_attrs_list[] = {
&dev_attr_cmd,
&dev_attr_cqc,
&dev_attr_aeqc,
&dev_attr_qpc,
&dev_attr_mpt,
&dev_attr_pkt,
&dev_attr_ceqc,
&dev_attr_srqc,
&dev_attr_coalesce_maxcnt,
&dev_attr_coalesce_period,
};
int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hns_roce_hw_attrs_list); i++) {
		ret = device_create_file(&hr_dev->ib_dev.dev,
					 hns_roce_hw_attrs_list[i]);
		if (ret) {
			dev_err(hr_dev->dev, "register_sysfs failed!\n");
			/* Roll back the attribute files created so far. */
			while (--i >= 0)
				device_remove_file(&hr_dev->ib_dev.dev,
						   hns_roce_hw_attrs_list[i]);
			return ret;
		}
	}

	return 0;
}
void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hns_roce_hw_attrs_list); i++)
		device_remove_file(&hr_dev->ib_dev.dev,
				   hns_roce_hw_attrs_list[i]);
}
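
/*
 * Illustrative usage (not part of the driver): the attributes registered
 * above appear under the IB device's sysfs directory. The device name used
 * below is an assumption for the example; the actual name depends on the
 * system.
 *
 *   # pick a QP number, then dump its context
 *   echo 3 > /sys/class/infiniband/hns_0/qpc
 *   cat /sys/class/infiniband/hns_0/qpc
 *
 *   # apply interrupt coalescing settings to all completion EQs
 *   echo 16 > /sys/class/infiniband/hns_0/coalesce_maxcnt
 *   echo 32 > /sys/class/infiniband/hns_0/coalesce_period
 */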
#ifndef _ROCE_K_COMPAT_H
#define _ROCE_K_COMPAT_H
#ifndef LINUX_VERSION_CODE
#include <linux/version.h>
#else
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#endif
#ifndef PCI_VENDOR_ID_HUAWEI
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/*
 * OFED builds do not provide a kernel version code, so no reliable check
 * can be made here yet; this empty block is a temporary workaround.
 */
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
typedef unsigned long long __u64;
#if defined(__GNUC__)
typedef __u64 uint64_t;
#endif
typedef uint64_t u64;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
#undef pci_irq_vector
#define pci_irq_vector _kc_pci_irq_vector
#ifdef CONFIG_PCI_MSI
#include <linux/pci.h>
#include <linux/msi.h>
/**
* pci_irq_vector - return Linux IRQ number of a device vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
*/
static inline int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (dev->msix_enabled) {
struct msi_desc *entry;
int i = 0;
for_each_pci_msi_entry(entry, dev) {
if (i == nr)
return entry->irq;
i++;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
if (WARN_ON_ONCE(nr >= entry->nvec_used))
return -EINVAL;
} else {
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
}
return dev->irq + nr;
}
#else
static inline int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
return dev->irq;
}
#endif
#endif
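
/*
 * Minimal usage sketch for pci_irq_vector() (illustration only, not used by
 * the driver): map device-relative vector 0 to a Linux IRQ number and
 * request it with a stub handler. The demo names below are hypothetical.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>

static inline irqreturn_t _kc_demo_irq_handler(int irq, void *ctx)
{
	return IRQ_HANDLED;
}

static inline int _kc_demo_request_vector0(struct pci_dev *pdev, void *ctx)
{
	int irq = pci_irq_vector(pdev, 0);

	if (irq < 0)
		return irq;

	return request_irq(irq, _kc_demo_irq_handler, 0, "roce-demo", ctx);
}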
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
#ifndef HAVE_LINUX_MM_H
#define HAVE_LINUX_MM_H
#endif
#ifndef HAVE_LINUX_SCHED_H
#define HAVE_LINUX_SCHED_H
#endif
/**
 * struct refcount_struct - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at UINT_MAX and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
#undef refcount_set
#define refcount_set _kc_refcount_set
static inline void _kc_refcount_set(refcount_t *r, unsigned int n)
{
atomic_set(&r->refs, n);
}
#undef refcount_dec_and_test
#define refcount_dec_and_test _kc_refcount_dec_and_test
static inline __must_check bool _kc_refcount_dec_and_test(refcount_t *r)
{
return atomic_dec_and_test(&r->refs);
}
/*
* Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*/
static inline bool refcount_inc_not_zero(refcount_t *r)
{
unsigned int old, new, val = atomic_read(&r->refs);
for (;;) {
new = val + 1;
if (!val)
return false;
if (unlikely(!new))
return true;
old = atomic_cmpxchg_relaxed(&r->refs, val, new);
if (old == val)
break;
val = old;
}
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
return true;
}
/*
* Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller already has a
* reference on the object, will WARN when this is not so.
*/
static inline void refcount_inc(refcount_t *r)
{
WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
/*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
* when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
static inline void refcount_dec(refcount_t *r)
{
WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
/*
* No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
* success thereof.
*
* Like all decrement operations, it provides release memory order and provides
* a control dependency.
*
* It can be used like a try-delete operator; this explicit case is provided
* and not cmpxchg in generic, because that would allow implementing unsafe
* operations.
*/
static inline bool refcount_dec_if_one(refcount_t *r)
{
return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
}
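
/*
 * Usage sketch for the refcount_t fallback above (illustration only): a
 * hypothetical object whose last _put tells the caller to free it.
 */
struct _kc_demo_obj {
	refcount_t ref;
};

static inline void _kc_demo_obj_init(struct _kc_demo_obj *obj)
{
	refcount_set(&obj->ref, 1);
}

static inline void _kc_demo_obj_get(struct _kc_demo_obj *obj)
{
	refcount_inc(&obj->ref);
}

/* Returns true when the last reference was dropped and obj must be freed. */
static inline bool _kc_demo_obj_put(struct _kc_demo_obj *obj)
{
	return refcount_dec_and_test(&obj->ref);
}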
/*
 * kvmalloc_array() is not available on these older kernels; fall back to
 * plain kmalloc_array().
 */
#undef kvmalloc_array
#define kvmalloc_array _kc_kvmalloc_array
static inline void *_kc_kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags);
}
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
#undef addrconf_addr_eui48_base
#define addrconf_addr_eui48_base _kc_addrconf_addr_eui48_base
static inline void _kc_addrconf_addr_eui48_base(u8 *eui,
const char *const addr)
{
memcpy(eui, addr, 3);
eui[3] = 0xFF;
eui[4] = 0xFE;
memcpy(eui + 5, addr + 3, 3);
}
#undef addrconf_addr_eui48
#define addrconf_addr_eui48 _kc_addrconf_addr_eui48
static inline void _kc_addrconf_addr_eui48(u8 *eui, const char *const addr)
{
addrconf_addr_eui48_base(eui, addr);
eui[0] ^= 2;
}
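
/*
 * Worked example for addrconf_addr_eui48() above (illustration only): the
 * MAC 02:11:22:33:44:55 yields the modified EUI-64 00:11:22:ff:fe:33:44:55,
 * i.e. ff:fe is inserted in the middle and the universal/local bit of the
 * first octet is flipped. The demo helper below is hypothetical.
 */
static inline void _kc_demo_mac_to_eui64(u8 *eui)
{
	static const char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	addrconf_addr_eui48(eui, mac);	/* eui: 00 11 22 ff fe 33 44 55 */
}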
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
/*
* If one of a or b is a compile-time constant, this avoids a division.
*/
#define __unsigned_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a * __b; \
__builtin_constant_p(__b) ? \
__b > 0 && __a > type_max(typeof(__a)) / __b : \
__a > 0 && __b > type_max(typeof(__b)) / __a; \
})
/*
* Signed multiplication is rather hard. gcc always follows C99, so
* division is truncated towards 0. This means that we can write the
* overflow check like this:
*
* (a > 0 && (b > MAX/a || b < MIN/a)) ||
* (a < -1 && (b > MIN/a || b < MAX/a) ||
* (a == -1 && b == MIN)
*
* The redundant casts of -1 are to silence an annoying -Wtype-limits
* (included in -Wextra) warning: When the type is u8 or u16, the
* __b_c_e in check_mul_overflow obviously selects
* __unsigned_mul_overflow, but unfortunately gcc still parses this
* code and warns about the limited range of __b.
*/
#define __signed_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
typeof(a) __tmax = type_max(typeof(a)); \
typeof(a) __tmin = type_min(typeof(a)); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a * (u64)__b; \
(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
(__b == (typeof(__b))-1 && __a == __tmin); \
})
#define check_mul_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_mul_overflow(a, b, d), \
__unsigned_mul_overflow(a, b, d))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
#define __must_check __attribute__((warn_unused_result))
typedef unsigned long __kernel_ulong_t;
typedef __kernel_ulong_t __kernel_size_t;
typedef __kernel_size_t size_t;
#define SIZE_MAX (~(size_t)0)
#endif
/**
* array_size() - Calculate size of 2-dimensional array.
*
* @a: dimension one
* @b: dimension two
*
* Calculates size of 2-dimensional array: @a * @b.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
static inline __must_check size_t array_size(size_t a, size_t b)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
return bytes;
}
#endif
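
/*
 * Usage sketch for array_size() above (illustration only): guard a table
 * allocation against multiplication overflow. The helper name is
 * hypothetical.
 */
#include <linux/slab.h>

static inline void *_kc_demo_alloc_table(size_t nents, size_t entry_size)
{
	size_t bytes = array_size(nents, entry_size);

	if (bytes == SIZE_MAX)
		return NULL;	/* nents * entry_size would overflow */

	return kzalloc(bytes, GFP_KERNEL);
}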
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 16, 0))
#define CONFIG_NEW_KERNEL
#define MODIFY_CQ_MASK
#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 18, 0))
#define CONFIG_KERNEL_419
#endif
#endif /*_ROCE_K_COMPAT_H*/