Commit 6d22d0eb authored by Yang Yingliang, committed by Xie XiuQi

driver: roce: update roce driver from driver team

driver inclusion
category: feature

-----------------------------------------

Based on 984f952682c9d0a603ed2fbbc541eab485a667be
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 9228d6c1
@@ -7,8 +7,9 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
 hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
-	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_sysfs.o
+	hns_roce_sysfs.o \
+	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
 obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
 hns-roce-hw-v1-objs := hns_roce_hw_v1.o
 obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_sysfs_v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o hns_roce_hw_sysfs_v2.o
@@ -59,6 +59,8 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
 	bool vlan_en = false;
 
+	rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_AH);
+
 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
 	if (!ah)
 		return ERR_PTR(-ENOMEM);
@@ -139,6 +141,8 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
 {
 	struct hns_roce_ah *ah = to_hr_ah(ibah);
 
+	rdfx_func_cnt(to_hr_dev(ibah->device), RDFX_FUNC_QUERY_AH);
+
 	memset(ah_attr, 0, sizeof(*ah_attr));
 
 	rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
@@ -159,6 +163,8 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
 int hns_roce_destroy_ah(struct ib_ah *ah)
 {
+	rdfx_func_cnt(to_hr_dev(ah->device), RDFX_FUNC_DESTROY_AH);
+
 	kfree(to_hr_ah(ah));
 
 	return 0;
......
...@@ -308,6 +308,118 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev, ...@@ -308,6 +308,118 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
&buf->hr_buf); &buf->hr_buf);
} }
static int create_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_ucontext *context,
struct ib_udata *udata,
struct hns_roce_ib_create_cq_resp *resp,
struct hns_roce_uar *uar,
int cq_entries)
{
struct hns_roce_ib_create_cq ucmd;
struct device *dev = hr_dev->dev;
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "Failed to copy_from_udata.\n");
return -EFAULT;
}
/* Get user space address, write it into mtt table */
ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
&hr_cq->umem, ucmd.buf_addr,
cq_entries);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
return ret;
}
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(*resp))) {
ret = hns_roce_db_map_user(to_hr_ucontext(context),
ucmd.db_addr, &hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");
goto err_mtt;
}
hr_cq->db_en = 1;
resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
}
/* Get user space parameters */
uar = &to_hr_ucontext(context)->uar;
return 0;
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, struct hns_roce_uar *uar,
int cq_entries)
{
struct device *dev = hr_dev->dev;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
if (ret)
return ret;
hr_cq->set_ci_db = hr_cq->db.db_record;
*hr_cq->set_ci_db = 0;
hr_cq->db_en = 1;
}
/* Init mmt table and write buff address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db;
}
uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * uar->index;
return 0;
err_db:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
return ret;
}
static void destroy_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_ucontext *context,
struct ib_udata *udata,
struct hns_roce_ib_create_cq_resp *resp)
{
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(to_hr_ucontext(context),
&hr_cq->db);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
const struct ib_cq_init_attr *attr, const struct ib_cq_init_attr *attr,
struct ib_ucontext *context, struct ib_ucontext *context,
...@@ -315,7 +427,6 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -315,7 +427,6 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd;
struct hns_roce_ib_create_cq_resp resp = {}; struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = NULL; struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL; struct hns_roce_uar *uar = NULL;
...@@ -323,6 +434,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -323,6 +434,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
int cq_entries = attr->cqe; int cq_entries = attr->cqe;
int ret; int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_CQ);
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes); cq_entries, hr_dev->caps.max_cqes);
...@@ -341,57 +454,18 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -341,57 +454,18 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
spin_lock_init(&hr_cq->lock); spin_lock_init(&hr_cq->lock);
if (context) { if (context) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { ret = create_user_cq(hr_dev, hr_cq, context, udata, &resp, uar,
dev_err(dev, "Failed to copy_from_udata.\n"); cq_entries);
ret = -EFAULT;
goto err_cq;
}
/* Get user space address, write it into mtt table */
ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
&hr_cq->umem, ucmd.buf_addr,
cq_entries);
if (ret) { if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n"); dev_err(dev, "Create cq fail in user mode!\n");
goto err_cq; goto err_cq;
} }
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp))) {
ret = hns_roce_db_map_user(to_hr_ucontext(context),
ucmd.db_addr, &hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");
goto err_mtt;
}
hr_cq->db_en = 1;
resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
}
/* Get user space parameters */
uar = &to_hr_ucontext(context)->uar;
} else { } else {
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { ret = create_kernel_cq(hr_dev, hr_cq, uar, cq_entries);
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
if (ret)
goto err_cq;
hr_cq->set_ci_db = hr_cq->db.db_record;
*hr_cq->set_ci_db = 0;
hr_cq->db_en = 1;
}
/* Init mmt table and write buff address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
cq_entries);
if (ret) { if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n"); dev_err(dev, "Create cq fail in user mode!\n");
goto err_db; goto err_cq;
} }
uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * uar->index;
} }
/* Allocate cq index, fill cq_context */ /* Allocate cq index, fill cq_context */
...@@ -423,28 +497,18 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -423,28 +497,18 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
goto err_cqc; goto err_cqc;
} }
rdfx_alloc_cq_buf(hr_dev, hr_cq);
return &hr_cq->ib_cq; return &hr_cq->ib_cq;
err_cqc: err_cqc:
hns_roce_free_cq(hr_dev, hr_cq); hns_roce_free_cq(hr_dev, hr_cq);
err_dbmap: err_dbmap:
if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)))
hns_roce_db_unmap_user(to_hr_ucontext(context),
&hr_cq->db);
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context) if (context)
ib_umem_release(hr_cq->umem); destroy_user_cq(hr_dev, hr_cq, context, udata, &resp);
else else
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, destroy_kernel_cq(hr_dev, hr_cq);
hr_cq->ib_cq.cqe);
err_db:
if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
hns_roce_free_db(hr_dev, &hr_cq->db);
err_cq: err_cq:
kfree(hr_cq); kfree(hr_cq);
...@@ -458,6 +522,10 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) ...@@ -458,6 +522,10 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
int ret = 0; int ret = 0;
rdfx_func_cnt(hr_dev, RDFX_FUNC_DESTROY_CQ);
rdfx_inc_dealloc_cq_cnt(hr_dev);
rdfx_free_cq_buff(hr_dev, hr_cq);
if (hr_dev->hw->destroy_cq) { if (hr_dev->hw->destroy_cq) {
ret = hr_dev->hw->destroy_cq(ib_cq); ret = hr_dev->hw->destroy_cq(ib_cq);
} else { } else {
......
@@ -496,7 +496,7 @@ struct hns_roce_idx_que {
 	u32 buf_size;
 	struct ib_umem *umem;
 	struct hns_roce_mtt mtt;
-	u64 *bitmap;
+	unsigned long *bitmap;
 };
 
 struct hns_roce_srq {
@@ -654,8 +654,6 @@ struct hns_roce_qp {
 	u32 doorbell_qpn;
 	__le32 sq_signal_bits;
 	u32 sq_next_wqe;
-	int sq_max_wqes_per_wr;
-	int sq_spare_wqes;
 	struct hns_roce_wq sq;
 
 	struct ib_umem *umem;
@@ -919,6 +917,12 @@ struct hns_roce_stat {
 };
 
 struct hns_roce_dfx_hw {
+	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
+			      int *buffer);
+	int (*query_qpc_info)(struct hns_roce_dev *hr_dev, u32 qpn,
+			      int *buffer);
+	int (*query_mpt_info)(struct hns_roce_dev *hr_dev, u32 key,
+			      int *buffer);
 	int (*query_cqc_stat)(struct hns_roce_dev *hr_dev,
 			      char *buf, int *desc);
 	int (*query_cmd_stat)(struct hns_roce_dev *hr_dev,
@@ -1072,6 +1076,7 @@ struct hns_roce_dev {
 	const struct hns_roce_hw *hw;
 	const struct hns_roce_dfx_hw *dfx;
 	void *priv;
+	void *dfx_priv;
 	struct workqueue_struct *irq_workq;
 	struct hns_roce_stat hr_stat;
 	u32 func_num;
@@ -1257,8 +1262,6 @@ int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
 			struct ib_udata *udata);
 int hns_roce_destroy_srq(struct ib_srq *ibsrq);
 
-struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn);
-
 struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
 				 struct ib_qp_init_attr *init_attr,
 				 struct ib_udata *udata);
...@@ -1304,6 +1307,139 @@ void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type); ...@@ -1304,6 +1307,139 @@ void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index); int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev); int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev); void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res);
int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev); int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev);
void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev); void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev);
#ifdef CONFIG_INFINIBAND_HNS_DFX
enum {
RDFX_FUNC_MODIFY_DEVICE,
RDFX_FUNC_QUERY_DEVICE,
RDFX_FUNC_QUERY_PORT,
RDFX_FUNC_MODIFY_PORT,
RDFX_FUNC_GET_LINK_LAYER,
RDFX_FUNC_GET_NETDEV,
RDFX_FUNC_QUERY_GID,
RDFX_FUNC_ADD_GID,
RDFX_FUNC_DEL_GID,
RDFX_FUNC_QUERY_PKEY,
RDFX_FUNC_ALLOC_UCONTEXT,
RDFX_FUNC_DEALLOC_UCONTEXT,
RDFX_FUNC_MMAP,
RDFX_FUNC_ALLOC_PD,
RDFX_FUNC_DEALLOC_PD,
RDFX_FUNC_CREATE_AH,
RDFX_FUNC_QUERY_AH,
RDFX_FUNC_DESTROY_AH,
RDFX_FUNC_CREATE_QP,
RDFX_FUNC_MODIFY_QP,
RDFX_FUNC_QUERY_QP,
RDFX_FUNC_DESTROY_QP,
RDFX_FUNC_POST_SEND,
RDFX_FUNC_POST_RECV,
RDFX_FUNC_CREATE_CQ,
RDFX_FUNC_MODIFY_CQ,
RDFX_FUNC_DESTROY_CQ,
RDFX_FUNC_REQ_NOTIFY_CQ,
RDFX_FUNC_POLL_CQ,
RDFX_FUNC_RESIZE_CQ,
RDFX_FUNC_GET_DMA_MR,
RDFX_FUNC_REG_USER_MR,
RDFX_FUNC_REREG_USER_MR,
RDFX_FUNC_DEREG_MR,
RDFX_FUNC_PORT_IMMUTABLE
};
void alloc_rdfx_info(struct hns_roce_dev *hr_dev);
void rdfx_set_dev_name(struct hns_roce_dev *hr_dev);
void free_rdfx_info(struct hns_roce_dev *hr_dev);
void rdfx_func_cnt(struct hns_roce_dev *hr_dev, int func);
void rdfx_inc_dealloc_qp_cnt(struct hns_roce_dev *hr_dev);
void rdfx_inc_arm_cq_cnt(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
enum ib_cq_notify_flags flags);
void rdfx_inc_dereg_mr_cnt(struct hns_roce_dev *hr_dev);
void rdfx_inc_sq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn);
void rdfx_inc_rq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn);
void rdfx_inc_ceqe_cnt(struct hns_roce_dev *hr_dev, int ceqn);
void rdfx_inc_dealloc_cq_cnt(struct hns_roce_dev *hr_dev);
struct rdfx_qp_info *rdfx_get_rdfx_qp(struct hns_roce_dev *hr_dev,
unsigned long qpn);
void rdfx_put_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn);
#ifndef CONFIG_INFINIBAND_HNS_DFX_ENHANCE
void rdfx_release_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn);
#else
#define rdfx_release_rdfx_qp(hr_dev, qpn)
#endif
struct rdfx_cq_info *rdfx_get_rdfx_cq(struct hns_roce_dev *hr_dev,
unsigned long cqn);
void rdfx_put_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn);
void rdfx_release_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn);
struct rdfx_ceq_info *rdfx_get_rdfx_ceq(struct hns_roce_dev *hr_dev,
unsigned long ceqn);
void rdfx_put_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn);
void rdfx_release_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn);
void rdfx_alloc_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn,
unsigned int eq_cmd);
void rdfx_alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
void rdfx_free_cq_buff(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
void rdfx_alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void rdfx_set_qp_attr(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state new_state);
void rdfx_alloc_rdfx_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
void rdfx_release_rdfx_mr(struct hns_roce_dev *hr_dev, unsigned long key);
void rdfx_alloc_rdfx_pd(struct hns_roce_dev *hr_dev, struct hns_roce_pd *pd);
void rdfx_release_rdfx_pd(struct hns_roce_dev *hr_dev, unsigned long pdn);
#ifdef CONFIG_KERNEL_419
void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, int ind, void *wqe,
const struct ib_recv_wr *wr);
#else
void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, int ind, void *wqe,
struct ib_recv_wr *wr);
#endif
void rdfx_cp_cqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
void *cqe);
void rdfx_set_rdfx_cq_ci(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq);
#else
#define alloc_rdfx_info(hr_dev)
#define rdfx_set_dev_name(hr_dev)
#define free_rdfx_info(hr_dev)
#define rdfx_func_cnt(hr_dev, func)
#define rdfx_inc_dealloc_qp_cnt(hr_dev)
#define rdfx_inc_arm_cq_cnt(hr_dev, hr_cq, flags)
#define rdfx_inc_dereg_mr_cnt(hr_dev)
#define rdfx_inc_sq_db_cnt(hr_dev, qpn)
#define rdfx_inc_rq_db_cnt(hr_dev, qpn)
#define rdfx_inc_ceqe_cnt(hr_dev, ceqn)
#define rdfx_inc_dealloc_cq_cnt(hr_dev)
#define rdfx_get_rdfx_qp(hr_dev, qpn)
#define rdfx_put_rdfx_qp(hr_dev, qpn)
#define rdfx_release_rdfx_qp(hr_dev, qpn)
#define rdfx_get_rdfx_cq(hr_dev, cqn)
#define rdfx_put_rdfx_cq(hr_dev, cqn)
#define rdfx_release_rdfx_cq(hr_dev, cqn)
#define rdfx_get_rdfx_ceq(hr_dev, ceqn)
#define rdfx_put_rdfx_ceq(hr_dev, ceqn)
#define rdfx_release_rdfx_ceq(hr_dev, ceqn)
#define rdfx_alloc_rdfx_ceq(hr_dev, ceqn, eq_cmd)
#define rdfx_alloc_cq_buf(hr_dev, hr_cq)
#define rdfx_free_cq_buff(hr_dev, hr_cq)
#define rdfx_alloc_qp_buf(hr_dev, hr_qp)
#define rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state)
#define rdfx_alloc_rdfx_mr(hr_dev, mr)
#define rdfx_release_rdfx_mr(hr_dev, key)
#define rdfx_alloc_rdfx_pd(hr_dev, pd)
#define rdfx_release_rdfx_pd(hr_dev, pdn)
#define rdfx_cp_rq_wqe_buf(hr_dev, hr_qp, ind, wqe, wr)
#define rdfx_cp_cqe_buf(hr_dev, hr_cq, cqe)
#define rdfx_set_rdfx_cq_ci(hr_dev, hr_cq)
#endif
#endif /* _HNS_ROCE_DEVICE_H */ #endif /* _HNS_ROCE_DEVICE_H */
...@@ -42,20 +42,47 @@ ...@@ -42,20 +42,47 @@
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{ {
if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) || int hop_num = 0;
(hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
(hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) || switch (type) {
(hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) || case HEM_TYPE_QPC:
(hr_dev->caps.scc_ctx_hop_num && type == HEM_TYPE_SCC_CTX) || hop_num = hr_dev->caps.qpc_hop_num;
(hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) || break;
(hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) || case HEM_TYPE_MTPT:
(hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) || hop_num = hr_dev->caps.mpt_hop_num;
(hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) || break;
(hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) || case HEM_TYPE_CQC:
(hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX)) hop_num = hr_dev->caps.cqc_hop_num;
return true; break;
case HEM_TYPE_SRQC:
return false; hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_SCC_CTX:
hop_num = hr_dev->caps.scc_ctx_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_CQE:
hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_MTT:
hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_SRQWQE:
hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
hop_num = hr_dev->caps.idx_hop_num;
break;
default:
return false;
}
return hop_num ? true : false;
} }
EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop); EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);
...@@ -94,17 +121,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num) ...@@ -94,17 +121,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
return 0; return 0;
} }
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, static int get_hem_table_config(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj, struct hns_roce_hem_mhop *mhop,
struct hns_roce_hem_mhop *mhop) u32 type)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
u32 table_idx;
u32 bt_num;
u32 chunk_size;
switch (table->type) { switch (type) {
case HEM_TYPE_QPC: case HEM_TYPE_QPC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT); + PAGE_SHIFT);
...@@ -195,10 +218,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, ...@@ -195,10 +218,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
break; break;
default: default:
dev_err(dev, "Table %d not support multi-hop addressing!\n", dev_err(dev, "Table %d not support multi-hop addressing!\n",
table->type); type);
return -EINVAL; return -EINVAL;
} }
return 0;
}
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj,
struct hns_roce_hem_mhop *mhop)
{
struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
u32 table_idx;
u32 bt_num;
u32 chunk_size;
if (get_hem_table_config(hr_dev, mhop, table->type))
return -EINVAL;
if (!obj) if (!obj)
return 0; return 0;
...@@ -890,7 +929,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, ...@@ -890,7 +929,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long obj_size, unsigned long nobj, unsigned long obj_size, unsigned long nobj,
int use_lowmem) int use_lowmem)
{ {
struct device *dev = hr_dev->dev;
unsigned long obj_per_chunk; unsigned long obj_per_chunk;
unsigned long num_hem; unsigned long num_hem;
...@@ -903,99 +941,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, ...@@ -903,99 +941,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
if (!table->hem) if (!table->hem)
return -ENOMEM; return -ENOMEM;
} else { } else {
struct hns_roce_hem_mhop mhop = {};
unsigned long buf_chunk_size; unsigned long buf_chunk_size;
unsigned long bt_chunk_size; unsigned long bt_chunk_size;
unsigned long bt_chunk_num; unsigned long bt_chunk_num;
unsigned long num_bt_l0 = 0; unsigned long num_bt_l0 = 0;
u32 hop_num; u32 hop_num;
switch (type) { if (get_hem_table_config(hr_dev, &mhop, type))
case HEM_TYPE_QPC:
buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_bt_num;
hop_num = hr_dev->caps.qpc_hop_num;
break;
case HEM_TYPE_MTPT:
buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.mpt_bt_num;
hop_num = hr_dev->caps.mpt_hop_num;
break;
case HEM_TYPE_CQC:
buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_bt_num;
hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SCC_CTX:
buf_chunk_size = 1 << (hr_dev->caps.scc_ctx_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.scc_ctx_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.scc_ctx_bt_num;
hop_num = hr_dev->caps.scc_ctx_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_SRQC:
buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.srqc_bt_num;
hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_MTT:
buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_CQE:
buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_SRQWQE:
buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.idx_hop_num;
break;
default:
dev_err(dev,
"Table %d not support to init hem table here!\n",
type);
return -EINVAL; return -EINVAL;
}
buf_chunk_size = mhop.buf_chunk_size;
bt_chunk_size = mhop.bt_chunk_size;
num_bt_l0 = mhop.ba_l0_num;
hop_num = mhop.hop_num;
obj_per_chunk = buf_chunk_size / obj_size; obj_per_chunk = buf_chunk_size / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
bt_chunk_num = bt_chunk_size / 8; bt_chunk_num = bt_chunk_size / 8;
......
@@ -35,13 +35,14 @@
 #include <linux/bitops.h>
 
-#define HNS_ROCE_VF_QPC_BT_NUM(d) (d ? (256) : (8))
-#define HNS_ROCE_VF_SRQC_BT_NUM(d) (d ? (64) : (8))
-#define HNS_ROCE_VF_CQC_BT_NUM(d) (d ? (64) : (8))
-#define HNS_ROCE_VF_MPT_BT_NUM(d) (d ? (64) : (8))
-#define HNS_ROCE_VF_EQC_NUM(d) (d ? (64) : (8))
-#define HNS_ROCE_VF_SMAC_NUM(d) (d ? (32) : (8))
-#define HNS_ROCE_VF_SGID_NUM(d) (d ? (32) : (8))
+#define HNS_ROCE_VF_QPC_BT_NUM(d) (d ? (8) : (256))
+#define HNS_ROCE_VF_SCCC_BT_NUM(d) (d ? (8) : (64))
+#define HNS_ROCE_VF_SRQC_BT_NUM(d) (d ? (8) : (64))
+#define HNS_ROCE_VF_CQC_BT_NUM(d) (d ? (8) : (64))
+#define HNS_ROCE_VF_MPT_BT_NUM(d) (d ? (8) : (64))
+#define HNS_ROCE_VF_EQC_NUM(d) (d ? (8) : (64))
+#define HNS_ROCE_VF_SMAC_NUM(d) (d ? (8) : (32))
+#define HNS_ROCE_VF_SGID_NUM(d) (d ? (8) : (32))
 
 #define HNS_ROCE_VF_SL_NUM 8
 
 #define HNS_ROCE_V2_MAX_QP_NUM 0x100000
@@ -62,8 +63,8 @@
 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
 #define HNS_ROCE_V2_UAR_NUM 256
 #define HNS_ROCE_V2_PHY_UAR_NUM 1
-#define HNS_ROCE_V2_MAX_IRQ_NUM(d) (d ? (65) : (3))
-#define HNS_ROCE_V2_COMP_VEC_NUM(d) (d ? (63) : (1))
+#define HNS_ROCE_V2_MAX_IRQ_NUM(d) (d ? (3) : (65))
+#define HNS_ROCE_V2_COMP_VEC_NUM(d) (d ? (1) : (63))
 #define HNS_ROCE_V2_AEQE_VEC_NUM 1
 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
@@ -114,7 +115,7 @@
 #define HNS_ROCE_EQE_HOP_NUM 2
 #define HNS_ROCE_IDX_HOP_NUM 1
 
-#define HNS_ROCE_V2_GID_INDEX_NUM 256
+#define HNS_ROCE_V2_GID_INDEX_NUM(d) (d ? (8) : (256))
 
 #define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -166,8 +167,8 @@
 #define HNS_ICL_SWITCH_CMD_ROCEE_SEL BIT(HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT)
 
-#define CMD_CSQ_DESC_NUM (1024)
-#define CMD_CRQ_DESC_NUM (1024)
+#define CMD_CSQ_DESC_NUM 1024
+#define CMD_CRQ_DESC_NUM 1024
 
 enum {
 	NO_ARMED = 0x0,
@@ -682,6 +683,7 @@ struct hns_roce_v2_qp_context {
 #define V2_QPC_BYTE_76_RQIE_S 28
 #define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
+#define V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S 31
 #define V2_QPC_BYTE_80_RX_CQN_S 0
 #define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
@@ -1463,7 +1465,8 @@ struct hns_roce_vf_res_b {
 	__le32 vf_smac_idx_num;
 	__le32 vf_sgid_idx_num;
 	__le32 vf_qid_idx_sl_num;
-	__le32 rsv[2];
+	__le32 vf_sccc_idx_num;
+	__le32 rsv1;
 };
 
 #define VF_RES_B_DATA_0_VF_ID_S 0
@@ -1487,6 +1490,13 @@ struct hns_roce_vf_res_b
 #define VF_RES_B_DATA_3_VF_SL_NUM_S 16
 #define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
 
+#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
+#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
+#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
+
 struct hns_roce_vf_switch {
 	__le32 rocee_sel;
 	__le32 fun_id;
@@ -1702,6 +1712,9 @@ struct hns_roce_eq_context {
 #define HNS_ROCE_V2_EQ_ARMED 1
 #define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3
 
+#define HNS_ROCE_V2_EQ_DEFAULT_INTERVAL 0x10
+#define HNS_ROCE_V2_EQ_DEFAULT_BURST_NUM 0x10
+
 #define HNS_ROCE_EQ_INIT_EQE_CNT 0
 #define HNS_ROCE_EQ_INIT_PROD_IDX 0
 #define HNS_ROCE_EQ_INIT_REPORT_TIMER 0
...@@ -1912,11 +1925,38 @@ int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev, ...@@ -1912,11 +1925,38 @@ int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev,
int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev, int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev,
u16 eq_count, u16 eq_period, u16 type); u16 eq_count, u16 eq_period, u16 type);
int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer);
int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn,
int *buffer);
int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key,
int *buffer);
void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
enum hns_roce_opcode_type opcode, enum hns_roce_opcode_type opcode,
bool is_read); bool is_read);
int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num); struct hns_roce_cmq_desc *desc, int num);
#ifdef CONFIG_INFINIBAND_HNS_DFX
#ifdef CONFIG_KERNEL_419
void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
unsigned int ind, void *wqe,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
const struct ib_send_wr *wr);
#else
void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
unsigned int ind, void *wqe,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
struct ib_send_wr *wr);
#endif
void rdfx_set_cqe_info(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct hns_roce_v2_cqe *cqe);
#else
#define rdfx_set_cqe_info(hr_dev, hr_cq, cqe)
#define rdfx_cp_sq_wqe_buf(hr_dev, qp, ind, wqe, rc_sq_wqe, wr)
#endif
#define HNS_ROCE_V2_SCC_CTX_DONE_S 0 #define HNS_ROCE_V2_SCC_CTX_DONE_S 0
......
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2018 Hisilicon Limited.
#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hw_v2.h"
int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer)
{
struct hns_roce_v2_cq_context *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_QUERY_CQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
goto err_mailbox;
}
memcpy(buffer, context, sizeof(*context));
err_mailbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn,
int *buffer)
{
struct hns_roce_v2_qp_context *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY qpc cmd process error\n");
goto err_mailbox;
}
context = mailbox->buf;
memcpy(buffer, context, sizeof(*context));
err_mailbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key,
int *buffer)
{
struct hns_roce_v2_mpt_entry *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_QUERY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY mpt cmd process error\n");
goto err_mailbox;
}
memcpy(buffer, context, sizeof(*context));
err_mailbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
...@@ -148,6 +148,8 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num, ...@@ -148,6 +148,8 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
unsigned long flags; unsigned long flags;
int ret; int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_ADD_GID);
if (port >= hr_dev->caps.num_ports || if (port >= hr_dev->caps.num_ports ||
index > hr_dev->caps.gid_table_len[port]) { index > hr_dev->caps.gid_table_len[port]) {
dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n", dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n",
...@@ -175,6 +177,8 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num, ...@@ -175,6 +177,8 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
unsigned long flags; unsigned long flags;
int ret; int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_DEL_GID);
if (port >= hr_dev->caps.num_ports) if (port >= hr_dev->caps.num_ports)
return -EINVAL; return -EINVAL;
...@@ -269,6 +273,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev, ...@@ -269,6 +273,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_DEVICE);
memset(props, 0, sizeof(*props)); memset(props, 0, sizeof(*props));
props->fw_ver = hr_dev->caps.fw_ver; props->fw_ver = hr_dev->caps.fw_ver;
...@@ -314,8 +320,10 @@ static int hns_roce_query_device(struct ib_device *ib_dev, ...@@ -314,8 +320,10 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_DEVICE_MEM_WINDOW_TYPE_2B; IB_DEVICE_MEM_WINDOW_TYPE_2B;
} }
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
}
return 0; return 0;
} }
...@@ -326,6 +334,8 @@ static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev, ...@@ -326,6 +334,8 @@ static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct net_device *ndev; struct net_device *ndev;
rdfx_func_cnt(hr_dev, RDFX_FUNC_GET_NETDEV);
if (port_num < 1 || port_num > hr_dev->caps.num_ports) if (port_num < 1 || port_num > hr_dev->caps.num_ports)
return NULL; return NULL;
...@@ -349,6 +359,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, ...@@ -349,6 +359,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
enum ib_mtu mtu; enum ib_mtu mtu;
u8 port; u8 port;
rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_PORT);
assert(port_num > 0); assert(port_num > 0);
port = port_num - 1; port = port_num - 1;
...@@ -387,12 +399,16 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, ...@@ -387,12 +399,16 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
u8 port_num) u8 port_num)
{ {
rdfx_func_cnt(to_hr_dev(device), RDFX_FUNC_GET_LINK_LAYER);
return IB_LINK_LAYER_ETHERNET; return IB_LINK_LAYER_ETHERNET;
} }
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index, static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
union ib_gid *gid) union ib_gid *gid)
{ {
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_QUERY_GID);
return 0; return 0;
} }
...@@ -401,6 +417,8 @@ static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index, ...@@ -401,6 +417,8 @@ static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
{ {
*pkey = PKEY_ID; *pkey = PKEY_ID;
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_QUERY_PKEY);
return 0; return 0;
} }
...@@ -409,6 +427,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, ...@@ -409,6 +427,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
{ {
unsigned long flags; unsigned long flags;
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_MODIFY_DEVICE);
if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -424,6 +444,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, ...@@ -424,6 +444,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask, static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
struct ib_port_modify *props) struct ib_port_modify *props)
{ {
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_MODIFY_PORT);
return 0; return 0;
} }
...@@ -438,6 +460,8 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, ...@@ -438,6 +460,8 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (!hr_dev->active) if (!hr_dev->active)
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_UCONTEXT);
resp.qp_tab_size = hr_dev->caps.num_qps; resp.qp_tab_size = hr_dev->caps.num_qps;
context = kmalloc(sizeof(*context), GFP_KERNEL); context = kmalloc(sizeof(*context), GFP_KERNEL);
...@@ -474,6 +498,9 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext) ...@@ -474,6 +498,9 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{ {
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
rdfx_func_cnt(to_hr_dev(ibcontext->device),
RDFX_FUNC_DEALLOC_UCONTEXT);
hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar); hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
kfree(context); kfree(context);
...@@ -529,6 +556,8 @@ static int hns_roce_mmap(struct ib_ucontext *context, ...@@ -529,6 +556,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(context->device); struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
rdfx_func_cnt(hr_dev, RDFX_FUNC_MMAP);
if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
return -EINVAL; return -EINVAL;
...@@ -557,6 +586,8 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num, ...@@ -557,6 +586,8 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr attr; struct ib_port_attr attr;
int ret; int ret;
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_PORT_IMMUTABLE);
ret = ib_query_port(ib_dev, port_num, &attr); ret = ib_query_port(ib_dev, port_num, &attr);
if (ret) if (ret)
return ret; return ret;
...@@ -720,6 +751,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ...@@ -720,6 +751,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* OTHERS */ /* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable; ib_dev->get_port_immutable = hns_roce_port_immutable;
ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext; ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->res.fill_res_entry = hns_roce_fill_res_entry;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ib_dev->alloc_xrcd = hns_roce_ib_alloc_xrcd; ib_dev->alloc_xrcd = hns_roce_ib_alloc_xrcd;
...@@ -1060,20 +1092,33 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -1060,20 +1092,33 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
return ret; return ret;
} }
int hns_roce_init(struct hns_roce_dev *hr_dev) int hns_roce_reset(struct hns_roce_dev *hr_dev)
{ {
int ret; int ret;
struct device *dev = hr_dev->dev;
if (hr_dev->hw->reset) { if (hr_dev->hw->reset) {
ret = hr_dev->hw->reset(hr_dev, true); ret = hr_dev->hw->reset(hr_dev, true);
if (ret) { if (ret)
dev_err(dev, "Reset RoCE engine failed!\n");
return ret; return ret;
}
} }
hr_dev->is_reset = false; hr_dev->is_reset = false;
return 0;
}
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
int ret;
struct device *dev = hr_dev->dev;
alloc_rdfx_info(hr_dev);
ret = hns_roce_reset(hr_dev);
if (ret) {
dev_err(dev, "Reset RoCE engine failed!\n");
return ret;
}
if (hr_dev->hw->cmq_init) { if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev); ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) { if (ret) {
...@@ -1133,7 +1178,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) ...@@ -1133,7 +1178,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
goto error_failed_register_device; goto error_failed_register_device;
(void)hns_roce_register_sysfs(hr_dev); (void)hns_roce_register_sysfs(hr_dev);
rdfx_set_dev_name(hr_dev);
return 0; return 0;
error_failed_register_device: error_failed_register_device:
...@@ -1187,6 +1232,8 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev) ...@@ -1187,6 +1232,8 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
hr_dev->hw->cmq_exit(hr_dev); hr_dev->hw->cmq_exit(hr_dev);
if (hr_dev->hw->reset) if (hr_dev->hw->reset)
hr_dev->hw->reset(hr_dev, false); hr_dev->hw->reset(hr_dev, false);
free_rdfx_info(hr_dev);
} }
EXPORT_SYMBOL_GPL(hns_roce_exit); EXPORT_SYMBOL_GPL(hns_roce_exit);
......
...@@ -39,6 +39,10 @@ ...@@ -39,6 +39,10 @@
#include "hns_roce_cmd.h" #include "hns_roce_cmd.h"
#include "hns_roce_hem.h" #include "hns_roce_hem.h"
#ifdef CONFIG_INFINIBAND_HNS_TEST
#include "hns_roce_test.h"
#endif
static u32 hw_index_to_key(unsigned long ind) static u32 hw_index_to_key(unsigned long ind)
{ {
return (u32)(ind >> 24) | (ind << 8); return (u32)(ind >> 24) | (ind << 8);
...@@ -351,155 +355,208 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, ...@@ -351,155 +355,208 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
mr->pbl_bt_l0 = NULL; mr->pbl_bt_l0 = NULL;
mr->pbl_l0_dma_addr = 0; mr->pbl_l0_dma_addr = 0;
} }
static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
/* PBL multi hop addressing */ if (npages > pbl_bt_sz / 8) {
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, dev_err(dev, "npages %d is larger than buf_pg_sz!",
struct hns_roce_mr *mr) npages);
return -EINVAL;
}
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf)
return -ENOMEM;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = 1;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
}
static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated; int npages_allocated;
int i = 0, j = 0;
u32 pbl_bt_sz;
u32 mhop_num;
u64 pbl_last_bt_num; u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0; u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size; u64 size;
int i;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
if (mhop_num == HNS_ROCE_HOP_NUM_0) /* alloc L1 BT */
return 0; for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
/* hop_num = 1 */ size = pbl_bt_sz;
if (mhop_num == 1) { } else {
if (npages > pbl_bt_sz / 8) { npages_allocated = i * (pbl_bt_sz / 8);
dev_err(dev, "npages %d is larger than buf_pg_sz!", size = (npages - npages_allocated) * 8;
npages);
return -EINVAL;
} }
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
&(mr->pbl_dma_addr), &(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL); GFP_KERNEL);
if (!mr->pbl_buf) if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
return -ENOMEM; return -ENOMEM;
}
mr->pbl_size = npages; *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = mhop_num; pbl_bt_cnt++;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; if (pbl_bt_cnt >= pbl_last_bt_num)
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; break;
return 0;
} }
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8, mr->l0_chunk_last_num = i + 1;
sizeof(*mr->pbl_l1_dma_addr),
return 0;
}
static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
int i;
int j = 0;
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL); GFP_KERNEL);
if (!mr->pbl_l1_dma_addr) if (!mr->pbl_l2_dma_addr)
return -ENOMEM; return -ENOMEM;
mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1), mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_bt_l2),
GFP_KERNEL); GFP_KERNEL);
if (!mr->pbl_bt_l1) if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l1; goto err_kcalloc_bt_l2;
/* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}
if (mhop_num == 3) { *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
goto err_kcalloc_l2_dma;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, for (j = 0; j < pbl_bt_sz / 8; j++) {
sizeof(*mr->pbl_bt_l2), bt_idx = i * pbl_bt_sz / 8 + j;
GFP_KERNEL);
if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l2;
}
/* alloc L0 BT */
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_dma_alloc_l0;
if (mhop_num == 2) {
/* alloc L1 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) { if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz; size = pbl_bt_sz;
} else { } else {
npages_allocated = i * (pbl_bt_sz / 8); npages_allocated = bt_idx *
(pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8; size = (npages - npages_allocated) * 8;
} }
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
&(mr->pbl_l1_dma_addr[i]), dev, size,
GFP_KERNEL); &(mr->pbl_l2_dma_addr[bt_idx]),
if (!mr->pbl_bt_l1[i]) { GFP_KERNEL);
hns_roce_loop_free(hr_dev, mr, 1, i, 0); if (!mr->pbl_bt_l2[bt_idx]) {
hns_roce_loop_free(hr_dev, mr, 2, i, j);
goto err_dma_alloc_l0; goto err_dma_alloc_l0;
} }
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; *(mr->pbl_bt_l1[i] + j) =
mr->pbl_l2_dma_addr[bt_idx];
pbl_bt_cnt++; pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num) if (pbl_bt_cnt >= pbl_last_bt_num) {
mr_alloc_done = 1;
break; break;
}
} else if (mhop_num == 3) {
/* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
} }
}
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; if (mr_alloc_done)
break;
}
for (j = 0; j < pbl_bt_sz / 8; j++) { mr->l0_chunk_last_num = i + 1;
bt_idx = i * pbl_bt_sz / 8 + j; mr->l1_chunk_last_num = j + 1;
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = bt_idx *
(pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
dev, size,
&(mr->pbl_l2_dma_addr[bt_idx]),
GFP_KERNEL);
if (!mr->pbl_bt_l2[bt_idx]) {
hns_roce_loop_free(hr_dev, mr, 2, i, j);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l1[i] + j) = return 0;
mr->pbl_l2_dma_addr[bt_idx];
pbl_bt_cnt++; err_dma_alloc_l0:
if (pbl_bt_cnt >= pbl_last_bt_num) { kfree(mr->pbl_bt_l2);
mr_alloc_done = 1; mr->pbl_bt_l2 = NULL;
break;
}
}
if (mr_alloc_done) err_kcalloc_bt_l2:
break; kfree(mr->pbl_l2_dma_addr);
} mr->pbl_l2_dma_addr = NULL;
return -ENOMEM;
}
/* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
u32 pbl_bt_sz;
u32 mhop_num;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0;
/* hop_num = 1 */
if (mhop_num == 1)
return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l1_dma_addr)
return -ENOMEM;
mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
GFP_KERNEL);
if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1;
/* alloc L0 BT */
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_kcalloc_l2_dma;
if (mhop_num == 2) {
if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
goto err_kcalloc_l2_dma;
}
if (mhop_num == 3) {
if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
goto err_kcalloc_l2_dma;
} }
mr->l0_chunk_last_num = i + 1;
if (mhop_num == 3)
mr->l1_chunk_last_num = j + 1;
mr->pbl_size = npages; mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr; mr->pbl_ba = mr->pbl_l0_dma_addr;
...@@ -509,14 +566,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -509,14 +566,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
return 0; return 0;
err_dma_alloc_l0:
kfree(mr->pbl_bt_l2);
mr->pbl_bt_l2 = NULL;
err_kcalloc_bt_l2:
kfree(mr->pbl_l2_dma_addr);
mr->pbl_l2_dma_addr = NULL;
err_kcalloc_l2_dma: err_kcalloc_l2_dma:
kfree(mr->pbl_bt_l1); kfree(mr->pbl_bt_l1);
mr->pbl_bt_l1 = NULL; mr->pbl_bt_l1 = NULL;
...@@ -949,6 +998,10 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) ...@@ -949,6 +998,10 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
if (ret) if (ret)
goto err_free; goto err_free;
#ifdef CONFIG_INFINIBAND_HNS_TEST
test_set_mr_access(mr);
#endif
ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr); ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
if (ret) if (ret)
goto err_mr; goto err_mr;
...@@ -956,6 +1009,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) ...@@ -956,6 +1009,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
mr->ibmr.rkey = mr->ibmr.lkey = mr->key; mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
mr->umem = NULL; mr->umem = NULL;
rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_GET_DMA_MR);
rdfx_alloc_rdfx_mr(to_hr_dev(pd->device), mr);
return &mr->ibmr; return &mr->ibmr;
err_mr: err_mr:
...@@ -1153,6 +1209,9 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1153,6 +1209,9 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->ibmr.rkey = mr->ibmr.lkey = mr->key; mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_REG_USER_MR);
rdfx_alloc_rdfx_mr(to_hr_dev(pd->device), mr);
return &mr->ibmr; return &mr->ibmr;
err_mr: err_mr:
...@@ -1166,6 +1225,82 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1166,6 +1225,82 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct hns_roce_cmd_mailbox *mailbox,
u32 pdn)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct device *dev = hr_dev->dev;
int npages;
int ret;
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
if (hr_dev->caps.pbl_hop_num)
hns_roce_mhop_free(hr_dev, mr);
else
dma_free_coherent(dev, npages * 8, mr->pbl_buf,
mr->pbl_dma_addr);
}
ib_umem_release(mr->umem);
mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
mr_access_flags, 0);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
return -ENOMEM;
}
npages = ib_umem_page_count(mr->umem);
if (hr_dev->caps.pbl_hop_num) {
ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
if (ret)
goto release_umem;
} else {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf) {
ret = -ENOMEM;
goto release_umem;
}
}
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
if (ret)
goto release_umem;
ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) {
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
if (hr_dev->caps.pbl_hop_num)
hns_roce_mhop_free(hr_dev, mr);
else
dma_free_coherent(dev, npages * 8,
mr->pbl_buf,
mr->pbl_dma_addr);
}
goto release_umem;
}
release_umem:
ib_umem_release(mr->umem);
return ret;
}
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd, u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata) struct ib_udata *udata)
...@@ -1176,9 +1311,10 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, ...@@ -1176,9 +1311,10 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
unsigned long mtpt_idx; unsigned long mtpt_idx;
u32 pdn = 0; u32 pdn = 0;
int npages;
int ret; int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_REREG_USER_MR);
if (!mr->enabled) if (!mr->enabled)
return -EINVAL; return -EINVAL;
@@ -1199,77 +1335,30 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
	mr->enabled = 0;

-	if (flags & IB_MR_REREG_PD)
+	if (flags & IB_MR_REREG_PD) {
		pdn = to_hr_pd(pd)->pdn;
+		ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+						   mr_access_flags, virt_addr,
+						   length, mailbox->buf);
+		if (ret)
+			goto free_cmd_mbox;
+	}

	if (flags & IB_MR_REREG_TRANS) {
-		if (mr->size != ~0ULL) {
-			npages = ib_umem_page_count(mr->umem);
-
-			if (hr_dev->caps.pbl_hop_num)
-				hns_roce_mhop_free(hr_dev, mr);
-			else
-				dma_free_coherent(dev, npages * 8, mr->pbl_buf,
-						  mr->pbl_dma_addr);
-		}
-		ib_umem_release(mr->umem);
-
-		mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
-				       mr_access_flags, 0);
-		if (IS_ERR(mr->umem)) {
-			ret = PTR_ERR(mr->umem);
-			mr->umem = NULL;
-			goto free_cmd_mbox;
-		}
-		npages = ib_umem_page_count(mr->umem);
-
-		if (hr_dev->caps.pbl_hop_num) {
-			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
-			if (ret)
-				goto release_umem;
-		} else {
-			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
-							 &(mr->pbl_dma_addr),
-							 GFP_KERNEL);
-			if (!mr->pbl_buf) {
-				ret = -ENOMEM;
-				goto release_umem;
-			}
-		}
-	}
-
-	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
-					   mr_access_flags, virt_addr,
-					   length, mailbox->buf);
-	if (ret) {
-		if (flags & IB_MR_REREG_TRANS)
-			goto release_umem;
-		else
-			goto free_cmd_mbox;
-	}
-
-	if (flags & IB_MR_REREG_TRANS) {
-		ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
-		if (ret) {
-			if (mr->size != ~0ULL) {
-				npages = ib_umem_page_count(mr->umem);
-
-				if (hr_dev->caps.pbl_hop_num)
-					hns_roce_mhop_free(hr_dev, mr);
-				else
-					dma_free_coherent(dev, npages * 8,
-							  mr->pbl_buf,
-							  mr->pbl_dma_addr);
-			}
-
-			goto release_umem;
-		}
+		ret = rereg_mr_trans(ibmr, flags,
+				     start, length,
+				     virt_addr, mr_access_flags,
+				     mailbox, pdn);
+		if (ret)
+			goto free_cmd_mbox;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
-		goto release_umem;
+		ib_umem_release(mr->umem);
+		goto free_cmd_mbox;
	}

	mr->enabled = 1;
@@ -1280,9 +1369,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
	return 0;

-release_umem:
-	ib_umem_release(mr->umem);
-
free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -1295,6 +1381,10 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

+	rdfx_func_cnt(hr_dev, RDFX_FUNC_DEREG_MR);
+	rdfx_inc_dereg_mr_cnt(hr_dev);
+	rdfx_release_rdfx_mr(hr_dev, mr->key);
+
	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
@@ -1350,6 +1440,9 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

+	rdfx_func_cnt(hr_dev, RDFX_FUNC_REG_USER_MR);
+	rdfx_alloc_rdfx_mr(hr_dev, mr);
+
	return &mr->ibmr;

err_free_mr:
......
@@ -125,6 +125,8 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
		}
	}

+	rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_PD);
+	rdfx_alloc_rdfx_pd(hr_dev, pd);
#endif

	return &pd->ibpd;
}
@@ -132,6 +134,10 @@ EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);

int hns_roce_dealloc_pd(struct ib_pd *pd)
{
+	rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_DEALLOC_PD);
+	rdfx_release_rdfx_pd(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+
	hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
	kfree(to_hr_pd(pd));
......
@@ -350,16 +350,12 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
	return 0;
}

-static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
-				     struct ib_qp_cap *cap,
-				     struct hns_roce_qp *hr_qp,
-				     struct hns_roce_ib_create_qp *ucmd)
+static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+					struct ib_qp_cap *cap,
+					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
-	u32 page_size;
-	u32 max_cnt;
-	u32 ex_sge_num;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
@@ -375,6 +371,25 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
		return -EINVAL;
	}
return 0;
}
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
u32 ex_sge_num;
u32 page_size;
u32 max_cnt;
int ret;
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
dev_err(hr_dev->dev, "Sanity check sq size fail\n");
return ret;
}
	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
@@ -416,8 +431,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
-		hr_qp->sge.sge_cnt =
-			max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
+		hr_qp->sge.sge_cnt = ex_sge_num ?
+			max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
@@ -446,6 +461,35 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
	return 0;
}
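/* Size the extended SGE region that follows the normal SQ WQEs and validate
 * it against the device's max_extend_sg limit on PCI revision 0x20 hardware.
 */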
static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
struct device *dev = hr_dev->dev;
if (hr_qp->sq.max_gs > 2) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
hr_qp->sge.sge_shift = 4;
}
/* ud sqwqe's sge use extend sge */
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
hr_qp->sq.max_gs);
hr_qp->sge.sge_shift = 4;
}
if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
return 0;
}
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
@@ -454,6 +498,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
	u32 page_size;
	u32 max_cnt;
	int size;
+	int ret;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -463,8 +508,6 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
-	hr_qp->sq_max_wqes_per_wr = 1;
-	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
@@ -484,25 +527,10 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
	else
		hr_qp->sq.max_gs = max_cnt;

-	if (hr_qp->sq.max_gs > 2) {
-		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
-							(hr_qp->sq.max_gs - 2));
-		hr_qp->sge.sge_shift = 4;
-	}
-
-	/* ud sqwqe's sge use extend sge */
-	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
-		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
-							hr_qp->sq.max_gs);
-		hr_qp->sge.sge_shift = 4;
-	}
-
-	if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
-		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
-			dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
-				hr_qp->sge.sge_cnt);
-			return -EINVAL;
-		}
+	ret = set_extend_sge_param(hr_dev, hr_qp);
+	if (ret) {
+		dev_err(dev, "set extend sge parameters fail\n");
+		return ret;
	}

	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
@@ -536,7 +564,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
-	if (attr->qp_type == IB_QPT_XRC_TGT)
+	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
@@ -874,11 +902,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
	hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+	    hns_roce_qp_has_rq(init_attr))
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+	    hns_roce_qp_has_rq(init_attr))
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
@@ -918,7 +948,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
-			dev_err(dev, "Create RC QP failed\n");
+			dev_err(dev, "Create RC QP 0x%06lx failed(%d)\n",
+				hr_qp->qpn, ret);
			kfree(hr_qp);
			return ERR_PTR(ret);
		}
@@ -965,6 +996,9 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
		}
	}

+	rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_QP);
+	rdfx_alloc_qp_buf(hr_dev, hr_qp);
+
	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);
@@ -1069,6 +1103,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;

+	rdfx_func_cnt(hr_dev, RDFX_FUNC_MODIFY_QP);
+
	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
......
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2018 Hisilicon Limited.
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
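/* Dump selected CQ context (CQC) fields as driver-specific netlink attributes. */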
static int hns_roce_fill_cq(struct sk_buff *msg,
struct hns_roce_v2_cq_context *context)
{
if (rdma_nl_put_driver_u32(msg, "state",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ceqn",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_CEQN_M,
V2_CQC_BYTE_4_CEQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "poll",
roce_get_bit(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_POLL_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "shift",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_SHIFT_M,
V2_CQC_BYTE_4_SHIFT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cmd_sn",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_CMD_SN_M,
V2_CQC_BYTE_4_CMD_SN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cqn",
roce_get_field(context->byte_8_cqn,
V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "hopnum",
roce_get_field(context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "pi",
roce_get_field(context->byte_28_cq_pi,
V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M,
V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ci",
roce_get_field(context->byte_32_cq_ci,
V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M,
V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rdb_en",
roce_get_field(context->byte_44_db_record,
V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
V2_CQC_BYTE_44_DB_RECORD_ADDR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "coalesce",
roce_get_field(
context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_MAX_CNT_M,
V2_CQC_BYTE_56_CQ_MAX_CNT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "period",
roce_get_field(
context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_PERIOD_M,
V2_CQC_BYTE_56_CQ_PERIOD_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cnt",
roce_get_field(context->byte_52_cqe_cnt,
V2_CQC_BYTE_52_CQE_CNT_M,
V2_CQC_BYTE_52_CQE_CNT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "se_idx",
roce_get_field(context->byte_64_se_cqe_idx,
V2_CQC_BYTE_64_SE_CQE_IDX_M,
V2_CQC_BYTE_64_SE_CQE_IDX_S)))
goto err;
return 0;
err:
return -EMSGSIZE;
}
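/* Query the CQC through the DFX hook and nest it under RDMA_NLDEV_ATTR_DRIVER. */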
static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_cq *ib_cq = container_of(res, struct ib_cq, res);
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct hns_roce_v2_cq_context context;
struct nlattr *table_attr;
int ret;
if (!hr_dev->dfx->query_cqc_info)
return -EINVAL;
ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)&context);
if (ret)
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_cq(msg, &context))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
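/* Receive-queue related fields of the QP context (QPC). */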
static int hns_roce_qp_fill_rp(struct sk_buff *msg,
struct hns_roce_v2_qp_context *context)
{
if (rdma_nl_put_driver_u32(msg, "rq_pi",
roce_get_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_ci",
roce_get_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_shift",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M,
V2_QPC_BYTE_20_RQ_SHIFT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_cqeidx",
roce_get_field(
context->byte_256_sqflush_rqcqe,
V2_QPC_BYTE_256_RQ_CQE_IDX_M,
V2_QPC_BYTE_256_RQ_CQE_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_rx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_RQ_RX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_tx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_RQ_TX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_rty_tx_err",
roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_db_doing",
roce_get_bit(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_RQ_DB_DOING_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rx_cqn",
roce_get_field(context->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S)))
goto err;
return 0;
err:
return -EMSGSIZE;
}
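/* Send-queue related fields of the QP context (QPC). */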
static int hns_roce_qp_fill_sp(struct sk_buff *msg,
struct hns_roce_v2_qp_context *context)
{
if (rdma_nl_put_driver_u32(msg, "sq_pi",
roce_get_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_ci",
roce_get_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_shift",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M,
V2_QPC_BYTE_20_SQ_SHIFT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_maxidx",
roce_get_field(context->byte_200_sq_max,
V2_QPC_BYTE_200_SQ_MAX_IDX_M,
V2_QPC_BYTE_200_SQ_MAX_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_rx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_SQ_RX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_tx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_SQ_TX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_db_doing",
roce_get_bit(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_SQ_DB_DOING_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_tx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_SQ_TX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "tx_cqn",
roce_get_field(context->byte_252_err_txcqn,
V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S)))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_qp(struct sk_buff *msg,
struct hns_roce_v2_qp_context *context)
{
if (rdma_nl_put_driver_u32(msg, "smac_idx",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SMAC_IDX_M,
V2_QPC_BYTE_20_SMAC_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "vid",
roce_get_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "mtu",
roce_get_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sgid_idx",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "srqn",
roce_get_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M,
V2_QPC_BYTE_76_SRQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "srq_en",
roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "chk_flg",
roce_get_field(context->byte_212_lsn,
V2_QPC_BYTE_212_CHECK_FLG_M,
V2_QPC_BYTE_212_CHECK_FLG_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "retry_cnt",
roce_get_field(context->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "err_type",
roce_get_field(context->byte_252_err_txcqn,
V2_QPC_BYTE_252_ERR_TYPE_M,
V2_QPC_BYTE_252_ERR_TYPE_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "flush_idx",
roce_get_field(
context->byte_256_sqflush_rqcqe,
V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
V2_QPC_BYTE_256_SQ_FLUSH_IDX_S)))
goto err;
if (hns_roce_qp_fill_rp(msg, context))
goto err;
if (hns_roce_qp_fill_sp(msg, context))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_res_qp_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_qp *ib_qp = container_of(res, struct ib_qp, res);
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
struct hns_roce_v2_qp_context context;
struct nlattr *table_attr;
int ret;
if (!hr_dev->dfx->query_qpc_info)
return -EINVAL;
ret = hr_dev->dfx->query_qpc_info(hr_dev, hr_qp->qpn, (int *)&context);
if (ret)
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_qp(msg, &context))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
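/* Dump MPT entry fields (state, key, access flags, PBL info) for an MR. */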
static int hns_roce_fill_mr(struct sk_buff *msg,
struct hns_roce_v2_mpt_entry *context)
{
u64 val_h32;
if (rdma_nl_put_driver_u32(msg, "status",
roce_get_field(context->byte_4_pd_hop_st,
V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "lkey", context->lkey))
goto err;
if (rdma_nl_put_driver_u32(msg, "size", context->pbl_size))
goto err;
if (rdma_nl_put_driver_u32(msg, "ra",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_RA_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ri",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_R_INV_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "li",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_L_INV_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "atomic_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_ATOMIC_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rr_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_RR_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rw_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_RW_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "lw_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_LW_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "pbl_buf_pgsz",
roce_get_field(context->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S)))
goto err;
val_h32 = context->len_h;
if (rdma_nl_put_driver_u64(msg, "len",
val_h32 << 32 | context->len_l))
goto err;
return 0;
err:
return -EMSGSIZE;
}
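/* Note: the MPT is queried via the last key recorded in hr_dev->hr_stat,
 * not via the restrack entry's own MR.
 */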
static int hns_roce_fill_res_mr_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_mr *ib_mr = container_of(res, struct ib_mr, res);
struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
struct hns_roce_v2_mpt_entry context;
int key = hr_dev->hr_stat.key;
struct nlattr *table_attr;
int ret;
if (!hr_dev->dfx->query_mpt_info)
return -EINVAL;
ret = hr_dev->dfx->query_mpt_info(hr_dev, key, (int *)&context);
if (ret)
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_mr(msg, &context))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
static int hns_roce_fill_pd(struct sk_buff *msg,
struct hns_roce_pd *hr_pd)
{
if (rdma_nl_put_driver_u32(msg, "pdn", hr_pd->pdn))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_res_pd_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_pd *ib_pd = container_of(res, struct ib_pd, res);
struct hns_roce_pd *hr_pd = to_hr_pd(ib_pd);
struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_pd(msg, hr_pd))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
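/* Restrack dispatcher: route each resource type to its driver-specific dump. */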
int hns_roce_fill_res_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
if (res->type == RDMA_RESTRACK_PD)
return hns_roce_fill_res_pd_entry(msg, res);
if (res->type == RDMA_RESTRACK_CQ)
return hns_roce_fill_res_cq_entry(msg, res);
if (res->type == RDMA_RESTRACK_QP)
return hns_roce_fill_res_qp_entry(msg, res);
if (res->type == RDMA_RESTRACK_MR)
return hns_roce_fill_res_mr_entry(msg, res);
return 0;
}
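hns_roce_fill_res_entry() is the driver's single entry point for restrack driver details; presumably it is hooked into the ib_device when the device is registered (in hns_roce_main.c, which is not part of this hunk). A minimal sketch of that assumed wiring, using the res.fill_res_entry hook available in this kernel generation:

	/* Sketch only: assumed registration site, e.g. hns_roce_register_device() */
	ib_dev->res.fill_res_entry = hns_roce_fill_res_entry;

With that hook in place, consumers of the rdma netlink resource-tracking interface (such as the rdma utility's detailed resource dumps) receive the driver attributes filled in above.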
@@ -209,6 +209,91 @@ void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
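/* Pin the user-space SRQ WQE buffer and index queue and write both into
 * their MTT tables.
 */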
static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
struct ib_udata *udata, int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_ib_create_srq ucmd;
u32 page_shift;
u32 npages;
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
srq_buf_size, 0, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
} else
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem),
srq->umem->page_shift, &srq->mtt);
if (ret)
goto err_user_buf;
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret)
goto err_user_srq_mtt;
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(pd->uobject->context, ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
goto err_user_srq_mtt;
}
if (hr_dev->caps.idx_buf_pg_sz) {
npages = (ib_umem_page_count(srq->idx_que.umem) +
(1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
(1 << hr_dev->caps.idx_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
&srq->idx_que.mtt);
} else {
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->idx_que.umem),
srq->idx_que.umem->page_shift,
&srq->idx_que.mtt);
}
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
goto err_user_idx_mtt;
}
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
srq->idx_que.umem);
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_user_idx_buf;
}
return 0;
err_user_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_user_idx_mtt:
ib_umem_release(srq->idx_que.umem);
err_user_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_user_buf:
ib_umem_release(srq->umem);
return ret;
}
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
				   u32 page_shift)
{
@@ -240,6 +325,93 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
	return 0;
}
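/* Allocate the kernel SRQ buffer, index queue and wrid array, and write
 * the buffers into their MTT tables.
 */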
static int create_kernel_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
int ret;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
&srq->buf, page_shift))
return -ENOMEM;
srq->head = 0;
srq->tail = srq->max - 1;
srq->wqe_ctr = 0;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
if (ret)
goto err_kernel_buf;
ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret)
goto err_kernel_srq_mtt;
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(pd, srq, page_shift);
if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
goto err_kernel_srq_mtt;
}
/* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret)
goto err_kernel_create_idx;
/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret)
goto err_kernel_idx_buf;
srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_kernel_idx_buf;
}
return 0;
err_kernel_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_kernel_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
err_kernel_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_kernel_buf:
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
return ret;
}
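/* Tear-down helpers mirroring create_user_srq() and create_kernel_srq(). */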
static void destroy_user_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq)
{
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
ib_umem_release(srq->idx_que.umem);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
ib_umem_release(srq->umem);
}
static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq, int srq_buf_size)
{
kvfree(srq->wrid);
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
}
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
				   struct ib_srq_init_attr *srq_init_attr,
				   struct ib_udata *udata)
@@ -248,10 +420,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
	struct hns_roce_srq *srq;
	int srq_desc_size;
	int srq_buf_size;
-	u32 page_shift;
-	int ret = 0;
-	u32 npages;
-	u16 xrcdn;
+	int ret;
	u32 cqn;

	/* Check the actual SRQ wqe and SRQ sge num */
@@ -277,140 +446,30 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
	srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;

-	if (udata) {
-		struct hns_roce_ib_create_srq ucmd;
-
-		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
-			ret = -EFAULT;
-			goto err_srq;
-		}
-
-		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-					srq_buf_size, 0, 0);
-		if (IS_ERR(srq->umem)) {
-			ret = PTR_ERR(srq->umem);
-			goto err_srq;
-		}
-
-		srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
-		if (hr_dev->caps.srqwqe_buf_pg_sz) {
-			npages = (ib_umem_page_count(srq->umem) +
-				  (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
-				  (1 << hr_dev->caps.srqwqe_buf_pg_sz);
-			page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
-			ret = hns_roce_mtt_init(hr_dev, npages,
-						page_shift,
-						&srq->mtt);
-		} else
-			ret = hns_roce_mtt_init(hr_dev,
-						ib_umem_page_count(srq->umem),
-						srq->umem->page_shift,
-						&srq->mtt);
-		if (ret)
-			goto err_buf;
-
-		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
-		if (ret)
-			goto err_srq_mtt;
-
-		/* config index queue BA */
-		srq->idx_que.umem = ib_umem_get(pd->uobject->context,
-						ucmd.que_addr,
-						srq->idx_que.buf_size, 0, 0);
-		if (IS_ERR(srq->idx_que.umem)) {
-			dev_err(hr_dev->dev,
-				"ib_umem_get error for index queue\n");
-			goto err_srq_mtt;
-		}
-
-		srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
-		if (hr_dev->caps.idx_buf_pg_sz) {
-			npages = (ib_umem_page_count(srq->idx_que.umem) +
-				  (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
-				  (1 << hr_dev->caps.idx_buf_pg_sz);
-			page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
-			ret = hns_roce_mtt_init(hr_dev, npages,
-						page_shift, &srq->idx_que.mtt);
-		} else {
-			ret = hns_roce_mtt_init(hr_dev,
-					ib_umem_page_count(srq->idx_que.umem),
-					srq->idx_que.umem->page_shift,
-					&srq->idx_que.mtt);
-		}
-
-		if (ret) {
-			dev_err(hr_dev->dev,
-				"hns_roce_mtt_init error for idx que\n");
-			goto err_idx_mtt;
-		}
-
-		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
-						 srq->idx_que.umem);
-		if (ret) {
-			dev_err(hr_dev->dev,
-				"hns_roce_ib_umem_write_mtt error for idx que\n");
-			goto err_idx_buf;
-		}
-	} else {
-		u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
-
-		if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
-				       (1 << page_shift) * 2,
-				       &srq->buf, page_shift)) {
-			ret = -ENOMEM;
-			goto err_buf;
-		}
-
-		srq->head = 0;
-		srq->tail = srq->max - 1;
-		srq->wqe_ctr = 0;
-
-		srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
-		ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
-					srq->buf.page_shift, &srq->mtt);
-		if (ret)
-			goto err_buf;
-
-		ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
-		if (ret)
-			goto err_srq_mtt;
-
-		page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
-		ret = hns_roce_create_idx_que(pd, srq, page_shift);
-		if (ret) {
-			dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
-				ret);
-			goto err_srq_mtt;
-		}
-
-		srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
-		/* Init mtt table for idx_que */
-		ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
-					srq->idx_que.idx_buf.page_shift,
-					&srq->idx_que.mtt);
-		if (ret)
-			goto err_create_idx;
-
-		/* Write buffer address into the mtt table */
-		ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
-					     &srq->idx_que.idx_buf);
-		if (ret)
-			goto err_idx_buf;
-
-		srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
-		if (!srq->wrid) {
-			ret = -ENOMEM;
-			goto err_idx_buf;
-		}
-	}
+	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
+	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
+
+	if (pd->uobject) {
+		ret = create_user_srq(pd, srq, udata, srq_buf_size);
+		if (ret) {
+			dev_err(hr_dev->dev, "Create user srq fail\n");
+			goto err_srq;
+		}
+	} else {
+		ret = create_kernel_srq(pd, srq, srq_buf_size);
+		if (ret) {
+			dev_err(hr_dev->dev, "Create kernel srq fail\n");
+			goto err_srq;
+		}
+	}

	cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
	      to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;

-	xrcdn = (srq_init_attr->srq_type == IB_SRQT_XRC) ?
-		to_hr_xrcd(srq_init_attr->ext.xrc.xrcd)->xrcdn : 0;
-
	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

-	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, xrcdn,
-				 &srq->mtt, 0, srq);
+	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0, &srq->mtt,
+				 0, srq);
	if (ret)
		goto err_wrid;
@@ -420,35 +479,20 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
	if (pd->uobject) {
		if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
			ret = -EFAULT;
-			goto err_wrid;
+			goto err_srqc_alloc;
		}
	}

	return &srq->ibsrq;

-err_wrid:
-	kvfree(srq->wrid);
-
-err_idx_buf:
-	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_idx_mtt:
-	if (udata)
-		ib_umem_release(srq->idx_que.umem);
-
-err_create_idx:
-	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
-			  &srq->idx_que.idx_buf);
-	kfree(srq->idx_que.bitmap);
-
-err_srq_mtt:
-	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_buf:
-	if (udata)
-		ib_umem_release(srq->umem);
-	else
-		hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+err_srqc_alloc:
+	hns_roce_srq_free(hr_dev, srq);
+
+err_wrid:
+	if (pd->uobject)
+		destroy_user_srq(hr_dev, srq);
+	else
+		destroy_kernel_srq(hr_dev, srq, srq_buf_size);

err_srq:
	kfree(srq);
@@ -478,20 +522,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq)
	return 0;
}

-struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn)
-{
-	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
-	struct hns_roce_srq *srq;
-
-	rcu_read_lock();
-	srq = radix_tree_lookup(&srq_table->tree,
-				srqn & (hr_dev->caps.max_srqs - 1));
-	rcu_read_unlock();
-
-	return srq;
-}
-EXPORT_SYMBOL_GPL(hns_roce_srq_lookup);
-
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
......