Commit 6d22d0eb authored by Yang Yingliang, committed by Xie XiuQi

driver: roce: update roce driver from driver team

driver inclusion
category: feature

-----------------------------------------

Based on 984f952682c9d0a603ed2fbbc541eab485a667be
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 9228d6c1
@@ -7,8 +7,9 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_sysfs.o
hns_roce_sysfs.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
hns-roce-hw-v1-objs := hns_roce_hw_v1.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_sysfs_v2.o
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o hns_roce_hw_sysfs_v2.o
@@ -59,6 +59,8 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
bool vlan_en = false;
rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_AH);
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -139,6 +141,8 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
struct hns_roce_ah *ah = to_hr_ah(ibah);
rdfx_func_cnt(to_hr_dev(ibah->device), RDFX_FUNC_QUERY_AH);
memset(ah_attr, 0, sizeof(*ah_attr));
rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
@@ -159,6 +163,8 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
int hns_roce_destroy_ah(struct ib_ah *ah)
{
rdfx_func_cnt(to_hr_dev(ah->device), RDFX_FUNC_DESTROY_AH);
kfree(to_hr_ah(ah));
return 0;
...
@@ -308,43 +308,21 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
&buf->hr_buf);
}
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
static int create_user_cq(struct hns_roce_dev *hr_dev,
const struct ib_cq_init_attr *attr,
struct hns_roce_cq *hr_cq,
struct ib_ucontext *context,
struct ib_udata *udata)
struct ib_udata *udata,
struct hns_roce_ib_create_cq_resp *resp,
struct hns_roce_uar *uar,
int cq_entries)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd;
struct hns_roce_ib_create_cq_resp resp = {};
struct device *dev = hr_dev->dev;
struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return ERR_PTR(-EINVAL);
}
hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
if (!hr_cq)
return ERR_PTR(-ENOMEM);
if (hr_dev->caps.min_cqes)
cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
spin_lock_init(&hr_cq->lock);
if (context) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "Failed to copy_from_udata.\n");
ret = -EFAULT;
return -EFAULT;
goto err_cq;
}
/* Get user space address, write it into mtt table */
@@ -353,11 +331,11 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
cq_entries);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
goto err_cq;
return ret;
}
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp))) {
(udata->outlen >= sizeof(*resp))) {
ret = hns_roce_db_map_user(to_hr_ucontext(context),
ucmd.db_addr, &hr_cq->db);
if (ret) {
@@ -365,16 +343,32 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
goto err_mtt;
}
hr_cq->db_en = 1;
resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
}
/* Get user space parameters */
uar = &to_hr_ucontext(context)->uar;
} else {
return 0;
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, struct hns_roce_uar *uar,
int cq_entries)
{
struct device *dev = hr_dev->dev;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
if (ret) if (ret)
goto err_cq; return ret;
hr_cq->set_ci_db = hr_cq->db.db_record; hr_cq->set_ci_db = hr_cq->db.db_record;
*hr_cq->set_ci_db = 0; *hr_cq->set_ci_db = 0;
...@@ -382,8 +376,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -382,8 +376,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
} }
/* Init mmt table and write buff address to mtt table */ /* Init mmt table and write buff address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
cq_entries);
if (ret) { if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n"); dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db; goto err_db;
...@@ -392,6 +385,87 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -392,6 +385,87 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
uar = &hr_dev->priv_uar; uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset + hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * uar->index; DB_REG_OFFSET * uar->index;
return 0;
err_db:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
return ret;
}
static void destroy_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_ucontext *context,
struct ib_udata *udata,
struct hns_roce_ib_create_cq_resp *resp)
{
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(to_hr_ucontext(context),
&hr_cq->db);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_CQ);
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return ERR_PTR(-EINVAL);
}
hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
if (!hr_cq)
return ERR_PTR(-ENOMEM);
if (hr_dev->caps.min_cqes)
cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
spin_lock_init(&hr_cq->lock);
if (context) {
ret = create_user_cq(hr_dev, hr_cq, context, udata, &resp, uar,
cq_entries);
if (ret) {
dev_err(dev, "Create cq fail in user mode!\n");
goto err_cq;
}
} else {
ret = create_kernel_cq(hr_dev, hr_cq, uar, cq_entries);
if (ret) {
dev_err(dev, "Create cq fail in user mode!\n");
goto err_cq;
}
}
/* Allocate cq index, fill cq_context */
@@ -423,28 +497,18 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
goto err_cqc;
}
rdfx_alloc_cq_buf(hr_dev, hr_cq);
return &hr_cq->ib_cq;
err_cqc:
hns_roce_free_cq(hr_dev, hr_cq);
err_dbmap:
if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)))
hns_roce_db_unmap_user(to_hr_ucontext(context),
&hr_cq->db);
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context)
ib_umem_release(hr_cq->umem);
destroy_user_cq(hr_dev, hr_cq, context, udata, &resp);
else
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
destroy_kernel_cq(hr_dev, hr_cq);
hr_cq->ib_cq.cqe);
err_db:
if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
hns_roce_free_db(hr_dev, &hr_cq->db);
err_cq:
kfree(hr_cq);
@@ -458,6 +522,10 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
int ret = 0;
rdfx_func_cnt(hr_dev, RDFX_FUNC_DESTROY_CQ);
rdfx_inc_dealloc_cq_cnt(hr_dev);
rdfx_free_cq_buff(hr_dev, hr_cq);
if (hr_dev->hw->destroy_cq) {
ret = hr_dev->hw->destroy_cq(ib_cq);
} else {
...
@@ -496,7 +496,7 @@ struct hns_roce_idx_que {
u32 buf_size;
struct ib_umem *umem;
struct hns_roce_mtt mtt;
u64 *bitmap;
unsigned long *bitmap;
};
struct hns_roce_srq {
@@ -654,8 +654,6 @@ struct hns_roce_qp {
u32 doorbell_qpn;
__le32 sq_signal_bits;
u32 sq_next_wqe;
int sq_max_wqes_per_wr;
int sq_spare_wqes;
struct hns_roce_wq sq;
struct ib_umem *umem;
@@ -919,6 +917,12 @@ struct hns_roce_stat {
};
struct hns_roce_dfx_hw {
int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer);
int (*query_qpc_info)(struct hns_roce_dev *hr_dev, u32 qpn,
int *buffer);
int (*query_mpt_info)(struct hns_roce_dev *hr_dev, u32 key,
int *buffer);
int (*query_cqc_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_cmd_stat)(struct hns_roce_dev *hr_dev,
@@ -1072,6 +1076,7 @@ struct hns_roce_dev {
const struct hns_roce_hw *hw;
const struct hns_roce_dfx_hw *dfx;
void *priv;
void *dfx_priv;
struct workqueue_struct *irq_workq;
struct hns_roce_stat hr_stat;
u32 func_num;
@@ -1257,8 +1262,6 @@ int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq);
struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn);
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1304,6 +1307,139 @@ void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res);
int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev);
void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev);
#ifdef CONFIG_INFINIBAND_HNS_DFX
enum {
RDFX_FUNC_MODIFY_DEVICE,
RDFX_FUNC_QUERY_DEVICE,
RDFX_FUNC_QUERY_PORT,
RDFX_FUNC_MODIFY_PORT,
RDFX_FUNC_GET_LINK_LAYER,
RDFX_FUNC_GET_NETDEV,
RDFX_FUNC_QUERY_GID,
RDFX_FUNC_ADD_GID,
RDFX_FUNC_DEL_GID,
RDFX_FUNC_QUERY_PKEY,
RDFX_FUNC_ALLOC_UCONTEXT,
RDFX_FUNC_DEALLOC_UCONTEXT,
RDFX_FUNC_MMAP,
RDFX_FUNC_ALLOC_PD,
RDFX_FUNC_DEALLOC_PD,
RDFX_FUNC_CREATE_AH,
RDFX_FUNC_QUERY_AH,
RDFX_FUNC_DESTROY_AH,
RDFX_FUNC_CREATE_QP,
RDFX_FUNC_MODIFY_QP,
RDFX_FUNC_QUERY_QP,
RDFX_FUNC_DESTROY_QP,
RDFX_FUNC_POST_SEND,
RDFX_FUNC_POST_RECV,
RDFX_FUNC_CREATE_CQ,
RDFX_FUNC_MODIFY_CQ,
RDFX_FUNC_DESTROY_CQ,
RDFX_FUNC_REQ_NOTIFY_CQ,
RDFX_FUNC_POLL_CQ,
RDFX_FUNC_RESIZE_CQ,
RDFX_FUNC_GET_DMA_MR,
RDFX_FUNC_REG_USER_MR,
RDFX_FUNC_REREG_USER_MR,
RDFX_FUNC_DEREG_MR,
RDFX_FUNC_PORT_IMMUTABLE
};
void alloc_rdfx_info(struct hns_roce_dev *hr_dev);
void rdfx_set_dev_name(struct hns_roce_dev *hr_dev);
void free_rdfx_info(struct hns_roce_dev *hr_dev);
void rdfx_func_cnt(struct hns_roce_dev *hr_dev, int func);
void rdfx_inc_dealloc_qp_cnt(struct hns_roce_dev *hr_dev);
void rdfx_inc_arm_cq_cnt(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
enum ib_cq_notify_flags flags);
void rdfx_inc_dereg_mr_cnt(struct hns_roce_dev *hr_dev);
void rdfx_inc_sq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn);
void rdfx_inc_rq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn);
void rdfx_inc_ceqe_cnt(struct hns_roce_dev *hr_dev, int ceqn);
void rdfx_inc_dealloc_cq_cnt(struct hns_roce_dev *hr_dev);
struct rdfx_qp_info *rdfx_get_rdfx_qp(struct hns_roce_dev *hr_dev,
unsigned long qpn);
void rdfx_put_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn);
#ifndef CONFIG_INFINIBAND_HNS_DFX_ENHANCE
void rdfx_release_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn);
#else
#define rdfx_release_rdfx_qp(hr_dev, qpn)
#endif
struct rdfx_cq_info *rdfx_get_rdfx_cq(struct hns_roce_dev *hr_dev,
unsigned long cqn);
void rdfx_put_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn);
void rdfx_release_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn);
struct rdfx_ceq_info *rdfx_get_rdfx_ceq(struct hns_roce_dev *hr_dev,
unsigned long ceqn);
void rdfx_put_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn);
void rdfx_release_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn);
void rdfx_alloc_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn,
unsigned int eq_cmd);
void rdfx_alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
void rdfx_free_cq_buff(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
void rdfx_alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void rdfx_set_qp_attr(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state new_state);
void rdfx_alloc_rdfx_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
void rdfx_release_rdfx_mr(struct hns_roce_dev *hr_dev, unsigned long key);
void rdfx_alloc_rdfx_pd(struct hns_roce_dev *hr_dev, struct hns_roce_pd *pd);
void rdfx_release_rdfx_pd(struct hns_roce_dev *hr_dev, unsigned long pdn);
#ifdef CONFIG_KERNEL_419
void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, int ind, void *wqe,
const struct ib_recv_wr *wr);
#else
void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, int ind, void *wqe,
struct ib_recv_wr *wr);
#endif
void rdfx_cp_cqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
void *cqe);
void rdfx_set_rdfx_cq_ci(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq);
#else
#define alloc_rdfx_info(hr_dev)
#define rdfx_set_dev_name(hr_dev)
#define free_rdfx_info(hr_dev)
#define rdfx_func_cnt(hr_dev, func)
#define rdfx_inc_dealloc_qp_cnt(hr_dev)
#define rdfx_inc_arm_cq_cnt(hr_dev, hr_cq, flags)
#define rdfx_inc_dereg_mr_cnt(hr_dev)
#define rdfx_inc_sq_db_cnt(hr_dev, qpn)
#define rdfx_inc_rq_db_cnt(hr_dev, qpn)
#define rdfx_inc_ceqe_cnt(hr_dev, ceqn)
#define rdfx_inc_dealloc_cq_cnt(hr_dev)
#define rdfx_get_rdfx_qp(hr_dev, qpn)
#define rdfx_put_rdfx_qp(hr_dev, qpn)
#define rdfx_release_rdfx_qp(hr_dev, qpn)
#define rdfx_get_rdfx_cq(hr_dev, cqn)
#define rdfx_put_rdfx_cq(hr_dev, cqn)
#define rdfx_release_rdfx_cq(hr_dev, cqn)
#define rdfx_get_rdfx_ceq(hr_dev, ceqn)
#define rdfx_put_rdfx_ceq(hr_dev, ceqn)
#define rdfx_release_rdfx_ceq(hr_dev, ceqn)
#define rdfx_alloc_rdfx_ceq(hr_dev, ceqn, eq_cmd)
#define rdfx_alloc_cq_buf(hr_dev, hr_cq)
#define rdfx_free_cq_buff(hr_dev, hr_cq)
#define rdfx_alloc_qp_buf(hr_dev, hr_qp)
#define rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state)
#define rdfx_alloc_rdfx_mr(hr_dev, mr)
#define rdfx_release_rdfx_mr(hr_dev, key)
#define rdfx_alloc_rdfx_pd(hr_dev, pd)
#define rdfx_release_rdfx_pd(hr_dev, pdn)
#define rdfx_cp_rq_wqe_buf(hr_dev, hr_qp, ind, wqe, wr)
#define rdfx_cp_cqe_buf(hr_dev, hr_cq, cqe)
#define rdfx_set_rdfx_cq_ci(hr_dev, hr_cq)
#endif
#endif /* _HNS_ROCE_DEVICE_H */
@@ -42,20 +42,47 @@
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
int hop_num = 0;
(hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
(hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
(hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
(hr_dev->caps.scc_ctx_hop_num && type == HEM_TYPE_SCC_CTX) ||
(hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
(hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
(hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
(hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
(hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
(hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
return true;
switch (type) {
case HEM_TYPE_QPC:
hop_num = hr_dev->caps.qpc_hop_num;
break;
case HEM_TYPE_MTPT:
hop_num = hr_dev->caps.mpt_hop_num;
break;
case HEM_TYPE_CQC:
hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SRQC:
hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_SCC_CTX:
hop_num = hr_dev->caps.scc_ctx_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_CQE:
hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_MTT:
hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_SRQWQE:
hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
hop_num = hr_dev->caps.idx_hop_num;
break;
default:
return false;
}
return hop_num ? true : false;
}
EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);
@@ -94,17 +121,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
return 0;
}
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
static int get_hem_table_config(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj,
struct hns_roce_hem_mhop *mhop,
struct hns_roce_hem_mhop *mhop)
u32 type)
{
struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
u32 table_idx;
u32 bt_num;
u32 chunk_size;
switch (table->type) {
switch (type) {
case HEM_TYPE_QPC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT);
@@ -195,10 +218,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
break;
default:
dev_err(dev, "Table %d not support multi-hop addressing!\n",
table->type);
type);
return -EINVAL;
}
return 0;
}
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj,
struct hns_roce_hem_mhop *mhop)
{
struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
u32 table_idx;
u32 bt_num;
u32 chunk_size;
if (get_hem_table_config(hr_dev, mhop, table->type))
return -EINVAL;
if (!obj)
return 0;
@@ -890,7 +929,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long obj_size, unsigned long nobj,
int use_lowmem)
{
struct device *dev = hr_dev->dev;
unsigned long obj_per_chunk;
unsigned long num_hem;
@@ -903,99 +941,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
if (!table->hem)
return -ENOMEM;
} else {
struct hns_roce_hem_mhop mhop = {};
unsigned long buf_chunk_size;
unsigned long bt_chunk_size;
unsigned long bt_chunk_num;
unsigned long num_bt_l0 = 0;
u32 hop_num;
switch (type) {
if (get_hem_table_config(hr_dev, &mhop, type))
case HEM_TYPE_QPC:
buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_bt_num;
hop_num = hr_dev->caps.qpc_hop_num;
break;
case HEM_TYPE_MTPT:
buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.mpt_bt_num;
hop_num = hr_dev->caps.mpt_hop_num;
break;
case HEM_TYPE_CQC:
buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_bt_num;
hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SCC_CTX:
buf_chunk_size = 1 << (hr_dev->caps.scc_ctx_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.scc_ctx_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.scc_ctx_bt_num;
hop_num = hr_dev->caps.scc_ctx_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_SRQC:
buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.srqc_bt_num;
hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_MTT:
buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_CQE:
buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_SRQWQE:
buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.idx_hop_num;
break;
default:
dev_err(dev,
"Table %d not support to init hem table here!\n",
type);
return -EINVAL;
}
buf_chunk_size = mhop.buf_chunk_size;
bt_chunk_size = mhop.bt_chunk_size;
num_bt_l0 = mhop.ba_l0_num;
hop_num = mhop.hop_num;
obj_per_chunk = buf_chunk_size / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
bt_chunk_num = bt_chunk_size / 8;
...
@@ -47,6 +47,9 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
#ifdef CONFIG_INFINIBAND_HNS_TEST
#include "hns_hw_v2_test.h"
#endif
static int loopback;
static int dcqcn;
static int is_d;
...@@ -304,6 +307,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -304,6 +307,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags); spin_lock_irqsave(&qp->sq.lock, flags);
ind = qp->sq_next_wqe; ind = qp->sq_next_wqe;
sge_ind = qp->next_sge; sge_ind = qp->next_sge;
rdfx_func_cnt(hr_dev, RDFX_FUNC_POST_SEND);
rdfx_get_rdfx_qp(hr_dev, ibqp->qp_num);
for (nreq = 0; wr; ++nreq, wr = wr->next) { for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
...@@ -589,6 +594,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -589,6 +594,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
} }
rdfx_cp_sq_wqe_buf(hr_dev, qp, ind, wqe, rc_sq_wqe, wr);
ind++; ind++;
} else { } else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
...@@ -635,6 +642,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -635,6 +642,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
} }
} }
} }
rdfx_inc_sq_db_cnt(hr_dev, ibqp->qp_num);
rdfx_put_rdfx_qp(hr_dev, ibqp->qp_num);
spin_unlock_irqrestore(&qp->sq.lock, flags); spin_unlock_irqrestore(&qp->sq.lock, flags);
...@@ -673,6 +682,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -673,6 +682,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return -EINVAL; return -EINVAL;
} }
rdfx_func_cnt(hr_dev, RDFX_FUNC_POST_RECV);
rdfx_get_rdfx_qp(hr_dev, ibqp->qp_num);
for (nreq = 0; wr; ++nreq, wr = wr->next) { for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&hr_qp->rq, nreq, if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
hr_qp->ibqp.recv_cq)) { hr_qp->ibqp.recv_cq)) {
...@@ -717,6 +729,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -717,6 +729,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
hr_qp->rq.wrid[ind] = wr->wr_id; hr_qp->rq.wrid[ind] = wr->wr_id;
rdfx_cp_rq_wqe_buf(hr_dev, hr_qp, ind, wqe, wr);
ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1); ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
} }
...@@ -741,7 +755,11 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -741,7 +755,11 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return ret; return ret;
} }
} }
rdfx_inc_rq_db_cnt(hr_dev, hr_qp->qpn);
} }
rdfx_put_rdfx_qp(hr_dev, hr_qp->qpn);
spin_unlock_irqrestore(&hr_qp->rq.lock, flags); spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
return ret; return ret;
...@@ -1015,6 +1033,7 @@ void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, ...@@ -1015,6 +1033,7 @@ void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
else else
desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
} }
EXPORT_SYMBOL(hns_roce_cmq_setup_basic_desc);
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev) static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{ {
...@@ -1158,6 +1177,7 @@ int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, ...@@ -1158,6 +1177,7 @@ int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
return ret; return ret;
} }
EXPORT_SYMBOL(hns_roce_cmq_send);
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{ {
@@ -1335,7 +1355,7 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id)
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
int i;
int vf_num = hr_dev->func_num - 1;
int vf_num = 0;/*should be (hr_dev->func_num-1) when enable ROCE VF*/
/* Clear vf first, then clear pf*/
for (i = vf_num; i >= 0; i--)
...@@ -1576,6 +1596,14 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1576,6 +1596,14 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
VF_RES_B_DATA_3_VF_SL_NUM_M, VF_RES_B_DATA_3_VF_SL_NUM_M,
VF_RES_B_DATA_3_VF_SL_NUM_S, VF_RES_B_DATA_3_VF_SL_NUM_S,
HNS_ROCE_VF_SL_NUM); HNS_ROCE_VF_SL_NUM);
roce_set_field(req_b->vf_sccc_idx_num,
VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
roce_set_field(req_b->vf_sccc_idx_num,
VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
HNS_ROCE_VF_SCCC_BT_NUM(d));
} }
} }
...@@ -1724,6 +1752,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1724,6 +1752,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM; caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM; caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM; caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM; caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM; caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM; caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
...@@ -1805,7 +1834,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1805,7 +1834,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
HNS_ROCE_CAP_FLAG_RECORD_DB | HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB; HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
caps->pkey_table_len[0] = 1; caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM(d);
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM; caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM; caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = 0; caps->local_ca_ack_delay = 0;
...@@ -2452,6 +2481,9 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, ...@@ -2452,6 +2481,9 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
int ret = 0; int ret = 0;
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
if (flags & IB_MR_REREG_PD) { if (flags & IB_MR_REREG_PD) {
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, pdn); V2_MPT_BYTE_4_PD_S, pdn);
...@@ -2683,7 +2715,12 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, ...@@ -2683,7 +2715,12 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
u32 vector) u32 vector)
{ {
struct hns_roce_v2_cq_context *cq_context; struct hns_roce_v2_cq_context *cq_context;
unsigned int cq_period = HNS_ROCE_V2_CQ_DEFAULT_INTERVAL;
unsigned int cq_max_cnt = HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM;
#ifdef CONFIG_INFINIBAND_HNS_TEST
test_set_cqc_param(&cq_period, &cq_max_cnt);
#endif
cq_context = mb_buf; cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context)); memset(cq_context, 0, sizeof(*cq_context));
...@@ -2744,12 +2781,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, ...@@ -2744,12 +2781,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_56_cqe_period_maxcnt, roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_M,
V2_CQC_BYTE_56_CQ_MAX_CNT_S, V2_CQC_BYTE_56_CQ_MAX_CNT_S, cq_max_cnt);
HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
roce_set_field(cq_context->byte_56_cqe_period_maxcnt, roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_M,
V2_CQC_BYTE_56_CQ_PERIOD_S, V2_CQC_BYTE_56_CQ_PERIOD_S, cq_period);
HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
} }
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
...@@ -2763,6 +2798,9 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, ...@@ -2763,6 +2798,9 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
doorbell[0] = 0; doorbell[0] = 0;
doorbell[1] = 0; doorbell[1] = 0;
rdfx_func_cnt(to_hr_dev(ibcq->device), RDFX_FUNC_REQ_NOTIFY_CQ);
rdfx_inc_arm_cq_cnt(to_hr_dev(ibcq->device), hr_cq, flags);
notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL; V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
/* /*
...@@ -3106,9 +3144,19 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, ...@@ -3106,9 +3144,19 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
spin_lock_irqsave(&hr_cq->lock, flags); spin_lock_irqsave(&hr_cq->lock, flags);
rdfx_func_cnt(to_hr_dev(ibcq->device), RDFX_FUNC_POLL_CQ);
rdfx_get_rdfx_cq(to_hr_dev(ibcq->device), hr_cq->cqn);
for (npolled = 0; npolled < num_entries; ++npolled) { for (npolled = 0; npolled < num_entries; ++npolled) {
rdfx_cp_cqe_buf(to_hr_dev(ibcq->device), hr_cq,
get_sw_cqe_v2(hr_cq, hr_cq->cons_index));
if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled)) if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
break; break;
rdfx_set_cqe_info(to_hr_dev(ibcq->device), hr_cq,
get_cqe_v2(hr_cq, (hr_cq->cons_index - 1) &
(hr_cq->ib_cq.cqe)));
} }
if (npolled) { if (npolled) {
...@@ -3117,16 +3165,58 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, ...@@ -3117,16 +3165,58 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index); hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
} }
rdfx_set_rdfx_cq_ci(to_hr_dev(ibcq->device), hr_cq);
rdfx_put_rdfx_cq(to_hr_dev(ibcq->device), hr_cq->cqn);
spin_unlock_irqrestore(&hr_cq->lock, flags); spin_unlock_irqrestore(&hr_cq->lock, flags);
return npolled; return npolled;
} }
static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
int step_idx)
{
int op;
switch (type) {
case HEM_TYPE_QPC:
op = HNS_ROCE_CMD_WRITE_QPC_BT0;
break;
case HEM_TYPE_MTPT:
op = HNS_ROCE_CMD_WRITE_MPT_BT0;
break;
case HEM_TYPE_CQC:
op = HNS_ROCE_CMD_WRITE_CQC_BT0;
break;
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
break;
case HEM_TYPE_SCC_CTX:
if (step_idx) {
/* No need to notify Hardware when step_idx is 1 or 2 */
return -EINVAL;
}
op = HNS_ROCE_CMD_WRITE_SCC_CTX_BT0;
break;
case HEM_TYPE_QPC_TIMER:
op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
break;
case HEM_TYPE_CQC_TIMER:
op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
break;
default:
dev_warn(hr_dev->dev,
"Table %d not to be written by mailbox!\n", type);
return -EINVAL;
}
return op + step_idx;
}
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx)
{
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_iter iter;
struct hns_roce_hem_mhop mhop;
@@ -3139,7 +3229,7 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
u64 bt_ba = 0;
u32 chunk_ba_num;
u32 hop_num;
u16 op = 0xff;
int op;
if (!hns_roce_check_whether_mhop(hr_dev, table->type))
return 0;
@@ -3161,38 +3251,9 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
hem_idx = i;
}
switch (table->type) {
op = get_op_for_set_hem(hr_dev, table->type, step_idx);
case HEM_TYPE_QPC:
if (op == -EINVAL)
op = HNS_ROCE_CMD_WRITE_QPC_BT0;
break;
case HEM_TYPE_MTPT:
op = HNS_ROCE_CMD_WRITE_MPT_BT0;
break;
case HEM_TYPE_CQC:
op = HNS_ROCE_CMD_WRITE_CQC_BT0;
break;
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
break;
case HEM_TYPE_SCC_CTX:
if (step_idx) {
/* No need to notify Hardware when step_idx is 1 or 2*/
return 0;
}
op = HNS_ROCE_CMD_WRITE_SCC_CTX_BT0;
break;
case HEM_TYPE_QPC_TIMER:
op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
break;
case HEM_TYPE_CQC_TIMER:
op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
break;
default:
dev_warn(dev, "Table %d not to be written by mailbox!\n",
table->type);
return 0;
}
op += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
...@@ -3997,9 +4058,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -3997,9 +4058,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port; port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
/* when loop_idc is 1, it should loopback */ /* when loop_idc is 1, it should loopback */
if (ibqp->qp_type == IB_QPT_UC || ibqp->qp_type == IB_QPT_RC || if (ibqp->qp_type == IB_QPT_RC) {
ibqp->qp_type == IB_QPT_XRC_INI ||
ibqp->qp_type == IB_QPT_XRC_TGT) {
roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S,
hr_dev->loop_idc); hr_dev->loop_idc);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0); roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
...@@ -4209,7 +4268,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, ...@@ -4209,7 +4268,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
#ifdef CONFIG_KERNEL_419 #ifdef CONFIG_KERNEL_419
const struct ib_gid_attr *gid_attr = NULL; const struct ib_gid_attr *gid_attr = attr->ah_attr.grh.sgid_attr;
#else #else
struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_ROCE}; struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_ROCE};
union ib_gid zgid = { {0} }; union ib_gid zgid = { {0} };
...@@ -4228,7 +4287,6 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, ...@@ -4228,7 +4287,6 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
#ifdef CONFIG_KERNEL_419 #ifdef CONFIG_KERNEL_419
if (is_roce_protocol) { if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev); vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
if (is_vlan_dev(gid_attr->ndev)) { if (is_vlan_dev(gid_attr->ndev)) {
...@@ -4676,6 +4734,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -4676,6 +4734,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
*hr_qp->rdb.db_record = 0; *hr_qp->rdb.db_record = 0;
} }
rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state);
out: out:
kfree(context); kfree(context);
return ret; return ret;
...@@ -4719,6 +4779,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ...@@ -4719,6 +4779,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int state; int state;
int ret; int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_QP);
context = kzalloc(sizeof(*context), GFP_KERNEL); context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) if (!context)
return -ENOMEM; return -ENOMEM;
@@ -4852,8 +4914,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
if (ret) {
dev_err(dev, "modify QP %06lx to ERR failed.\n",
dev_err(dev, "modify QP to Reset failed.\n");
hr_qp->qpn);
return ret;
}
}
...@@ -4917,9 +4978,14 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp) ...@@ -4917,9 +4978,14 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret; int ret;
rdfx_inc_dealloc_qp_cnt(hr_dev);
rdfx_func_cnt(hr_dev, RDFX_FUNC_DESTROY_QP);
rdfx_release_rdfx_qp(hr_dev, ibqp->qp_num);
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject); ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret); dev_err(hr_dev->dev, "Destroy qp 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
return ret; return ret;
} }
...@@ -5189,19 +5255,19 @@ static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry) ...@@ -5189,19 +5255,19 @@ static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry) static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
{ {
u32 buf_chk_sz; u32 chk_sz;
unsigned long off; unsigned long off;
buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE; off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
if (eq->hop_num == HNS_ROCE_HOP_NUM_0) if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) + return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
off % buf_chk_sz); off % chk_sz);
else else
return (struct hns_roce_aeqe *)((u8 *) return (struct hns_roce_aeqe *)((u8 *)(eq->buf[off / chk_sz]) +
(eq->buf[off / buf_chk_sz]) + off % buf_chk_sz); off % chk_sz);
} }
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
...@@ -5375,6 +5441,8 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, ...@@ -5375,6 +5441,8 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
dev_warn(dev, "cons_index overflow, set back to 0.\n"); dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0; eq->cons_index = 0;
} }
rdfx_inc_ceqe_cnt(hr_dev, eq->eqn);
} }
set_eq_cons_index_v2(eq); set_eq_cons_index_v2(eq);
...@@ -5491,14 +5559,17 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) ...@@ -5491,14 +5559,17 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int ret; int ret;
if (eqn < hr_dev->caps.num_comp_vectors) if (eqn < hr_dev->caps.num_comp_vectors) {
ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
0, HNS_ROCE_CMD_DESTROY_CEQC, 0, HNS_ROCE_CMD_DESTROY_CEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
else
rdfx_release_rdfx_ceq(hr_dev, eqn);
} else {
ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
0, HNS_ROCE_CMD_DESTROY_AEQC, 0, HNS_ROCE_CMD_DESTROY_AEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
}
if (ret) if (ret)
dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
} }
...@@ -5596,10 +5667,17 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, ...@@ -5596,10 +5667,17 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
void *mb_buf) void *mb_buf)
{ {
struct hns_roce_eq_context *eqc; struct hns_roce_eq_context *eqc;
unsigned int eq_period = HNS_ROCE_V2_EQ_DEFAULT_INTERVAL;
unsigned int eq_max_cnt = HNS_ROCE_V2_EQ_DEFAULT_BURST_NUM;
unsigned int eq_arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
eqc = mb_buf; eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context)); memset(eqc, 0, sizeof(struct hns_roce_eq_context));
#ifdef CONFIG_INFINIBAND_HNS_TEST
test_set_eq_param(eq->type_flag, &eq_period, &eq_max_cnt, &eq_arm_st);
#endif
/* init eqc */ /* init eqc */
eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG; eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
eq->hop_num = hr_dev->caps.eqe_hop_num; eq->hop_num = hr_dev->caps.eqe_hop_num;
...@@ -5610,6 +5688,9 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, ...@@ -5610,6 +5688,9 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz; eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz; eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
eq->shift = ilog2((unsigned int)eq->entries); eq->shift = ilog2((unsigned int)eq->entries);
eq->eq_max_cnt = eq_max_cnt;
eq->eq_period = eq_period;
eq->arm_st = eq_arm_st;
if (!eq->hop_num) if (!eq->hop_num)
eq->eqe_ba = eq->buf_list->map; eq->eqe_ba = eq->buf_list->map;
...@@ -5763,8 +5844,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, ...@@ -5763,8 +5844,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
/ buf_chk_sz; buf_chk_sz;
bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8); bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
/* hop_num = 0 */ /* hop_num = 0 */
...@@ -5991,6 +6072,8 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, ...@@ -5991,6 +6072,8 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
} }
} }
rdfx_alloc_rdfx_ceq(hr_dev, eq->eqn, eq_cmd);
hns_roce_config_eqc(hr_dev, eq, mailbox->buf); hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
...@@ -6022,6 +6105,94 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, ...@@ -6022,6 +6105,94 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
return ret; return ret;
} }
static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
int comp_num, int aeq_num, int other_num)
{
struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
int i, j;
int ret;
for (i = 0; i < irq_num; i++) {
hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
GFP_KERNEL);
if (!hr_dev->irq_names[i]) {
ret = -ENOMEM;
goto err_kzalloc_failed;
}
}
/* irq contains: abnormal + AEQ + CEQ*/
for (j = 0; j < irq_num; j++)
if (j < other_num)
snprintf((char *)hr_dev->irq_names[j],
HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
else if (j < (other_num + aeq_num))
snprintf((char *)hr_dev->irq_names[j],
HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
j - other_num);
else
snprintf((char *)hr_dev->irq_names[j],
HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
j - other_num - aeq_num);
for (j = 0; j < irq_num; j++) {
if (j < other_num)
ret = request_irq(hr_dev->irq[j],
hns_roce_v2_msix_interrupt_abn,
0, hr_dev->irq_names[j], hr_dev);
else if (j < (other_num + comp_num))
ret = request_irq(eq_table->eq[j - other_num].irq,
hns_roce_v2_msix_interrupt_eq,
0, hr_dev->irq_names[j + aeq_num],
&eq_table->eq[j - other_num]);
else
ret = request_irq(eq_table->eq[j - other_num].irq,
hns_roce_v2_msix_interrupt_eq,
0, hr_dev->irq_names[j - comp_num],
&eq_table->eq[j - other_num]);
if (ret) {
dev_err(hr_dev->dev, "Request irq error!\n");
goto err_request_failed;
}
}
return 0;
err_request_failed:
for (j -= 1; j >= 0; j--)
if (j < other_num)
free_irq(hr_dev->irq[j], hr_dev);
else
free_irq(eq_table->eq[j - other_num].irq,
&eq_table->eq[j - other_num]);
err_kzalloc_failed:
for (i -= 1; i >= 0; i--)
kfree(hr_dev->irq_names[i]);
return ret;
}
static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
{
int irq_num;
int eq_num;
int i;
eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
irq_num = eq_num + hr_dev->caps.num_other_vectors;
for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
free_irq(hr_dev->irq[i], hr_dev);
for (i = 0; i < eq_num; i++)
free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
for (i = 0; i < irq_num; i++)
kfree(hr_dev->irq_names[i]);
}
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
...@@ -6033,7 +6204,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) ...@@ -6033,7 +6204,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
int other_num; int other_num;
int comp_num; int comp_num;
int aeq_num; int aeq_num;
int i, j, k; int i;
int ret; int ret;
other_num = hr_dev->caps.num_other_vectors; other_num = hr_dev->caps.num_other_vectors;
...@@ -6047,27 +6218,18 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) ...@@ -6047,27 +6218,18 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
if (!eq_table->eq) if (!eq_table->eq)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < irq_num; i++) {
hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
GFP_KERNEL);
if (!hr_dev->irq_names[i]) {
ret = -ENOMEM;
goto err_failed_kzalloc;
}
}
/* create eq */ /* create eq */
for (j = 0; j < eq_num; j++) { for (i = 0; i < eq_num; i++) {
eq = &eq_table->eq[j]; eq = &eq_table->eq[i];
eq->hr_dev = hr_dev; eq->hr_dev = hr_dev;
eq->eqn = j; eq->eqn = i;
if (j < comp_num) { if (i < comp_num) {
/* CEQ */ /* CEQ */
eq_cmd = HNS_ROCE_CMD_CREATE_CEQC; eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
eq->type_flag = HNS_ROCE_CEQ; eq->type_flag = HNS_ROCE_CEQ;
eq->entries = hr_dev->caps.ceqe_depth; eq->entries = hr_dev->caps.ceqe_depth;
eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE; eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
eq->irq = hr_dev->irq[j + other_num + aeq_num]; eq->irq = hr_dev->irq[i + other_num + aeq_num];
eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
} else { } else {
...@@ -6076,7 +6238,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) ...@@ -6076,7 +6238,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
eq->type_flag = HNS_ROCE_AEQ; eq->type_flag = HNS_ROCE_AEQ;
eq->entries = hr_dev->caps.aeqe_depth; eq->entries = hr_dev->caps.aeqe_depth;
eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE; eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
eq->irq = hr_dev->irq[j - comp_num + other_num]; eq->irq = hr_dev->irq[i - comp_num + other_num];
eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
} }
...@@ -6091,67 +6253,32 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) ...@@ -6091,67 +6253,32 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
/* enable irq */ /* enable irq */
hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE); hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
/* irq contains: abnormal + AEQ + CEQ*/ ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
for (k = 0; k < irq_num; k++) aeq_num, other_num);
if (k < other_num)
snprintf((char *)hr_dev->irq_names[k],
HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
else if (k < (other_num + aeq_num))
snprintf((char *)hr_dev->irq_names[k],
HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
k - other_num);
else
snprintf((char *)hr_dev->irq_names[k],
HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
k - other_num - aeq_num);
for (k = 0; k < irq_num; k++) {
if (k < other_num)
ret = request_irq(hr_dev->irq[k],
hns_roce_v2_msix_interrupt_abn,
0, hr_dev->irq_names[k], hr_dev);
else if (k < (other_num + comp_num))
ret = request_irq(eq_table->eq[k - other_num].irq,
hns_roce_v2_msix_interrupt_eq,
0, hr_dev->irq_names[k + aeq_num],
&eq_table->eq[k - other_num]);
else
ret = request_irq(eq_table->eq[k - other_num].irq,
hns_roce_v2_msix_interrupt_eq,
0, hr_dev->irq_names[k - comp_num],
&eq_table->eq[k - other_num]);
if (ret) { if (ret) {
dev_err(dev, "Request irq error!\n"); dev_err(dev, "Request irq failed.\n");
goto err_request_irq_fail; goto err_request_irq_fail;
} }
}
hr_dev->irq_workq = hr_dev->irq_workq =
create_singlethread_workqueue("hns_roce_irq_workqueue"); create_singlethread_workqueue("hns_roce_irq_workqueue");
if (!hr_dev->irq_workq) { if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n"); dev_err(dev, "Create irq workqueue failed!\n");
ret = -ENOMEM; ret = -ENOMEM;
goto err_request_irq_fail; goto err_create_wq_fail;
} }
return 0; return 0;
err_create_wq_fail:
__hns_roce_free_irq(hr_dev);
err_request_irq_fail: err_request_irq_fail:
for (k -= 1; k >= 0; k--) hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
if (k < other_num)
free_irq(hr_dev->irq[k], hr_dev);
else
free_irq(eq_table->eq[k - other_num].irq,
&eq_table->eq[k - other_num]);
err_create_eq_fail: err_create_eq_fail:
for (j -= 1; j >= 0; j--)
hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
err_failed_kzalloc:
for (i -= 1; i >= 0; i--) for (i -= 1; i >= 0; i--)
kfree(hr_dev->irq_names[i]); hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
kfree(eq_table->eq); kfree(eq_table->eq);
return ret; return ret;
...@@ -6170,20 +6297,14 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) ...@@ -6170,20 +6297,14 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
/* Disable irq */ /* Disable irq */
hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
for (i = 0; i < hr_dev->caps.num_other_vectors; i++) __hns_roce_free_irq(hr_dev);
free_irq(hr_dev->irq[i], hr_dev);
for (i = 0; i < eq_num; i++) { for (i = 0; i < eq_num; i++) {
hns_roce_v2_destroy_eqc(hr_dev, i); hns_roce_v2_destroy_eqc(hr_dev, i);
free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
} }
for (i = 0; i < irq_num; i++)
kfree(hr_dev->irq_names[i]);
kfree(eq_table->eq); kfree(eq_table->eq);
flush_workqueue(hr_dev->irq_workq); flush_workqueue(hr_dev->irq_workq);
...@@ -6378,7 +6499,7 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que) ...@@ -6378,7 +6499,7 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que)
/* bitmap[i] is set zero if all bits are allocated */ /* bitmap[i] is set zero if all bits are allocated */
for (i = 0; idx_que->bitmap[i] == 0; ++i) for (i = 0; idx_que->bitmap[i] == 0; ++i)
; ;
bit_num = ffs(idx_que->bitmap[i]); bit_num = __ffs64(idx_que->bitmap[i]) + 1;
idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1)); idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
return i * sizeof(u64) * 8 + (bit_num - 1); return i * sizeof(u64) * 8 + (bit_num - 1);
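The switch from ffs() to __ffs64() matters because idx_que->bitmap entries are 64-bit words: ffs() takes an int, so a free bit in the upper 32 bits would never be found. A minimal standalone sketch of the difference (not driver code):

	/* Minimal sketch: why ffs() is wrong for a u64 bitmap word. */
	u64 word = 1ULL << 40;            /* only bit 40 is still free/set      */

	int a = ffs((int)word);           /* sees the low 32 bits only -> 0     */
	unsigned long b = __ffs64(word);  /* -> 40, the bit we actually wanted  */

	/* find_empty_entry() therefore uses __ffs64(bitmap[i]) + 1 so that the
	 * (bit_num - 1) arithmetic above works for all 64 bit positions.       */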
...@@ -6475,6 +6596,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -6475,6 +6596,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
} }
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = { static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
.query_cqc_info = hns_roce_v2_query_cqc_info,
.query_qpc_info = hns_roce_v2_query_qpc_info,
.query_mpt_info = hns_roce_v2_query_mpt_info,
.query_cqc_stat = hns_roce_v2_query_cqc_stat, .query_cqc_stat = hns_roce_v2_query_cqc_stat,
.query_cmd_stat = hns_roce_v2_query_cmd_stat, .query_cmd_stat = hns_roce_v2_query_cmd_stat,
.query_ceqc_stat = hns_roce_v2_query_ceqc_stat, .query_ceqc_stat = hns_roce_v2_query_ceqc_stat,
...@@ -6538,16 +6662,9 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, ...@@ -6538,16 +6662,9 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle) struct hnae3_handle *handle)
{ {
struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_v2_priv *priv = hr_dev->priv;
const struct pci_device_id *id;
int d; int d;
int i; int i;
id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
if (!id) {
dev_err(hr_dev->dev, "device is not compatible!\n");
return -ENXIO;
}
hr_dev->hw = &hns_roce_hw_v2; hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2; hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
...@@ -6640,6 +6757,7 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, ...@@ -6640,6 +6757,7 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{ {
const struct hnae3_ae_ops *ops = handle->ae_algo->ops; const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
const struct pci_device_id *id;
struct hns_roce_dev *hr_dev; struct hns_roce_dev *hr_dev;
unsigned long end; unsigned long end;
int ret; int ret;
...@@ -6651,6 +6769,10 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) ...@@ -6651,6 +6769,10 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto head_chk_err; goto head_chk_err;
} }
id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
if (!id)
return 0;
ret = __hns_roce_hw_v2_init_instance(handle); ret = __hns_roce_hw_v2_init_instance(handle);
if (ret) { if (ret) {
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
......
...@@ -35,13 +35,14 @@ ...@@ -35,13 +35,14 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#define HNS_ROCE_VF_QPC_BT_NUM(d) (d ? (256) : (8)) #define HNS_ROCE_VF_QPC_BT_NUM(d) (d ? (8) : (256))
#define HNS_ROCE_VF_SRQC_BT_NUM(d) (d ? (64) : (8)) #define HNS_ROCE_VF_SCCC_BT_NUM(d) (d ? (8) : (64))
#define HNS_ROCE_VF_CQC_BT_NUM(d) (d ? (64) : (8)) #define HNS_ROCE_VF_SRQC_BT_NUM(d) (d ? (8) : (64))
#define HNS_ROCE_VF_MPT_BT_NUM(d) (d ? (64) : (8)) #define HNS_ROCE_VF_CQC_BT_NUM(d) (d ? (8) : (64))
#define HNS_ROCE_VF_EQC_NUM(d) (d ? (64) : (8)) #define HNS_ROCE_VF_MPT_BT_NUM(d) (d ? (8) : (64))
#define HNS_ROCE_VF_SMAC_NUM(d) (d ? (32) : (8)) #define HNS_ROCE_VF_EQC_NUM(d) (d ? (8) : (64))
#define HNS_ROCE_VF_SGID_NUM(d) (d ? (32) : (8)) #define HNS_ROCE_VF_SMAC_NUM(d) (d ? (8) : (32))
#define HNS_ROCE_VF_SGID_NUM(d) (d ? (8) : (32))
#define HNS_ROCE_VF_SL_NUM 8 #define HNS_ROCE_VF_SL_NUM 8
#define HNS_ROCE_V2_MAX_QP_NUM 0x100000 #define HNS_ROCE_V2_MAX_QP_NUM 0x100000
...@@ -62,8 +63,8 @@ ...@@ -62,8 +63,8 @@
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1 #define HNS_ROCE_V2_PHY_UAR_NUM 1
#define HNS_ROCE_V2_MAX_IRQ_NUM(d) (d ? (65) : (3)) #define HNS_ROCE_V2_MAX_IRQ_NUM(d) (d ? (3) : (65))
#define HNS_ROCE_V2_COMP_VEC_NUM(d) (d ? (63) : (1)) #define HNS_ROCE_V2_COMP_VEC_NUM(d) (d ? (1) : (63))
#define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
...@@ -114,7 +115,7 @@ ...@@ -114,7 +115,7 @@
#define HNS_ROCE_EQE_HOP_NUM 2 #define HNS_ROCE_EQE_HOP_NUM 2
#define HNS_ROCE_IDX_HOP_NUM 1 #define HNS_ROCE_IDX_HOP_NUM 1
#define HNS_ROCE_V2_GID_INDEX_NUM 256 #define HNS_ROCE_V2_GID_INDEX_NUM(d) (d ? (8) : (256))
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18) #define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
...@@ -166,8 +167,8 @@ ...@@ -166,8 +167,8 @@
#define HNS_ICL_SWITCH_CMD_ROCEE_SEL BIT(HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT) #define HNS_ICL_SWITCH_CMD_ROCEE_SEL BIT(HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT)
#define CMD_CSQ_DESC_NUM (1024) #define CMD_CSQ_DESC_NUM 1024
#define CMD_CRQ_DESC_NUM (1024) #define CMD_CRQ_DESC_NUM 1024
enum { enum {
NO_ARMED = 0x0, NO_ARMED = 0x0,
...@@ -682,6 +683,7 @@ struct hns_roce_v2_qp_context { ...@@ -682,6 +683,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_76_RQIE_S 28 #define V2_QPC_BYTE_76_RQIE_S 28
#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30 #define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
#define V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S 31
#define V2_QPC_BYTE_80_RX_CQN_S 0 #define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0) #define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
...@@ -1463,7 +1465,8 @@ struct hns_roce_vf_res_b { ...@@ -1463,7 +1465,8 @@ struct hns_roce_vf_res_b {
__le32 vf_smac_idx_num; __le32 vf_smac_idx_num;
__le32 vf_sgid_idx_num; __le32 vf_sgid_idx_num;
__le32 vf_qid_idx_sl_num; __le32 vf_qid_idx_sl_num;
__le32 rsv[2]; __le32 vf_sccc_idx_num;
__le32 rsv1;
}; };
#define VF_RES_B_DATA_0_VF_ID_S 0 #define VF_RES_B_DATA_0_VF_ID_S 0
...@@ -1487,6 +1490,13 @@ struct hns_roce_vf_res_b { ...@@ -1487,6 +1490,13 @@ struct hns_roce_vf_res_b {
#define VF_RES_B_DATA_3_VF_SL_NUM_S 16 #define VF_RES_B_DATA_3_VF_SL_NUM_S 16
#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16) #define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
struct hns_roce_vf_switch { struct hns_roce_vf_switch {
__le32 rocee_sel; __le32 rocee_sel;
__le32 fun_id; __le32 fun_id;
...@@ -1702,6 +1712,9 @@ struct hns_roce_eq_context { ...@@ -1702,6 +1712,9 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_EQ_ARMED 1 #define HNS_ROCE_V2_EQ_ARMED 1
#define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3 #define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3
#define HNS_ROCE_V2_EQ_DEFAULT_INTERVAL 0x10
#define HNS_ROCE_V2_EQ_DEFAULT_BURST_NUM 0x10
#define HNS_ROCE_EQ_INIT_EQE_CNT 0 #define HNS_ROCE_EQ_INIT_EQE_CNT 0
#define HNS_ROCE_EQ_INIT_PROD_IDX 0 #define HNS_ROCE_EQ_INIT_PROD_IDX 0
#define HNS_ROCE_EQ_INIT_REPORT_TIMER 0 #define HNS_ROCE_EQ_INIT_REPORT_TIMER 0
...@@ -1912,11 +1925,38 @@ int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev, ...@@ -1912,11 +1925,38 @@ int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev,
int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev, int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev,
u16 eq_count, u16 eq_period, u16 type); u16 eq_count, u16 eq_period, u16 type);
int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer);
int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn,
int *buffer);
int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key,
int *buffer);
void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
enum hns_roce_opcode_type opcode, enum hns_roce_opcode_type opcode,
bool is_read); bool is_read);
int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num); struct hns_roce_cmq_desc *desc, int num);
#ifdef CONFIG_INFINIBAND_HNS_DFX
#ifdef CONFIG_KERNEL_419
void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
unsigned int ind, void *wqe,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
const struct ib_send_wr *wr);
#else
void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
unsigned int ind, void *wqe,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
struct ib_send_wr *wr);
#endif
void rdfx_set_cqe_info(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct hns_roce_v2_cqe *cqe);
#else
#define rdfx_set_cqe_info(hr_dev, hr_cq, cqe)
#define rdfx_cp_sq_wqe_buf(hr_dev, qp, ind, wqe, rc_sq_wqe, wr)
#endif
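When CONFIG_INFINIBAND_HNS_DFX is disabled, the rdfx hooks above collapse to empty macros so their call sites in the data path compile away. A minimal illustration of the same pattern with hypothetical names (not part of the patch):

	/* Hypothetical example of the compile-out pattern used above. */
	#ifdef CONFIG_FOO_STATS
	void foo_count_event(struct foo_dev *dev, int ev);
	#else
	#define foo_count_event(dev, ev) do { } while (0)
	#endif

	/* Call sites stay unconditional; with CONFIG_FOO_STATS=n they are
	 * removed by the preprocessor and cost nothing at run time. */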
#define HNS_ROCE_V2_SCC_CTX_DONE_S 0 #define HNS_ROCE_V2_SCC_CTX_DONE_S 0
......
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2018 Hisilicon Limited.
#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hw_v2.h"
int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer)
{
struct hns_roce_v2_cq_context *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_QUERY_CQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
goto err_mailbox;
}
memcpy(buffer, context, sizeof(*context));
err_mailbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn,
int *buffer)
{
struct hns_roce_v2_qp_context *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY qpc cmd process error\n");
goto err_mailbox;
}
context = mailbox->buf;
memcpy(buffer, context, sizeof(*context));
err_mailbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key,
int *buffer)
{
struct hns_roce_v2_mpt_entry *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_QUERY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY mpt cmd process error\n");
goto err_mailbox;
}
memcpy(buffer, context, sizeof(*context));
err_mailbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
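The three query helpers above share one mailbox round trip: allocate a command mailbox, issue the matching HNS_ROCE_CMD_QUERY_* opcode with the object index, copy the returned context out of the DMA buffer, and free the mailbox. A condensed sketch of that shared shape, with a hypothetical helper name and error reporting trimmed:

	/* Sketch only: 'opcode' and 'index' are the parts that differ
	 * between the cqc/qpc/mpt variants above. */
	static int query_hw_ctx(struct hns_roce_dev *hr_dev, u16 opcode,
				unsigned long index, void *out, size_t len)
	{
		struct hns_roce_cmd_mailbox *mailbox;
		int ret;

		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, index, 0,
					opcode, HNS_ROCE_CMD_TIMEOUT_MSECS);
		if (!ret)
			memcpy(out, mailbox->buf, len);

		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
		return ret;
	}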
...@@ -148,6 +148,8 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num, ...@@ -148,6 +148,8 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
unsigned long flags; unsigned long flags;
int ret; int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_ADD_GID);
if (port >= hr_dev->caps.num_ports || if (port >= hr_dev->caps.num_ports ||
index > hr_dev->caps.gid_table_len[port]) { index > hr_dev->caps.gid_table_len[port]) {
dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n", dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n",
...@@ -175,6 +177,8 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num, ...@@ -175,6 +177,8 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
unsigned long flags; unsigned long flags;
int ret; int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_DEL_GID);
if (port >= hr_dev->caps.num_ports) if (port >= hr_dev->caps.num_ports)
return -EINVAL; return -EINVAL;
...@@ -269,6 +273,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev, ...@@ -269,6 +273,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_DEVICE);
memset(props, 0, sizeof(*props)); memset(props, 0, sizeof(*props));
props->fw_ver = hr_dev->caps.fw_ver; props->fw_ver = hr_dev->caps.fw_ver;
...@@ -314,8 +320,10 @@ static int hns_roce_query_device(struct ib_device *ib_dev, ...@@ -314,8 +320,10 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_DEVICE_MEM_WINDOW_TYPE_2B; IB_DEVICE_MEM_WINDOW_TYPE_2B;
} }
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
}
return 0; return 0;
} }
...@@ -326,6 +334,8 @@ static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev, ...@@ -326,6 +334,8 @@ static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct net_device *ndev; struct net_device *ndev;
rdfx_func_cnt(hr_dev, RDFX_FUNC_GET_NETDEV);
if (port_num < 1 || port_num > hr_dev->caps.num_ports) if (port_num < 1 || port_num > hr_dev->caps.num_ports)
return NULL; return NULL;
...@@ -349,6 +359,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, ...@@ -349,6 +359,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
enum ib_mtu mtu; enum ib_mtu mtu;
u8 port; u8 port;
rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_PORT);
assert(port_num > 0); assert(port_num > 0);
port = port_num - 1; port = port_num - 1;
...@@ -387,12 +399,16 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, ...@@ -387,12 +399,16 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
u8 port_num) u8 port_num)
{ {
rdfx_func_cnt(to_hr_dev(device), RDFX_FUNC_GET_LINK_LAYER);
return IB_LINK_LAYER_ETHERNET; return IB_LINK_LAYER_ETHERNET;
} }
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index, static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
union ib_gid *gid) union ib_gid *gid)
{ {
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_QUERY_GID);
return 0; return 0;
} }
...@@ -401,6 +417,8 @@ static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index, ...@@ -401,6 +417,8 @@ static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
{ {
*pkey = PKEY_ID; *pkey = PKEY_ID;
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_QUERY_PKEY);
return 0; return 0;
} }
...@@ -409,6 +427,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, ...@@ -409,6 +427,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
{ {
unsigned long flags; unsigned long flags;
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_MODIFY_DEVICE);
if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -424,6 +444,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, ...@@ -424,6 +444,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask, static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
struct ib_port_modify *props) struct ib_port_modify *props)
{ {
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_MODIFY_PORT);
return 0; return 0;
} }
...@@ -438,6 +460,8 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, ...@@ -438,6 +460,8 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (!hr_dev->active) if (!hr_dev->active)
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_UCONTEXT);
resp.qp_tab_size = hr_dev->caps.num_qps; resp.qp_tab_size = hr_dev->caps.num_qps;
context = kmalloc(sizeof(*context), GFP_KERNEL); context = kmalloc(sizeof(*context), GFP_KERNEL);
...@@ -474,6 +498,9 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext) ...@@ -474,6 +498,9 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{ {
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
rdfx_func_cnt(to_hr_dev(ibcontext->device),
RDFX_FUNC_DEALLOC_UCONTEXT);
hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar); hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
kfree(context); kfree(context);
...@@ -529,6 +556,8 @@ static int hns_roce_mmap(struct ib_ucontext *context, ...@@ -529,6 +556,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(context->device); struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
rdfx_func_cnt(hr_dev, RDFX_FUNC_MMAP);
if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
return -EINVAL; return -EINVAL;
...@@ -557,6 +586,8 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num, ...@@ -557,6 +586,8 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr attr; struct ib_port_attr attr;
int ret; int ret;
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_PORT_IMMUTABLE);
ret = ib_query_port(ib_dev, port_num, &attr); ret = ib_query_port(ib_dev, port_num, &attr);
if (ret) if (ret)
return ret; return ret;
...@@ -720,6 +751,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ...@@ -720,6 +751,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* OTHERS */ /* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable; ib_dev->get_port_immutable = hns_roce_port_immutable;
ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext; ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->res.fill_res_entry = hns_roce_fill_res_entry;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ib_dev->alloc_xrcd = hns_roce_ib_alloc_xrcd; ib_dev->alloc_xrcd = hns_roce_ib_alloc_xrcd;
...@@ -1060,19 +1092,32 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -1060,19 +1092,32 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
return ret; return ret;
} }
int hns_roce_init(struct hns_roce_dev *hr_dev) int hns_roce_reset(struct hns_roce_dev *hr_dev)
{ {
int ret; int ret;
struct device *dev = hr_dev->dev;
if (hr_dev->hw->reset) { if (hr_dev->hw->reset) {
ret = hr_dev->hw->reset(hr_dev, true); ret = hr_dev->hw->reset(hr_dev, true);
if (ret)
return ret;
}
hr_dev->is_reset = false;
return 0;
}
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
int ret;
struct device *dev = hr_dev->dev;
alloc_rdfx_info(hr_dev);
ret = hns_roce_reset(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Reset RoCE engine failed!\n"); dev_err(dev, "Reset RoCE engine failed!\n");
return ret; return ret;
} }
}
hr_dev->is_reset = false;
if (hr_dev->hw->cmq_init) { if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev); ret = hr_dev->hw->cmq_init(hr_dev);
...@@ -1133,7 +1178,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) ...@@ -1133,7 +1178,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
goto error_failed_register_device; goto error_failed_register_device;
(void)hns_roce_register_sysfs(hr_dev); (void)hns_roce_register_sysfs(hr_dev);
rdfx_set_dev_name(hr_dev);
return 0; return 0;
error_failed_register_device: error_failed_register_device:
...@@ -1187,6 +1232,8 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev) ...@@ -1187,6 +1232,8 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
hr_dev->hw->cmq_exit(hr_dev); hr_dev->hw->cmq_exit(hr_dev);
if (hr_dev->hw->reset) if (hr_dev->hw->reset)
hr_dev->hw->reset(hr_dev, false); hr_dev->hw->reset(hr_dev, false);
free_rdfx_info(hr_dev);
} }
EXPORT_SYMBOL_GPL(hns_roce_exit); EXPORT_SYMBOL_GPL(hns_roce_exit);
......
...@@ -39,6 +39,10 @@ ...@@ -39,6 +39,10 @@
#include "hns_roce_cmd.h" #include "hns_roce_cmd.h"
#include "hns_roce_hem.h" #include "hns_roce_hem.h"
#ifdef CONFIG_INFINIBAND_HNS_TEST
#include "hns_roce_test.h"
#endif
static u32 hw_index_to_key(unsigned long ind) static u32 hw_index_to_key(unsigned long ind)
{ {
return (u32)(ind >> 24) | (ind << 8); return (u32)(ind >> 24) | (ind << 8);
...@@ -351,31 +355,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, ...@@ -351,31 +355,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
mr->pbl_bt_l0 = NULL; mr->pbl_bt_l0 = NULL;
mr->pbl_l0_dma_addr = 0; mr->pbl_l0_dma_addr = 0;
} }
static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
/* PBL multi hop addressing */ struct hns_roce_mr *mr, u32 pbl_bt_sz)
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
int i = 0, j = 0;
u32 pbl_bt_sz;
u32 mhop_num;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0;
/* hop_num = 1 */
if (mhop_num == 1) {
if (npages > pbl_bt_sz / 8) { if (npages > pbl_bt_sz / 8) {
dev_err(dev, "npages %d is larger than buf_pg_sz!", dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages); npages);
...@@ -389,45 +373,26 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -389,45 +373,26 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
mr->pbl_size = npages; mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr; mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = mhop_num; mr->pbl_hop_num = 1;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0; return 0;
}
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l1_dma_addr)
return -ENOMEM;
mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1), }
GFP_KERNEL);
if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1;
if (mhop_num == 3) {
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
goto err_kcalloc_l2_dma;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
sizeof(*mr->pbl_bt_l2), struct hns_roce_mr *mr, u32 pbl_bt_sz)
GFP_KERNEL); {
if (!mr->pbl_bt_l2) struct device *dev = hr_dev->dev;
goto err_kcalloc_bt_l2; int npages_allocated;
} u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 size;
int i;
/* alloc L0 BT */ pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_dma_alloc_l0;
if (mhop_num == 2) {
/* alloc L1 BT */ /* alloc L1 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) { for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) { if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
...@@ -441,7 +406,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -441,7 +406,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
GFP_KERNEL); GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) { if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0); hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0; return -ENOMEM;
} }
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
...@@ -450,7 +415,39 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -450,7 +415,39 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
if (pbl_bt_cnt >= pbl_last_bt_num) if (pbl_bt_cnt >= pbl_last_bt_num)
break; break;
} }
} else if (mhop_num == 3) {
mr->l0_chunk_last_num = i + 1;
return 0;
}
static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
int i;
int j = 0;
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
return -ENOMEM;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_bt_l2),
GFP_KERNEL);
if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l2;
/* alloc L1, L2 BT */ /* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) { for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
...@@ -495,17 +492,10 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -495,17 +492,10 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
if (mr_alloc_done) if (mr_alloc_done)
break; break;
} }
}
mr->l0_chunk_last_num = i + 1; mr->l0_chunk_last_num = i + 1;
if (mhop_num == 3)
mr->l1_chunk_last_num = j + 1; mr->l1_chunk_last_num = j + 1;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0; return 0;
...@@ -517,6 +507,65 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -517,6 +507,65 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
kfree(mr->pbl_l2_dma_addr); kfree(mr->pbl_l2_dma_addr);
mr->pbl_l2_dma_addr = NULL; mr->pbl_l2_dma_addr = NULL;
return -ENOMEM;
}
/* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
u32 pbl_bt_sz;
u32 mhop_num;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0;
/* hop_num = 1 */
if (mhop_num == 1)
return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l1_dma_addr)
return -ENOMEM;
mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
GFP_KERNEL);
if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1;
/* alloc L0 BT */
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_kcalloc_l2_dma;
if (mhop_num == 2) {
if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
goto err_kcalloc_l2_dma;
}
if (mhop_num == 3) {
if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
goto err_kcalloc_l2_dma;
}
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
err_kcalloc_l2_dma: err_kcalloc_l2_dma:
kfree(mr->pbl_bt_l1); kfree(mr->pbl_bt_l1);
mr->pbl_bt_l1 = NULL; mr->pbl_bt_l1 = NULL;
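The split into pbl_1hop_alloc()/pbl_2hop_alloc()/pbl_3hop_alloc() keeps the original sizing rule: each base-address-table (BT) page of pbl_bt_sz bytes holds pbl_bt_sz / 8 64-bit entries, and pbl_last_bt_num is the number of last-level BTs needed to cover npages page addresses. A worked example with illustrative numbers:

	/* Illustrative numbers only. */
	u32 pbl_bt_sz = 1 << 12;                 /* 4 KiB BT page          */
	int npages = 2000;                       /* MR buffer pages        */

	u64 entries_per_bt = pbl_bt_sz / 8;      /* 512 addresses per BT   */
	u64 pbl_last_bt_num = (npages + entries_per_bt - 1) / entries_per_bt;
	                                         /* = 4 last-level BTs     */

	/* hop_num == 1 requires npages <= 512 here, which is why
	 * pbl_1hop_alloc() rejects larger regions; the 2- and 3-hop paths
	 * add one or two levels of intermediate BTs on top of the L0 BT. */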
...@@ -949,6 +998,10 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) ...@@ -949,6 +998,10 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
if (ret) if (ret)
goto err_free; goto err_free;
#ifdef CONFIG_INFINIBAND_HNS_TEST
test_set_mr_access(mr);
#endif
ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr); ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
if (ret) if (ret)
goto err_mr; goto err_mr;
...@@ -956,6 +1009,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) ...@@ -956,6 +1009,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
mr->ibmr.rkey = mr->ibmr.lkey = mr->key; mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
mr->umem = NULL; mr->umem = NULL;
rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_GET_DMA_MR);
rdfx_alloc_rdfx_mr(to_hr_dev(pd->device), mr);
return &mr->ibmr; return &mr->ibmr;
err_mr: err_mr:
...@@ -1153,6 +1209,9 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1153,6 +1209,9 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->ibmr.rkey = mr->ibmr.lkey = mr->key; mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_REG_USER_MR);
rdfx_alloc_rdfx_mr(to_hr_dev(pd->device), mr);
return &mr->ibmr; return &mr->ibmr;
err_mr: err_mr:
...@@ -1166,43 +1225,18 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1166,43 +1225,18 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd, u64 start, u64 length,
struct ib_udata *udata) u64 virt_addr, int mr_access_flags,
struct hns_roce_cmd_mailbox *mailbox,
u32 pdn)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
unsigned long mtpt_idx;
u32 pdn = 0;
int npages; int npages;
int ret; int ret;
if (!mr->enabled)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
HNS_ROCE_CMD_QUERY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
goto free_cmd_mbox;
ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
mr->enabled = 0;
if (flags & IB_MR_REREG_PD)
pdn = to_hr_pd(pd)->pdn;
if (flags & IB_MR_REREG_TRANS) {
if (mr->size != ~0ULL) { if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem); npages = ib_umem_page_count(mr->umem);
...@@ -1219,7 +1253,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, ...@@ -1219,7 +1253,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
if (IS_ERR(mr->umem)) { if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem); ret = PTR_ERR(mr->umem);
mr->umem = NULL; mr->umem = NULL;
goto free_cmd_mbox; return -ENOMEM;
} }
npages = ib_umem_page_count(mr->umem); npages = ib_umem_page_count(mr->umem);
...@@ -1236,19 +1270,14 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, ...@@ -1236,19 +1270,14 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
goto release_umem; goto release_umem;
} }
} }
}
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr, mr_access_flags, virt_addr,
length, mailbox->buf); length, mailbox->buf);
if (ret) { if (ret)
if (flags & IB_MR_REREG_TRANS)
goto release_umem; goto release_umem;
else
goto free_cmd_mbox;
}
if (flags & IB_MR_REREG_TRANS) {
ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) { if (ret) {
if (mr->size != ~0ULL) { if (mr->size != ~0ULL) {
...@@ -1264,12 +1293,72 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, ...@@ -1264,12 +1293,72 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
goto release_umem; goto release_umem;
} }
release_umem:
ib_umem_release(mr->umem);
return ret;
}
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
struct device *dev = hr_dev->dev;
unsigned long mtpt_idx;
u32 pdn = 0;
int ret;
rdfx_func_cnt(hr_dev, RDFX_FUNC_REREG_USER_MR);
if (!mr->enabled)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
HNS_ROCE_CMD_QUERY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
goto free_cmd_mbox;
ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
mr->enabled = 0;
if (flags & IB_MR_REREG_PD) {
pdn = to_hr_pd(pd)->pdn;
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
if (ret)
goto free_cmd_mbox;
}
if (flags & IB_MR_REREG_TRANS) {
ret = rereg_mr_trans(ibmr, flags,
start, length,
virt_addr, mr_access_flags,
mailbox, pdn);
if (ret)
goto free_cmd_mbox;
} }
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx); ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) { if (ret) {
dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
goto release_umem; ib_umem_release(mr->umem);
goto free_cmd_mbox;
} }
mr->enabled = 1; mr->enabled = 1;
...@@ -1280,9 +1369,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, ...@@ -1280,9 +1369,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
return 0; return 0;
release_umem:
ib_umem_release(mr->umem);
free_cmd_mbox: free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox); hns_roce_free_cmd_mailbox(hr_dev, mailbox);
...@@ -1295,6 +1381,10 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr) ...@@ -1295,6 +1381,10 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mr *mr = to_hr_mr(ibmr);
int ret = 0; int ret = 0;
rdfx_func_cnt(hr_dev, RDFX_FUNC_DEREG_MR);
rdfx_inc_dereg_mr_cnt(hr_dev);
rdfx_release_rdfx_mr(hr_dev, mr->key);
if (hr_dev->hw->dereg_mr) { if (hr_dev->hw->dereg_mr) {
ret = hr_dev->hw->dereg_mr(hr_dev, mr); ret = hr_dev->hw->dereg_mr(hr_dev, mr);
} else { } else {
...@@ -1350,6 +1440,9 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, ...@@ -1350,6 +1440,9 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
mr->ibmr.rkey = mr->ibmr.lkey = mr->key; mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
mr->umem = NULL; mr->umem = NULL;
rdfx_func_cnt(hr_dev, RDFX_FUNC_REG_USER_MR);
rdfx_alloc_rdfx_mr(hr_dev, mr);
return &mr->ibmr; return &mr->ibmr;
err_free_mr: err_free_mr:
......
...@@ -125,6 +125,8 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, ...@@ -125,6 +125,8 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
} }
} }
rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_PD);
rdfx_alloc_rdfx_pd(hr_dev, pd);
#endif #endif
return &pd->ibpd; return &pd->ibpd;
} }
...@@ -132,6 +134,10 @@ EXPORT_SYMBOL_GPL(hns_roce_alloc_pd); ...@@ -132,6 +134,10 @@ EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
int hns_roce_dealloc_pd(struct ib_pd *pd) int hns_roce_dealloc_pd(struct ib_pd *pd)
{ {
rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_DEALLOC_PD);
rdfx_release_rdfx_pd(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
kfree(to_hr_pd(pd)); kfree(to_hr_pd(pd));
......
...@@ -350,16 +350,12 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, ...@@ -350,16 +350,12 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return 0; return 0;
} }
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd) struct hns_roce_ib_create_qp *ucmd)
{ {
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride); u8 max_sq_stride = ilog2(roundup_sq_stride);
u32 page_size;
u32 max_cnt;
u32 ex_sge_num;
/* Sanity check SQ size before proceeding */ /* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
...@@ -375,6 +371,25 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ...@@ -375,6 +371,25 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
return -EINVAL; return -EINVAL;
} }
return 0;
}
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
u32 ex_sge_num;
u32 page_size;
u32 max_cnt;
int ret;
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
dev_err(hr_dev->dev, "Sanity check sq size fail\n");
return ret;
}
hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride; hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
...@@ -416,8 +431,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ...@@ -416,8 +431,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), PAGE_SIZE); hr_qp->sq.wqe_shift), PAGE_SIZE);
} else { } else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt = hr_qp->sge.sge_cnt = ex_sge_num ?
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num); max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) + hr_qp->rq.wqe_shift), page_size) +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt << HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
...@@ -446,6 +461,35 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ...@@ -446,6 +461,35 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
return 0; return 0;
} }
static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
struct device *dev = hr_dev->dev;
if (hr_qp->sq.max_gs > 2) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
hr_qp->sge.sge_shift = 4;
}
/* ud sqwqe's sge use extend sge */
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
hr_qp->sq.max_gs);
hr_qp->sge.sge_shift = 4;
}
if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
return 0;
}
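set_extend_sge_param() sizes the extended SGE area from the SQ depth and the per-WQE SGE overflow: max_gs - 2 for RC (two SGEs live in the WQE itself), all of max_gs for GSI/UD. A worked example with illustrative values:

	/* Illustrative values only. */
	hr_qp->sq.wqe_cnt = 128;
	hr_qp->sq.max_gs  = 4;

	/* RC case: two SGEs fit in the WQE, the rest spill to the sge area. */
	hr_qp->sge.sge_cnt   = roundup_pow_of_two(128 * (4 - 2));  /* = 256 */
	hr_qp->sge.sge_shift = 4;                  /* 16-byte extended SGEs */

	/* On revision 0x20 hardware this count must still fit within
	 * hr_dev->caps.max_extend_sg, or the function returns -EINVAL. */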
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp) struct hns_roce_qp *hr_qp)
...@@ -454,6 +498,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, ...@@ -454,6 +498,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
u32 page_size; u32 page_size;
u32 max_cnt; u32 max_cnt;
int size; int size;
int ret;
if (cap->max_send_wr > hr_dev->caps.max_wqes || if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg || cap->max_send_sge > hr_dev->caps.max_sq_sg ||
...@@ -463,8 +508,6 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, ...@@ -463,8 +508,6 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
} }
hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
hr_qp->sq_max_wqes_per_wr = 1;
hr_qp->sq_spare_wqes = 0;
if (hr_dev->caps.min_wqes) if (hr_dev->caps.min_wqes)
max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
...@@ -484,25 +527,10 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, ...@@ -484,25 +527,10 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
else else
hr_qp->sq.max_gs = max_cnt; hr_qp->sq.max_gs = max_cnt;
if (hr_qp->sq.max_gs > 2) { ret = set_extend_sge_param(hr_dev, hr_qp);
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * if (ret) {
(hr_qp->sq.max_gs - 2)); dev_err(dev, "set extend sge parameters fail\n");
hr_qp->sge.sge_shift = 4; return ret;
}
/* ud sqwqe's sge use extend sge */
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
hr_qp->sq.max_gs);
hr_qp->sge.sge_shift = 4;
}
if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
} }
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
...@@ -536,7 +564,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, ...@@ -536,7 +564,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{ {
if (attr->qp_type == IB_QPT_XRC_TGT) if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
return 0; return 0;
return 1; return 1;
...@@ -874,11 +902,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ...@@ -874,11 +902,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_free_db(hr_dev, &hr_qp->rdb); hns_roce_free_db(hr_dev, &hr_qp->rdb);
err_rq_sge_list: err_rq_sge_list:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hns_roce_qp_has_rq(init_attr))
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
err_wqe_list: err_wqe_list:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hns_roce_qp_has_rq(init_attr))
kfree(hr_qp->rq_inl_buf.wqe_list); kfree(hr_qp->rq_inl_buf.wqe_list);
err_out: err_out:
...@@ -918,7 +948,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ...@@ -918,7 +948,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
hr_qp); hr_qp);
if (ret) { if (ret) {
dev_err(dev, "Create RC QP failed\n"); dev_err(dev, "Create RC QP 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
kfree(hr_qp); kfree(hr_qp);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -965,6 +996,9 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ...@@ -965,6 +996,9 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
} }
} }
rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_QP);
rdfx_alloc_qp_buf(hr_dev, hr_qp);
return &hr_qp->ibqp; return &hr_qp->ibqp;
} }
EXPORT_SYMBOL_GPL(hns_roce_create_qp); EXPORT_SYMBOL_GPL(hns_roce_create_qp);
...@@ -1069,6 +1103,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -1069,6 +1103,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int ret = -EINVAL; int ret = -EINVAL;
rdfx_func_cnt(hr_dev, RDFX_FUNC_MODIFY_QP);
mutex_lock(&hr_qp->mutex); mutex_lock(&hr_qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? cur_state = attr_mask & IB_QP_CUR_STATE ?
......
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2018 Hisilicon Limited.
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
static int hns_roce_fill_cq(struct sk_buff *msg,
struct hns_roce_v2_cq_context *context)
{
if (rdma_nl_put_driver_u32(msg, "state",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ceqn",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_CEQN_M,
V2_CQC_BYTE_4_CEQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "poll",
roce_get_bit(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_POLL_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "shift",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_SHIFT_M,
V2_CQC_BYTE_4_SHIFT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cmd_sn",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_CMD_SN_M,
V2_CQC_BYTE_4_CMD_SN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cqn",
roce_get_field(context->byte_8_cqn,
V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "hopnum",
roce_get_field(context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "pi",
roce_get_field(context->byte_28_cq_pi,
V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M,
V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ci",
roce_get_field(context->byte_32_cq_ci,
V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M,
V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rdb_en",
roce_get_field(context->byte_44_db_record,
V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
V2_CQC_BYTE_44_DB_RECORD_ADDR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "coalesce",
roce_get_field(
context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_MAX_CNT_M,
V2_CQC_BYTE_56_CQ_MAX_CNT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "period",
roce_get_field(
context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_PERIOD_M,
V2_CQC_BYTE_56_CQ_PERIOD_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cnt",
roce_get_field(context->byte_52_cqe_cnt,
V2_CQC_BYTE_52_CQE_CNT_M,
V2_CQC_BYTE_52_CQE_CNT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "se_idx",
roce_get_field(context->byte_64_se_cqe_idx,
V2_CQC_BYTE_64_SE_CQE_IDX_M,
V2_CQC_BYTE_64_SE_CQE_IDX_S)))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_cq *ib_cq = container_of(res, struct ib_cq, res);
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct hns_roce_v2_cq_context context;
struct nlattr *table_attr;
int ret;
if (!hr_dev->dfx->query_cqc_info)
return -EINVAL;
ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)&context);
if (ret)
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_cq(msg, &context))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
static int hns_roce_qp_fill_rp(struct sk_buff *msg,
struct hns_roce_v2_qp_context *context)
{
if (rdma_nl_put_driver_u32(msg, "rq_pi",
roce_get_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_ci",
roce_get_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_shift",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M,
V2_QPC_BYTE_20_RQ_SHIFT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_cqeidx",
roce_get_field(
context->byte_256_sqflush_rqcqe,
V2_QPC_BYTE_256_RQ_CQE_IDX_M,
V2_QPC_BYTE_256_RQ_CQE_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_rx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_RQ_RX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_tx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_RQ_TX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_rty_tx_err",
roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_db_doing",
roce_get_bit(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_RQ_DB_DOING_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rx_cqn",
roce_get_field(context->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S)))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_qp_fill_sp(struct sk_buff *msg,
struct hns_roce_v2_qp_context *context)
{
if (rdma_nl_put_driver_u32(msg, "sq_pi",
roce_get_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_ci",
roce_get_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_shift",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M,
V2_QPC_BYTE_20_SQ_SHIFT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_maxidx",
roce_get_field(context->byte_200_sq_max,
V2_QPC_BYTE_200_SQ_MAX_IDX_M,
V2_QPC_BYTE_200_SQ_MAX_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_rx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_SQ_RX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_tx_err",
roce_get_bit(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_SQ_TX_ERR_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_db_doing",
roce_get_bit(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_SQ_DB_DOING_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "tx_cqn",
roce_get_field(context->byte_252_err_txcqn,
V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S)))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_qp(struct sk_buff *msg,
struct hns_roce_v2_qp_context *context)
{
if (rdma_nl_put_driver_u32(msg, "smac_idx",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SMAC_IDX_M,
V2_QPC_BYTE_20_SMAC_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "vid",
roce_get_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "mtu",
roce_get_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "sgid_idx",
roce_get_field(
context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "srqn",
roce_get_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M,
V2_QPC_BYTE_76_SRQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "srq_en",
roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "chk_flg",
roce_get_field(context->byte_212_lsn,
V2_QPC_BYTE_212_CHECK_FLG_M,
V2_QPC_BYTE_212_CHECK_FLG_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "retry_cnt",
roce_get_field(context->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "err_type",
roce_get_field(context->byte_252_err_txcqn,
V2_QPC_BYTE_252_ERR_TYPE_M,
V2_QPC_BYTE_252_ERR_TYPE_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "flush_idx",
roce_get_field(
context->byte_256_sqflush_rqcqe,
V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
V2_QPC_BYTE_256_SQ_FLUSH_IDX_S)))
goto err;
if (hns_roce_qp_fill_rp(msg, context))
goto err;
if (hns_roce_qp_fill_sp(msg, context))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_res_qp_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_qp *ib_qp = container_of(res, struct ib_qp, res);
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
struct hns_roce_v2_qp_context context;
struct nlattr *table_attr;
int ret;
if (!hr_dev->dfx->query_qpc_info)
return -EINVAL;
ret = hr_dev->dfx->query_qpc_info(hr_dev, hr_qp->qpn, (int *)&context);
if (ret)
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_qp(msg, &context))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
static int hns_roce_fill_mr(struct sk_buff *msg,
struct hns_roce_v2_mpt_entry *context)
{
u64 val_h32;
if (rdma_nl_put_driver_u32(msg, "status",
roce_get_field(context->byte_4_pd_hop_st,
V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "lkey", context->lkey))
goto err;
if (rdma_nl_put_driver_u32(msg, "size", context->pbl_size))
goto err;
if (rdma_nl_put_driver_u32(msg, "ra",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_RA_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ri",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_R_INV_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "li",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_L_INV_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "atomic_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_ATOMIC_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rr_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_RR_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "rw_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_RW_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "lw_en",
roce_get_bit(context->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_LW_EN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "pbl_buf_pgsz",
roce_get_field(context->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S)))
goto err;
val_h32 = context->len_h;
if (rdma_nl_put_driver_u64(msg, "len",
val_h32 << 32 | context->len_l))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_res_mr_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_mr *ib_mr = container_of(res, struct ib_mr, res);
struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
struct hns_roce_v2_mpt_entry context;
int key = hr_dev->hr_stat.key;
struct nlattr *table_attr;
int ret;
if (!hr_dev->dfx->query_mpt_info)
return -EINVAL;
ret = hr_dev->dfx->query_mpt_info(hr_dev, key, (int *)&context);
if (ret)
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_mr(msg, &context))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
static int hns_roce_fill_pd(struct sk_buff *msg,
struct hns_roce_pd *hr_pd)
{
if (rdma_nl_put_driver_u32(msg, "pdn", hr_pd->pdn))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_res_pd_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_pd *ib_pd = container_of(res, struct ib_pd, res);
struct hns_roce_pd *hr_pd = to_hr_pd(ib_pd);
struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_pd(msg, hr_pd))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
return -EMSGSIZE;
}
int hns_roce_fill_res_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
if (res->type == RDMA_RESTRACK_PD)
return hns_roce_fill_res_pd_entry(msg, res);
if (res->type == RDMA_RESTRACK_CQ)
return hns_roce_fill_res_cq_entry(msg, res);
if (res->type == RDMA_RESTRACK_QP)
return hns_roce_fill_res_qp_entry(msg, res);
if (res->type == RDMA_RESTRACK_MR)
return hns_roce_fill_res_mr_entry(msg, res);
return 0;
}
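The fill_res_* helpers above populate the RDMA_NLDEV_ATTR_DRIVER nest that the rdma netlink resource-tracking (restrack) dumps carry back to user space. This hunk does not show where hns_roce_fill_res_entry gets registered; on 4.19-era kernels the driver hook is normally a fill_res_entry callback on struct ib_device, so the wiring in hns_roce_main.c is presumably along the lines of the following minimal sketch (an assumption, not code from this patch):

```c
/*
 * Hypothetical wiring sketch only -- not part of this diff. Assumes the
 * 4.19-era restrack driver hook (a fill_res_entry callback directly on
 * struct ib_device) and the existing hns_roce driver headers.
 */
static void hns_roce_setup_restrack(struct hns_roce_dev *hr_dev)
{
	struct ib_device *ib_dev = &hr_dev->ib_dev;

	/* Invoked by nldev RES_GET dumps for each tracked PD/CQ/QP/MR. */
	ib_dev->fill_res_entry = hns_roce_fill_res_entry;
}
```

With a hook like that in place, the per-resource driver attributes added here ("pdn", "status", "lkey", and so on) show up in rdmatool's detailed resource output next to the generic restrack fields.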
...@@ -209,129 +209,54 @@ void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) ...@@ -209,129 +209,54 @@ void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR); hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
} }
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
u32 page_shift) struct ib_udata *udata, int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
u32 bitmap_num;
int i;
idx_que->entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
bitmap_num = bitmap_num / (8 * sizeof(u64));
idx_que->buf_size = srq->max * idx_que->entry_sz;
if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
&idx_que->idx_buf, page_shift)) {
kfree(idx_que->bitmap);
return -ENOMEM;
}
for (i = 0; i < bitmap_num; i++)
idx_que->bitmap[i] = ~(0UL);
return 0;
}
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_srq *srq; struct hns_roce_ib_create_srq ucmd;
int srq_desc_size;
int srq_buf_size;
u32 page_shift; u32 page_shift;
int ret = 0;
u32 npages; u32 npages;
u16 xrcdn; int ret;
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return ERR_PTR(-EINVAL);
srq = kzalloc(sizeof(*srq), GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
srq->max_gs = srq_init_attr->attr.max_sge;
srq_desc_size = max(16, 16 * srq->max_gs);
srq->wqe_shift = ilog2(srq_desc_size);
srq_buf_size = srq->max * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
struct hns_roce_ib_create_srq ucmd; return -EFAULT;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_srq;
}
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
srq_buf_size, 0, 0); srq_buf_size, 0, 0);
if (IS_ERR(srq->umem)) { if (IS_ERR(srq->umem))
ret = PTR_ERR(srq->umem); return PTR_ERR(srq->umem);
goto err_srq;
}
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
if (hr_dev->caps.srqwqe_buf_pg_sz) { if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) + npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz); (1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages, ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
page_shift,
&srq->mtt);
} else } else
ret = hns_roce_mtt_init(hr_dev, ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem),
ib_umem_page_count(srq->umem), srq->umem->page_shift, &srq->mtt);
srq->umem->page_shift,
&srq->mtt);
if (ret) if (ret)
goto err_buf; goto err_user_buf;
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem); ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret) if (ret)
goto err_srq_mtt; goto err_user_srq_mtt;
/* config index queue BA */ /* config index queue BA */
srq->idx_que.umem = ib_umem_get(pd->uobject->context, srq->idx_que.umem = ib_umem_get(pd->uobject->context, ucmd.que_addr,
ucmd.que_addr,
srq->idx_que.buf_size, 0, 0); srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) { if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
"ib_umem_get error for index queue\n"); goto err_user_srq_mtt;
goto err_srq_mtt;
} }
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
if (hr_dev->caps.idx_buf_pg_sz) { if (hr_dev->caps.idx_buf_pg_sz) {
npages = (ib_umem_page_count(srq->idx_que.umem) + npages = (ib_umem_page_count(srq->idx_que.umem) +
(1 << hr_dev->caps.idx_buf_pg_sz) - 1) / (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
(1 << hr_dev->caps.idx_buf_pg_sz); (1 << hr_dev->caps.idx_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages, ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
page_shift, &srq->idx_que.mtt); &srq->idx_que.mtt);
} else { } else {
ret = hns_roce_mtt_init(hr_dev, ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->idx_que.umem), ib_umem_page_count(srq->idx_que.umem),
...@@ -340,9 +265,8 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, ...@@ -340,9 +265,8 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
} }
if (ret) { if (ret) {
dev_err(hr_dev->dev, dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
"hns_roce_mtt_init error for idx que\n"); goto err_user_idx_mtt;
goto err_idx_mtt;
} }
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt, ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
...@@ -350,67 +274,202 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, ...@@ -350,67 +274,202 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
if (ret) { if (ret) {
dev_err(hr_dev->dev, dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n"); "hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_idx_buf; goto err_user_idx_buf;
} }
} else {
u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size, return 0;
(1 << page_shift) * 2,
&srq->buf, page_shift)) { err_user_idx_buf:
ret = -ENOMEM; hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
goto err_buf;
err_user_idx_mtt:
ib_umem_release(srq->idx_que.umem);
err_user_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_user_buf:
ib_umem_release(srq->umem);
return ret;
}
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
u32 page_shift)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
u32 bitmap_num;
int i;
idx_que->entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
bitmap_num = bitmap_num / (8 * sizeof(u64));
idx_que->buf_size = srq->max * idx_que->entry_sz;
if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
&idx_que->idx_buf, page_shift)) {
kfree(idx_que->bitmap);
return -ENOMEM;
} }
for (i = 0; i < bitmap_num; i++)
idx_que->bitmap[i] = ~(0UL);
return 0;
}
static int create_kernel_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
int ret;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
&srq->buf, page_shift))
return -ENOMEM;
srq->head = 0; srq->head = 0;
srq->tail = srq->max - 1; srq->tail = srq->max - 1;
srq->wqe_ctr = 0; srq->wqe_ctr = 0;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE; ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, &srq->mtt);
srq->buf.page_shift, &srq->mtt);
if (ret) if (ret)
goto err_buf; goto err_kernel_buf;
ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf); ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret) if (ret)
goto err_srq_mtt; goto err_kernel_srq_mtt;
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(pd, srq, page_shift); ret = hns_roce_create_idx_que(pd, srq, page_shift);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
ret); goto err_kernel_srq_mtt;
goto err_srq_mtt;
} }
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
/* Init mtt table for idx_que */ /* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages, ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift, srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt); &srq->idx_que.mtt);
if (ret) if (ret)
goto err_create_idx; goto err_kernel_create_idx;
/* Write buffer address into the mtt table */ /* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt, ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf); &srq->idx_que.idx_buf);
if (ret) if (ret)
goto err_idx_buf; goto err_kernel_idx_buf;
srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) { if (!srq->wrid) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_idx_buf; goto err_kernel_idx_buf;
}
return 0;
err_kernel_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_kernel_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
err_kernel_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_kernel_buf:
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
return ret;
}
static void destroy_user_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq)
{
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
ib_umem_release(srq->idx_que.umem);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
ib_umem_release(srq->umem);
}
static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq, int srq_buf_size)
{
kvfree(srq->wrid);
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
}
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_srq *srq;
int srq_desc_size;
int srq_buf_size;
int ret;
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return ERR_PTR(-EINVAL);
srq = kzalloc(sizeof(*srq), GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
srq->max_gs = srq_init_attr->attr.max_sge;
srq_desc_size = max(16, 16 * srq->max_gs);
srq->wqe_shift = ilog2(srq_desc_size);
srq_buf_size = srq->max * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
if (pd->uobject) {
ret = create_user_srq(pd, srq, udata, srq_buf_size);
if (ret) {
dev_err(hr_dev->dev, "Create user srq fail\n");
goto err_srq;
}
} else {
ret = create_kernel_srq(pd, srq, srq_buf_size);
if (ret) {
dev_err(hr_dev->dev, "Create kernel srq fail\n");
goto err_srq;
} }
} }
cqn = ib_srq_has_cq(srq_init_attr->srq_type) ? cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
to_hr_cq(srq_init_attr->ext.cq)->cqn : 0; to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
xrcdn = (srq_init_attr->srq_type == IB_SRQT_XRC) ?
to_hr_xrcd(srq_init_attr->ext.xrc.xrcd)->xrcdn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG; srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, xrcdn, ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0, &srq->mtt,
&srq->mtt, 0, srq); 0, srq);
if (ret) if (ret)
goto err_wrid; goto err_wrid;
...@@ -420,35 +479,20 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, ...@@ -420,35 +479,20 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
if (pd->uobject) { if (pd->uobject) {
if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
ret = -EFAULT; ret = -EFAULT;
goto err_wrid; goto err_srqc_alloc;
} }
} }
return &srq->ibsrq; return &srq->ibsrq;
err_wrid: err_srqc_alloc:
kvfree(srq->wrid); hns_roce_srq_free(hr_dev, srq);
err_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_idx_mtt:
if (udata)
ib_umem_release(srq->idx_que.umem);
err_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
err_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_buf: err_wrid:
if (udata) if (pd->uobject)
ib_umem_release(srq->umem); destroy_user_srq(hr_dev, srq);
else else
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf); destroy_kernel_srq(hr_dev, srq, srq_buf_size);
err_srq: err_srq:
kfree(srq); kfree(srq);
...@@ -478,20 +522,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq) ...@@ -478,20 +522,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq)
return 0; return 0;
} }
struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_srq *srq;
rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (hr_dev->caps.max_srqs - 1));
rcu_read_unlock();
return srq;
}
EXPORT_SYMBOL_GPL(hns_roce_srq_lookup);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev) int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
......
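As a side note on the sizing done in hns_roce_create_srq() in the diff above: max_wr is padded by one slot and rounded up to the next power of two, the WQE size is 16 bytes per SGE with a 16-byte floor, and the WQE buffer is their product. A standalone arithmetic illustration with arbitrary example values (not driver code; the roundup helper is reimplemented here just for the sketch):

```c
/* Standalone check of the SRQ WQE buffer sizing used in hns_roce_create_srq().
 * The attr values are arbitrary examples chosen for illustration only.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int max_wr = 100;	/* ib_srq_init_attr.attr.max_wr */
	unsigned int max_sge = 4;	/* ib_srq_init_attr.attr.max_sge */

	unsigned int max = roundup_pow_of_two_u(max_wr + 1);	/* 128 */
	unsigned int desc = 16 * max_sge;			/* 64 */

	if (desc < 16)		/* matches max(16, 16 * srq->max_gs) */
		desc = 16;

	printf("srq->max = %u, wqe size = %u, wqe buf = %u bytes\n",
	       max, desc, max * desc);			/* 128, 64, 8192 */
	return 0;
}
```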