Commit 8b3021f3 authored by Yang Yingliang, committed by Xie XiuQi

driver: roce: update roce driver from driver team

Sync roce driver from driver team.
Based on c63ba8b3f1dd8882a7cbe237cffc61c7fa1429f8
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 09262653
@@ -7,8 +7,8 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_sysfs.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
hns-roce-hw-v1-objs := hns_roce_hw_v1.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
hns-roce-hw-v2-objs := hns_roce_hw_v2.o
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_sysfs_v2.o
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
@@ -44,42 +45,92 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
#ifdef CONFIG_KERNEL_419
const struct ib_gid_attr *gid_attr;
#else
struct ib_gid_attr gid_attr;
union ib_gid sgid;
int ret;
#endif
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
struct in6_addr in6;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
bool vlan_en = false;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
gid_attr = ah_attr->grh.sgid_attr;
if (is_vlan_dev(gid_attr->ndev))
vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
(rdma_ah_get_port_num(ah_attr) <<
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = grh->sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag);
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
HNS_ROCE_SL_SHIFT);
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
/* Get mac address */
memcpy(&in6, grh->dgid.raw, sizeof(grh->dgid.raw));
if (rdma_is_multicast_addr(&in6)) {
rdma_get_mcast_mac(&in6, ah->av.mac);
} else {
u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
if (!dmac) {
kfree(ah);
return ERR_PTR(-EINVAL);
}
memcpy(ah->av.mac, dmac, ETH_ALEN);
}
#ifdef CONFIG_KERNEL_419
gid_attr = ah_attr->grh.sgid_attr;
if (is_vlan_dev(gid_attr->ndev)) {
vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
vlan_en = true;
}
#else
/* Get source gid */
ret = ib_get_cached_gid(ibpd->device,
rdma_ah_get_port_num(ah_attr),
grh->sgid_index, &sgid, &gid_attr);
if (ret) {
dev_err(dev, "get sgid failed! ret = %d\n", ret);
kfree(ah);
return ERR_PTR(ret);
}
if (gid_attr.ndev) {
if (is_vlan_dev(gid_attr.ndev)) {
vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
vlan_en = true;
}
dev_put(gid_attr.ndev);
}
#endif
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
(rdma_ah_get_port_num(ah_attr) <<
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = grh->sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag);
ah->av.vlan_en = vlan_en;
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
ah->av.sl_tclass_flowlabel =
cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
HNS_ROCE_SL_SHIFT);
ah->av.sl_tclass_flowlabel |=
cpu_to_le32((grh->traffic_class <<
HNS_ROCE_TCLASS_SHIFT) |
grh->flow_label);
ah->av.hop_limit = grh->hop_limit;
}
return &ah->ibah;
}
@@ -30,6 +30,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
@@ -239,9 +240,13 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_srq_table(hr_dev);
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
hns_roce_cleanup_mr_table(hr_dev);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
hns_roce_cleanup_xrcd_table(hr_dev);
hns_roce_cleanup_pd_table(hr_dev);
hns_roce_cleanup_uar_table(hr_dev);
}
@@ -176,17 +176,33 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout)
{
if (hr_dev->is_reset)
return 0;
int ret;
if (hr_dev->hw->rst_prc_mbox) {
ret = hr_dev->hw->rst_prc_mbox(hr_dev);
if (ret == CMD_RST_PRC_SUCCESS)
return 0;
else if (ret == CMD_RST_PRC_EBUSY)
return -EBUSY;
}
if (hr_dev->cmd.use_events)
return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
timeout);
ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
timeout);
else
return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
timeout);
ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
timeout);
if (ret == CMD_RST_PRC_EBUSY)
return -EBUSY;
if (ret && (hr_dev->hw->rst_prc_mbox &&
hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
@@ -53,6 +53,7 @@ enum {
HNS_ROCE_CMD_QUERY_QPC = 0x42,
HNS_ROCE_CMD_MODIFY_CQC = 0x52,
HNS_ROCE_CMD_QUERY_CQC = 0x53,
/* CQC BT commands */
HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
@@ -89,6 +90,18 @@ enum {
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
/* CTX BT commands */
HNS_ROCE_CMD_READ_SCC_CTX_BT0 = 0xa4,
HNS_ROCE_CMD_WRITE_SCC_CTX_BT0 = 0xa5,
/* QPC TIMER commands */
HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33,
HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37,
/* CQC TIMER commands */
HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23,
HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27,
/* EQC commands */
HNS_ROCE_CMD_CREATE_AEQC = 0x80,
HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
@@ -120,6 +133,10 @@ enum {
HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
HNS_ROCE_CMD_2RST_QP = 0x21,
HNS_ROCE_CMD_QUERY_QP = 0x22,
HNS_ROCE_CMD_SW2HW_SRQ = 0x70,
HNS_ROCE_CMD_MODIFY_SRQC = 0x72,
HNS_ROCE_CMD_QUERY_SRQC = 0x73,
HNS_ROCE_CMD_HW2SW_SRQ = 0x74,
};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
@@ -33,6 +33,8 @@
#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H
#include "roce_k_compat.h"
#ifndef assert
#define assert(cond)
#endif
@@ -376,9 +378,6 @@
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
#define ROCEE_VF_MB_CFG0_REG 0x40
#define ROCEE_VF_MB_STATUS_REG 0x58
#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
@@ -6,6 +6,7 @@
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "roce_k_compat.h"
#include "hns_roce_device.h"
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
@@ -88,6 +88,7 @@
#define BITMAP_RR 1
#define MR_TYPE_MR 0x00
#define MR_TYPE_FRMR 0x01
#define MR_TYPE_DMA 0x03
#define PKEY_ID 0xffff
@@ -95,11 +96,16 @@
#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000
#define HNS_ROCE_CEQ_MAX_BURST_NUM 0xffff
#define HNS_ROCE_CEQ_MAX_INTERVAL 0xffff
#define HNS_ROCE_EQ_MAXCNT_MASK 1
#define HNS_ROCE_EQ_PERIOD_MASK 2
#define SERV_TYPE_RC 0
#define SERV_TYPE_RD 1
#define SERV_TYPE_UC 2
#define SERV_TYPE_RD 2
#define SERV_TYPE_UC 1
#define SERV_TYPE_UD 3
#define SERV_TYPE_XRC 5
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
@@ -108,6 +114,12 @@
#define PAGES_SHIFT_24 24
#define PAGES_SHIFT_32 32
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define HNS_ROCE_FRMR_MAX_PA 512
#define SRQ_DB_REG 0x230
enum {
HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
@@ -193,17 +205,51 @@ enum {
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
HNS_ROCE_CAP_FLAG_XRC = BIT(6),
HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
HNS_ROCE_CAP_FLAG_MW = BIT(7),
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
enum hns_roce_mtt_type {
MTT_TYPE_WQE,
MTT_TYPE_CQE,
MTT_TYPE_SRQWQE,
MTT_TYPE_IDX
};
enum {
HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
enum hns_roce_reset_stage {
HNS_ROCE_STATE_NON_RST,
HNS_ROCE_STATE_RST_BEF_DOWN,
HNS_ROCE_STATE_RST_DOWN,
HNS_ROCE_STATE_RST_UNINIT,
HNS_ROCE_STATE_RST_INIT,
HNS_ROCE_STATE_RST_INITED,
};
enum hns_roce_instance_state {
HNS_ROCE_STATE_NON_INIT,
HNS_ROCE_STATE_INIT,
HNS_ROCE_STATE_INITED,
HNS_ROCE_STATE_UNINIT,
};
enum {
HNS_ROCE_RST_DIRECT_RETURN = 0,
};
enum {
CMD_RST_PRC_OTHERS,
CMD_RST_PRC_SUCCESS,
CMD_RST_PRC_EBUSY,
};
#define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_PORT_DOWN 0
@@ -213,6 +259,8 @@ enum {
#define PAGE_ADDR_SHIFT 12
#define HNS_ROCE_DISABLE_DB 1
struct hns_roce_uar {
u64 pfn;
unsigned long index;
@@ -239,6 +287,13 @@ struct hns_roce_pd {
unsigned long pdn;
};
struct hns_roce_xrcd {
struct ib_xrcd ibxrcd;
unsigned long xrcdn;
struct ib_pd *pd;
struct ib_cq *cq;
};
struct hns_roce_bitmap {
/* Bitmap Traversal last a bit which is 1 */
unsigned long last;
@@ -293,6 +348,16 @@ struct hns_roce_mtt {
enum hns_roce_mtt_type mtt_type;
};
struct hns_roce_mw {
struct ib_mw ibmw;
u32 pdn;
u32 rkey;
int enabled; /* MW's active status */
u32 pbl_buf_pg_sz;
u32 pbl_ba_pg_sz;
u32 pbl_hop_num;
};
/* Only support 4K page size for mr register */
#define MR_SIZE_4K 0
@@ -304,6 +369,7 @@ struct hns_roce_mr {
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
u32 access;/* Access permission of MR */
u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */
@@ -330,6 +396,10 @@ struct hns_roce_mr_table {
struct hns_roce_hem_table mtpt_table;
struct hns_roce_buddy mtt_cqe_buddy;
struct hns_roce_hem_table mtt_cqe_table;
struct hns_roce_buddy mtt_srqwqe_buddy;
struct hns_roce_hem_table mtt_srqwqe_table;
struct hns_roce_buddy mtt_idx_buddy;
struct hns_roce_hem_table mtt_idx_table;
};
struct hns_roce_wq {
@@ -420,9 +490,37 @@ struct hns_roce_cq {
struct completion free;
};
struct hns_roce_idx_que {
struct hns_roce_buf idx_buf;
int entry_sz;
u32 buf_size;
struct ib_umem *umem;
struct hns_roce_mtt mtt;
u64 *bitmap;
};
struct hns_roce_srq {
struct ib_srq ibsrq;
int srqn;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
unsigned long srqn;
int max;
int max_gs;
int wqe_shift;
void __iomem *db_reg_l;
refcount_t refcount;
struct completion free;
struct hns_roce_buf buf;
u64 *wrid;
struct ib_umem *umem;
struct hns_roce_mtt mtt;
struct hns_roce_idx_que idx_que;
spinlock_t lock;
int head;
int tail;
u16 wqe_ctr;
struct mutex mutex;
};
struct hns_roce_uar_table {
@@ -435,6 +533,14 @@ struct hns_roce_qp_table {
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
struct hns_roce_hem_table trrl_table;
struct hns_roce_hem_table scc_ctx_table;
};
struct hns_roce_qpc_timer_table {
struct hns_roce_bitmap bitmap;
spinlock_t lock;
struct radix_tree_root tree;
struct hns_roce_hem_table table;
};
struct hns_roce_cq_table {
@@ -444,6 +550,20 @@ struct hns_roce_cq_table {
struct hns_roce_hem_table table;
};
struct hns_roce_cqc_timer_table {
struct hns_roce_bitmap bitmap;
spinlock_t lock;
struct radix_tree_root tree;
struct hns_roce_hem_table table;
};
struct hns_roce_srq_table {
struct hns_roce_bitmap bitmap;
spinlock_t lock;
struct radix_tree_root tree;
struct hns_roce_hem_table table;
};
struct hns_roce_raq_table {
struct hns_roce_buf_list *e_raq_buf;
};
@@ -457,6 +577,7 @@ struct hns_roce_av {
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[6];
__le16 vlan;
bool vlan_en;
};
struct hns_roce_ah {
@@ -541,6 +662,7 @@ struct hns_roce_qp {
struct hns_roce_mtt mtt;
u32 buff_size;
struct mutex mutex;
u16 xrcdn;
u8 port;
u8 phy_port;
u8 sl;
@@ -576,7 +698,7 @@ struct hns_roce_ib_iboe {
enum {
HNS_ROCE_EQ_STAT_INVALID = 0,
HNS_ROCE_EQ_STAT_VALID = 2,
HNS_ROCE_EQ_STAT_VALID = 1,
};
struct hns_roce_ceqe {
@@ -592,6 +714,12 @@ struct hns_roce_aeqe {
u32 rsv1;
} qp_event;
struct {
__le32 srq;
u32 rsv0;
u32 rsv1;
} srq_event;
struct {
__le32 cq;
u32 rsv0;
@@ -656,19 +784,29 @@ struct hns_roce_eq_table {
};
struct hns_roce_caps {
u64 fw_ver;
u8 num_ports;
int gid_table_len[HNS_ROCE_MAX_PORTS];
int pkey_table_len[HNS_ROCE_MAX_PORTS];
int local_ca_ack_delay;
int num_uars;
u32 phy_num_uars;
u32 max_sq_sg; /* 2 */
u32 max_sq_sg;
u32 max_sq_inline; /* 32 */
u32 max_rq_sg; /* 2 */
int num_qps; /* 256k */
u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */
u32 max_rq_sg;
u32 max_extend_sg;
int num_qps;
int reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
u32 max_srq_sg;
int num_srqs;
u32 max_wqes;
u32 max_srqs;
u32 max_srq_wrs;
u32 max_srq_sges;
u32 max_sq_desc_sz;
u32 max_rq_desc_sz;
u32 max_srq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
@@ -677,16 +815,22 @@ struct hns_roce_caps {
int min_cqes;
u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
int reserved_srqs;
u32 max_srqwqes;
int num_aeq_vectors;
int num_comp_vectors;
int num_other_vectors;
int num_mtpts;
u32 num_mtt_segs;
u32 num_cqe_segs;
u32 num_srqwqe_segs;
u32 num_idx_segs;
int reserved_mrws;
int reserved_uars;
int num_pds;
int reserved_pds;
int num_xrcds;
int reserved_xrcds;
u32 mtt_entry_sz;
u32 cq_entry_sz;
u32 page_size_cap;
@@ -696,6 +840,11 @@ struct hns_roce_caps {
int irrl_entry_sz;
int trrl_entry_sz;
int cqc_entry_sz;
int srqc_entry_sz;
int idx_entry_sz;
int scc_ctx_entry_sz;
int qpc_timer_entry_sz;
int cqc_timer_entry_sz;
u32 pbl_ba_pg_sz;
u32 pbl_buf_pg_sz;
u32 pbl_hop_num;
@@ -703,9 +852,12 @@ struct hns_roce_caps {
int ceqe_depth;
enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 qpc_timer_bt_num;
u32 srqc_bt_num;
u32 cqc_bt_num;
u32 cqc_timer_bt_num;
u32 mpt_bt_num;
u32 scc_ctx_bt_num;
u32 qpc_ba_pg_sz;
u32 qpc_buf_pg_sz;
u32 qpc_hop_num;
@@ -721,9 +873,24 @@ struct hns_roce_caps {
u32 mtt_ba_pg_sz;
u32 mtt_buf_pg_sz;
u32 mtt_hop_num;
u32 scc_ctx_ba_pg_sz;
u32 scc_ctx_buf_pg_sz;
u32 scc_ctx_hop_num;
u32 qpc_timer_ba_pg_sz;
u32 qpc_timer_buf_pg_sz;
u32 qpc_timer_hop_num;
u32 cqc_timer_ba_pg_sz;
u32 cqc_timer_buf_pg_sz;
u32 cqc_timer_hop_num;
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
u32 srqwqe_ba_pg_sz;
u32 srqwqe_buf_pg_sz;
u32 srqwqe_hop_num;
u32 idx_ba_pg_sz;
u32 idx_buf_pg_sz;
u32 idx_hop_num;
u32 eqe_ba_pg_sz;
u32 eqe_buf_pg_sz;
u32 eqe_hop_num;
@@ -738,9 +905,40 @@ struct hns_roce_work {
struct hns_roce_dev *hr_dev;
struct work_struct work;
u32 qpn;
u32 cqn;
int event_type;
int sub_type;
};
struct hns_roce_stat {
int cqn;
int srqn;
u32 ceqn;
u32 qpn;
u32 aeqn;
int key;
};
struct hns_roce_dfx_hw {
int (*query_cqc_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_cmd_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_qpc_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_aeqc_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_srqc_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_pkt_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_mpt_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*query_ceqc_stat)(struct hns_roce_dev *hr_dev,
char *buf, int *desc);
int (*modify_eq)(struct hns_roce_dev *hr_dev,
u16 eq_count, u16 eq_period, u16 type);
};
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
@@ -753,8 +951,14 @@ struct hns_roce_hw {
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event);
int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
#ifdef CONFIG_KERNEL_419
int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
#else
int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
union ib_gid *gid, const struct ib_gid_attr *attr);
#endif
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
@@ -764,6 +968,8 @@ struct hns_roce_hw {
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
@@ -778,10 +984,19 @@ struct hns_roce_hw {
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
int (*destroy_qp)(struct ib_qp *ibqp);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
#ifdef CONFIG_KERNEL_419
int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
#else
int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
#endif
int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
@@ -789,6 +1004,22 @@ struct hns_roce_hw {
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
void (*write_srqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn,
void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx,
dma_addr_t dma_handle_wqe,
dma_addr_t dma_handle_idx);
int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
#ifdef CONFIG_KERNEL_419
int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
#else
int (*post_srq_recv)(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
#endif
};
struct hns_roce_dev {
@@ -802,6 +1033,8 @@ struct hns_roce_dev {
spinlock_t bt_cmd_lock;
bool active;
bool is_reset;
bool dis_db;
unsigned long reset_cnt;
struct hns_roce_ib_iboe iboe;
struct list_head pgdir_list;
@@ -820,21 +1053,28 @@ struct hns_roce_dev {
struct hns_roce_cmdq cmd;
struct hns_roce_bitmap pd_bitmap;
struct hns_roce_bitmap xrcd_bitmap;
struct hns_roce_uar_table uar_table;
struct hns_roce_mr_table mr_table;
struct hns_roce_cq_table cq_table;
struct hns_roce_srq_table srq_table;
struct hns_roce_qp_table qp_table;
struct hns_roce_eq_table eq_table;
struct hns_roce_qpc_timer_table qpc_timer_table;
struct hns_roce_cqc_timer_table cqc_timer_table;
int cmd_mod;
int loop_idc;
u32 sdb_offset;
u32 odb_offset;
dma_addr_t tptr_dma_addr; /*only for hw v1*/
u32 tptr_size; /*only for hw v1*/
dma_addr_t uar2_dma_addr;
u32 uar2_size;
const struct hns_roce_hw *hw;
const struct hns_roce_dfx_hw *dfx;
void *priv;
struct workqueue_struct *irq_workq;
struct hns_roce_stat hr_stat;
u32 func_num;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -853,6 +1093,11 @@ static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
return container_of(ibpd, struct hns_roce_pd, ibpd);
}
static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}
static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
return container_of(ibah, struct hns_roce_ah, ibah);
@@ -863,6 +1108,11 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
return container_of(ibmr, struct hns_roce_mr, ibmr);
}
static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
return container_of(ibmw, struct hns_roce_mw, ibmw);
}
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -926,16 +1176,20 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
@@ -961,6 +1215,11 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd);
struct ib_xrcd *hns_roce_ib_alloc_xrcd(struct ib_device *ib_dev,
struct ib_ucontext *context,
struct ib_udata *udata);
int hns_roce_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
@@ -968,12 +1227,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
@@ -982,6 +1249,16 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem);
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq);
struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn);
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1001,7 +1278,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
int cnt);
__be32 send_ieth(const struct ib_send_wr *wr);
__be32 send_ieth(struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
@@ -1023,8 +1300,10 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev);
void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev);
#endif /* _HNS_ROCE_DEVICE_H */
@@ -30,6 +30,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <linux/platform_device.h>
#include "hns_roce_device.h"
@@ -45,8 +46,13 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
(hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
(hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
(hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
(hr_dev->caps.scc_ctx_hop_num && type == HEM_TYPE_SCC_CTX) ||
(hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
(hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
(hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
(hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))
(hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
(hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
(hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
return true;
return false;
@@ -123,6 +129,30 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
mhop->hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SCC_CTX:
mhop->buf_chunk_size = 1 << (hr_dev->caps.scc_ctx_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.scc_ctx_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = hr_dev->caps.scc_ctx_bt_num;
mhop->hop_num = hr_dev->caps.scc_ctx_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_SRQC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
@@ -147,6 +177,22 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
mhop->ba_l0_num = mhop->bt_chunk_size / 8;
mhop->hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_SRQWQE:
mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = mhop->bt_chunk_size / 8;
mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = mhop->bt_chunk_size / 8;
mhop->hop_num = hr_dev->caps.idx_hop_num;
break;
default:
dev_err(dev, "Table %d not support multi-hop addressing!\n",
table->type);
@@ -157,7 +203,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
return 0;
/*
* QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
* QPC/MTPT/CQC/SRQC/SCC_CTX alloc hem for buffer pages.
* MTT/CQE alloc hem for bt pages.
*/
bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
@@ -468,7 +514,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
}
/*
* alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
* alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCC_CTX.
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
@@ -575,6 +621,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
mutex_unlock(&table->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_table_get);
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
@@ -640,7 +687,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
}
/*
* free buffer space chunk for QPC/MTPT/CQC/SRQC.
* free buffer space chunk for QPC/MTPT/CQC/SRQC/SCC_CTX.
* free bt space chunk for MTT/CQE.
*/
hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
@@ -717,6 +764,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(hns_roce_table_put);
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
@@ -886,6 +934,30 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
num_bt_l0 = hr_dev->caps.cqc_bt_num;
hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SCC_CTX:
buf_chunk_size = 1 << (hr_dev->caps.scc_ctx_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.scc_ctx_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.scc_ctx_bt_num;
hop_num = hr_dev->caps.scc_ctx_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_SRQC:
buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
@@ -906,6 +978,18 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_SRQWQE:
buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.idx_hop_num;
break;
default:
dev_err(dev,
"Table %d not support to init hem table here!\n",
@@ -1041,7 +1125,25 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
if ((hr_dev->caps.num_idx_segs))
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->mr_table.mtt_idx_table);
if (hr_dev->caps.num_srqwqe_segs)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->mr_table.mtt_srqwqe_table);
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->srq_table.table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
if (hr_dev->caps.qpc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qpc_timer_table.table);
if (hr_dev->caps.cqc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->cqc_timer_table.table);
if (hr_dev->caps.scc_ctx_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.scc_ctx_table);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.trrl_table);
@@ -44,10 +44,15 @@ enum {
HEM_TYPE_MTPT,
HEM_TYPE_CQC,
HEM_TYPE_SRQC,
HEM_TYPE_SCC_CTX,
HEM_TYPE_QPC_TIMER,
HEM_TYPE_CQC_TIMER,
/* UNMAP HEM */
HEM_TYPE_MTT,
HEM_TYPE_CQE,
HEM_TYPE_SRQWQE,
HEM_TYPE_IDX,
HEM_TYPE_IRRL,
HEM_TYPE_TRRL,
};
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2016-2017 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_v2_mpt_entry *mpt_ctx;
struct hns_roce_cmd_mailbox *mailbox;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *mpt;
int ret;
int i;
char *buff;
int key = hr_dev->hr_stat.key;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_READ_MPT_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba));
else
goto err_cmd;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_READ_MPT_BT1,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba));
else
goto err_cmd;
mpt_ctx = kzalloc(sizeof(*mpt_ctx), GFP_KERNEL);
if (!mpt_ctx) {
ret = -ENOMEM;
goto err_cmd;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0,
HNS_ROCE_CMD_QUERY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(mpt_ctx, mailbox->buf, sizeof(*mpt_ctx));
else
goto err_mailbox;
*desc += sprintf(buff + *desc, "MPT(0x%x) BT0: 0x%llx\n", key, bt0_ba);
*desc += sprintf(buff + *desc, "MPT(0x%x) BT1: 0x%llx\n", key, bt1_ba);
mpt = (int *)mpt_ctx;
for (i = 0; i < (sizeof(*mpt_ctx) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"MPT(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
key, *mpt, *(mpt + 1), *(mpt + 2),
*(mpt + 3), *(mpt + 4), *(mpt + 5),
*(mpt + 6), *(mpt + 7));
mpt += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(mpt_ctx);
err_cmd:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
kfree(buff);
return ret;
}
int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_srq_context *srq_context;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *srqc;
int ret;
int i = 0;
char *buff;
int srqn = hr_dev->hr_stat.srqn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0,
HNS_ROCE_CMD_READ_SRQC_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba));
else
goto err_cmd;
srq_context = kzalloc(sizeof(*srq_context), GFP_KERNEL);
if (!srq_context) {
ret = -ENOMEM;
goto err_cmd;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0,
HNS_ROCE_CMD_QUERY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
else
goto err_mailbox;
*desc += sprintf(buff + *desc,
"SRQC(0x%x) BT0: 0x%llx\n", srqn, bt0_ba);
*desc += sprintf(buff + *desc,
"SRQC(0x%x) BT1: 0x%llx\n", srqn, bt1_ba);
srqc = (int *)srq_context;
for (i = 0; i < (sizeof(*srq_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"SRQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
srqn, *srqc, *(srqc + 1), *(srqc + 2),
*(srqc + 3), *(srqc + 4), *(srqc + 5),
*(srqc + 6), *(srqc + 7));
srqc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(srq_context);
err_cmd:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
kfree(buff);
return ret;
}
int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_v2_qp_context *qp_context;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *qpc;
int ret;
int i = 0;
char *buff;
int qpn = hr_dev->hr_stat.qpn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_READ_QPC_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba));
else
goto err_cmd;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_READ_QPC_BT1,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba));
else
goto err_cmd;
qp_context = kzalloc(sizeof(*qp_context), GFP_KERNEL);
if (!qp_context) {
ret = -ENOMEM;
goto err_cmd;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(qp_context, mailbox->buf, sizeof(*qp_context));
else
goto err_mailbox;
*desc += sprintf(buff + *desc, "QPC(0x%x) BT0: 0x%llx\n", qpn, bt0_ba);
*desc += sprintf(buff + *desc, "QPC(0x%x) BT1: 0x%llx\n", qpn, bt1_ba);
qpc = (int *)qp_context;
for (i = 0; i < (sizeof(*qp_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"QPC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
qpn, *qpc, *(qpc + 1), *(qpc + 2),
*(qpc + 3), *(qpc + 4), *(qpc + 5),
*(qpc + 6), *(qpc + 7));
qpc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(qp_context);
err_cmd:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
kfree(buff);
return ret;
}
int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_eq_context *eq_context;
int *aeqc;
int ret;
int i = 0;
char *buff;
int aeqn;
aeqn = hr_dev->hr_stat.aeqn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_aeqc_buff;
}
eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL);
if (!eq_context) {
ret = -ENOMEM;
goto err_context;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, aeqn, 0,
HNS_ROCE_CMD_QUERY_AEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(eq_context, mailbox->buf, sizeof(*eq_context));
else
goto err_mailbox;
aeqc = (int *)eq_context;
for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"AEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
aeqn, *aeqc, *(aeqc + 1), *(aeqc + 2),
*(aeqc + 3), *(aeqc + 4), *(aeqc + 5),
*(aeqc + 6), *(aeqc + 7));
aeqc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(eq_context);
err_context:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_aeqc_buff:
kfree(buff);
return ret;
}
#define CMD_NUM_QUERY_PKT_CNT (8)
int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev,
char *buf, int *buff_size)
{
struct hns_roce_cmq_desc desc[CMD_NUM_QUERY_PKT_CNT] = { {0} };
struct rdfx_query_pkt_cnt *resp_query[CMD_NUM_QUERY_PKT_CNT];
struct hns_roce_cmq_desc desc_cqe = {0};
struct rdfx_query_cqe_cnt *resp_cqe =
(struct rdfx_query_cqe_cnt *)desc_cqe.data;
struct hns_roce_cmq_desc desc_cnp_rx = {0};
struct rdfx_query_cnp_rx_cnt *resp_cnp_rx =
(struct rdfx_query_cnp_rx_cnt *)desc_cnp_rx.data;
struct hns_roce_cmq_desc desc_cnp_tx = {0};
struct rdfx_query_cnp_tx_cnt *resp_cnp_tx =
(struct rdfx_query_cnp_tx_cnt *)desc_cnp_tx.data;
int status;
int i;
char *buff;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
for (i = 0; i < CMD_NUM_QUERY_PKT_CNT; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_QUEYR_PKT_CNT, true);
if (i < (CMD_NUM_QUERY_PKT_CNT - 1))
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
resp_query[i] = (struct rdfx_query_pkt_cnt *)desc[i].data;
}
status = hns_roce_cmq_send(hr_dev, desc, CMD_NUM_QUERY_PKT_CNT);
if (status)
return status;
hns_roce_cmq_setup_basic_desc(&desc_cqe,
HNS_ROCE_OPC_QUEYR_CQE_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cqe, 1);
if (status)
return status;
if (hr_dev->pci_dev->revision == 0x21) {
hns_roce_cmq_setup_basic_desc(&desc_cnp_rx,
HNS_ROCE_OPC_QUEYR_CNP_RX_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cnp_rx, 1);
if (status)
return status;
hns_roce_cmq_setup_basic_desc(&desc_cnp_tx,
HNS_ROCE_OPC_QUEYR_CNP_TX_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cnp_tx, 1);
if (status)
return status;
}
*buff_size += sprintf(buff + *buff_size,
"RX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->rc_pkt_num, resp_query[1]->rc_pkt_num,
resp_query[2]->rc_pkt_num, resp_query[3]->rc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->uc_pkt_num, resp_query[1]->uc_pkt_num,
resp_query[2]->uc_pkt_num, resp_query[3]->uc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->ud_pkt_num, resp_query[1]->ud_pkt_num,
resp_query[2]->ud_pkt_num, resp_query[3]->ud_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->xrc_pkt_num, resp_query[1]->xrc_pkt_num,
resp_query[2]->xrc_pkt_num, resp_query[3]->xrc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->total_pkt_num, resp_query[1]->total_pkt_num,
resp_query[2]->total_pkt_num, resp_query[3]->total_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"RX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[0]->error_pkt_num, resp_query[1]->error_pkt_num,
resp_query[2]->error_pkt_num, resp_query[3]->error_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->rc_pkt_num, resp_query[5]->rc_pkt_num,
resp_query[6]->rc_pkt_num, resp_query[7]->rc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->uc_pkt_num, resp_query[5]->uc_pkt_num,
resp_query[6]->uc_pkt_num, resp_query[7]->uc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->ud_pkt_num, resp_query[5]->ud_pkt_num,
resp_query[6]->ud_pkt_num, resp_query[7]->ud_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->xrc_pkt_num, resp_query[5]->xrc_pkt_num,
resp_query[6]->xrc_pkt_num, resp_query[7]->xrc_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->total_pkt_num, resp_query[5]->total_pkt_num,
resp_query[6]->total_pkt_num, resp_query[7]->total_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"TX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_query[4]->error_pkt_num, resp_query[5]->error_pkt_num,
resp_query[6]->error_pkt_num, resp_query[7]->error_pkt_num);
*buff_size += sprintf(buff + *buff_size,
"CQE : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_cqe->port0_cqe, resp_cqe->port1_cqe,
resp_cqe->port2_cqe, resp_cqe->port3_cqe);
*buff_size += sprintf(buff + *buff_size,
"CNP RX : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_cnp_rx->port0_cnp_rx, resp_cnp_rx->port1_cnp_rx,
resp_cnp_rx->port2_cnp_rx, resp_cnp_rx->port3_cnp_rx);
*buff_size += sprintf(buff + *buff_size,
"CNP TX : 0x%08x 0x%08x 0x%08x 0x%08x\n",
resp_cnp_tx->port0_cnp_tx, resp_cnp_tx->port1_cnp_tx,
resp_cnp_tx->port2_cnp_tx, resp_cnp_tx->port3_cnp_tx);
memcpy(buf, buff, *buff_size);
kfree(buff);
return status;
}
int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_eq_context *eq_context;
int *ceqc;
int ret;
int i = 0;
char *buff;
int ceqn = hr_dev->hr_stat.ceqn;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_ceqc_buff;
}
eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL);
if (!eq_context) {
ret = -ENOMEM;
goto err_context;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, ceqn, 0,
HNS_ROCE_CMD_QUERY_CEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(eq_context, mailbox->buf, sizeof(*eq_context));
else
goto err_mailbox;
ceqc = (int *)eq_context;
for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) {
*desc += sprintf(buff + *desc,
"CEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
ceqn, *ceqc, *(ceqc + 1), *(ceqc + 2),
*(ceqc + 3), *(ceqc + 4), *(ceqc + 5),
*(ceqc + 6), *(ceqc + 7));
ceqc += 8;
}
memcpy(buf, buff, *desc);
err_mailbox:
kfree(eq_context);
err_context:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_ceqc_buff:
kfree(buff);
return ret;
}
int hns_roce_v2_query_cmd_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_cmq_desc desc_cnt;
struct hns_roce_query_mbdb_cnt *resp_cnt =
(struct hns_roce_query_mbdb_cnt *)desc_cnt.data;
struct hns_roce_cmq_desc desc_dfx;
int status;
char *buff;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
hns_roce_cmq_setup_basic_desc(&desc_cnt,
HNS_ROCE_OPC_QUEYR_MBDB_CNT, true);
status = hns_roce_cmq_send(hr_dev, &desc_cnt, 1);
if (status)
return status;
hns_roce_cmq_setup_basic_desc(&desc_dfx,
HNS_ROCE_OPC_QUEYR_MDB_DFX, true);
status = hns_roce_cmq_send(hr_dev, &desc_dfx, 1);
if (status)
return status;
*desc += sprintf(buff + *desc, "MB ISSUE CNT : 0x%08x\n",
resp_cnt->mailbox_issue_cnt);
*desc += sprintf(buff + *desc, "MB EXEC CNT : 0x%08x\n",
resp_cnt->mailbox_exe_cnt);
*desc += sprintf(buff + *desc, "DB ISSUE CNT : 0x%08x\n",
resp_cnt->doorbell_issue_cnt);
*desc += sprintf(buff + *desc, "DB EXEC CNT : 0x%08x\n",
resp_cnt->doorbell_exe_cnt);
*desc += sprintf(buff + *desc, "EQDB ISSUE CNT : 0x%08x\n",
resp_cnt->eq_doorbell_issue_cnt);
*desc += sprintf(buff + *desc, "EQDB EXEC CNT : 0x%08x\n",
resp_cnt->eq_doorbell_exe_cnt);
memcpy(buf, buff, *desc);
kfree(buff);
return status;
}
int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev,
u64 *bt0_ba, u64 *bt1_ba, int cqn,
struct hns_roce_v2_cq_context *cq_context)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_READ_CQC_BT0,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(bt0_ba, mailbox->buf, sizeof(*bt0_ba));
else {
pr_err("QUERY CQ bt0 cmd process error\n");
goto out;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_READ_CQC_BT1,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(bt1_ba, mailbox->buf, sizeof(*bt1_ba));
else {
pr_err("QUERY CQ bt1 cmd process error\n");
goto out;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_QUERY_CQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
memcpy(cq_context, mailbox->buf, sizeof(*cq_context));
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc)
{
struct hns_roce_v2_cq_context *cq_context;
u64 bt0_ba = 0;
u64 bt1_ba = 0;
int *cqc;
int i, ret;
int cqn = hr_dev->hr_stat.cqn;
cq_context = kzalloc(sizeof(*cq_context), GFP_KERNEL);
if (!cq_context)
return -ENOMEM;
ret = hns_roce_v2_query_cqc(hr_dev, &bt0_ba, &bt1_ba, cqn, cq_context);
if (ret)
goto out;
*desc += sprintf(buf + *desc, "CQC(0x%x) BT0: 0x%llx\n", cqn, bt0_ba);
*desc += sprintf(buf + *desc, "CQC(0x%x) BT1: 0x%llx\n", cqn, bt1_ba);
cqc = (int *)cq_context;
for (i = 0; i < (sizeof(*cq_context) >> 2); i += 8) {
*desc += sprintf(buf + *desc,
"CQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
cqn, *cqc, *(cqc + 1), *(cqc + 2),
*(cqc + 3), *(cqc + 4), *(cqc + 5),
*(cqc + 6), *(cqc + 7));
cqc += 8;
}
out:
kfree(cq_context);
return ret;
}
int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev,
u16 eq_count, u16 eq_period, u16 type)
{
struct hns_roce_eq *eq = hr_dev->eq_table.eq;
struct hns_roce_eq_context *eqc;
struct hns_roce_eq_context *eqc_mask;
struct hns_roce_cmd_mailbox *mailbox;
unsigned int eq_cmd;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
eqc = mailbox->buf;
eqc_mask = (struct hns_roce_eq_context *)mailbox->buf + 1;
memset(eqc_mask, 0xff, sizeof(*eqc_mask));
if (type == HNS_ROCE_EQ_MAXCNT_MASK) {
roce_set_field(eqc->byte_12,
HNS_ROCE_EQC_MAX_CNT_M,
HNS_ROCE_EQC_MAX_CNT_S, eq_count);
roce_set_field(eqc_mask->byte_12,
HNS_ROCE_EQC_MAX_CNT_M,
HNS_ROCE_EQC_MAX_CNT_S, 0);
} else if (type == HNS_ROCE_EQ_PERIOD_MASK) {
roce_set_field(eqc->byte_12,
HNS_ROCE_EQC_PERIOD_M,
HNS_ROCE_EQC_PERIOD_S, eq_period);
roce_set_field(eqc_mask->byte_12,
HNS_ROCE_EQC_PERIOD_M,
HNS_ROCE_EQC_PERIOD_S, 0);
}
eq_cmd = HNS_ROCE_CMD_MODIFY_CEQC;
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 1,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
dev_err(hr_dev->dev, "MODIFY EQ Failed to cmd mailbox.\n");
return ret;
}
@@ -43,6 +43,8 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
static int loopback;
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
dseg->lkey = cpu_to_le32(sg->lkey);
@@ -58,9 +60,14 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
rseg->len = 0;
}
#ifdef CONFIG_KERNEL_419
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
#else
static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
#endif
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
@@ -347,9 +354,14 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
return ret;
}
#ifdef CONFIG_KERNEL_419
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
#else
static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
#endif
{
int ret = 0;
int nreq = 0;
@@ -999,8 +1011,12 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
struct device *dev = &hr_dev->pdev->dev;
#ifdef CONFIG_KERNEL_419
struct ib_send_wr send_wr;
const struct ib_send_wr *bad_wr;
#else
struct ib_send_wr send_wr, *bad_wr;
#endif
int ret;
memset(&send_wr, 0, sizeof(send_wr));
@@ -1398,8 +1414,8 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
if (!tptr_buf->buf)
return -ENOMEM;
hr_dev->tptr_dma_addr = tptr_buf->map;
hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
hr_dev->uar2_dma_addr = tptr_buf->map;
hr_dev->uar2_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
return 0;
}
@@ -1480,15 +1496,22 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
}
fwnode = &dsaf_node->fwnode;
} else if (is_acpi_device_node(dev->fwnode)) {
#ifdef CONFIG_KERNEL_419
struct fwnode_reference_args args;
#else
struct acpi_reference_args args;
#endif
ret = acpi_node_get_property_reference(dev->fwnode,
"dsaf-handle", 0, &args);
if (ret) {
dev_err(dev, "could not find dsaf-handle\n");
return ret;
}
#ifdef CONFIG_KERNEL_419
fwnode = args.fwnode;
#else
fwnode = acpi_fwnode_handle(args.adev);
#endif
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
@@ -1776,9 +1799,15 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
return 0;
}
#ifdef CONFIG_KERNEL_419
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
#else
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
int gid_index, union ib_gid *gid,
const struct ib_gid_attr *attr)
#endif
{
u32 *p = NULL;
u8 gid_idx = 0;
@@ -4917,14 +4946,24 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
continue;
pdev = of_find_device_by_node(net_node);
} else if (is_acpi_device_node(dev->fwnode)) {
#ifdef CONFIG_KERNEL_419
struct fwnode_reference_args args;
#else
struct acpi_reference_args args;
struct fwnode_handle *fwnode;
#endif
ret = acpi_node_get_property_reference(dev->fwnode,
"eth-handle",
i, &args);
if (ret)
continue;
#ifdef CONFIG_KERNEL_419
pdev = hns_roce_find_pdev(args.fwnode);
#else
fwnode = acpi_fwnode_handle(args.adev);
pdev = hns_roce_find_pdev(fwnode);
#endif
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
@@ -4954,7 +4993,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* cmd issue mode: 0 is poll, 1 is event */
hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0;
hr_dev->loop_idc = loopback;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
@@ -5067,3 +5106,5 @@ MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
module_param(loopback, int, 0444);
MODULE_PARM_DESC(loopback, "default: 0");
@@ -30,9 +30,22 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/sched.h>
#ifdef HAVE_LINUX_MM_H
#include <linux/mm.h>
#else
#include <linux/sched/mm.h>
#endif
#ifdef HAVE_LINUX_SCHED_H
#include <linux/sched.h>
#else
#include <linux/sched/task.h>
#endif
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -74,20 +87,33 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
#ifdef CONFIG_NEW_KERNEL
#ifdef CONFIG_KERNEL_419
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
#else
static int hns_roce_add_gid(const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context)
#endif
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
u8 port = attr->port_num - 1;
unsigned long flags;
int ret;
if (port >= hr_dev->caps.num_ports)
if (port >= hr_dev->caps.num_ports ||
attr->index > hr_dev->caps.gid_table_len[port]) {
dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n",
port, attr->index);
return -EINVAL;
}
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
#ifdef CONFIG_KERNEL_419
ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
#else
ret = hr_dev->hw->set_gid(hr_dev, port, attr->index,
(union ib_gid *)gid, attr);
#endif
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
return ret;
@@ -112,6 +138,55 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
return ret;
}
#else
static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(device);
u8 port = port_num - 1;
unsigned long flags;
int ret;
if (port >= hr_dev->caps.num_ports ||
index > hr_dev->caps.gid_table_len[port]) {
dev_err(hr_dev->dev, "add gid failed. port - %d, index - %d\n",
port, index);
return -EINVAL;
}
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
attr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
return ret;
}
static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
unsigned int index, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(device);
struct ib_gid_attr zattr = { };
union ib_gid zgid = { {0} };
u8 port = port_num - 1;
unsigned long flags;
int ret;
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, &zattr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
return ret;
}
#endif
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
unsigned long event)
......@@ -196,6 +271,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
memset(props, 0, sizeof(*props));
props->fw_ver = hr_dev->caps.fw_ver;
props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
props->max_mr_size = (u64)(~(0ULL));
props->page_size_cap = hr_dev->caps.page_size_cap;
......@@ -206,8 +282,14 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_RC_RNR_NAK_GEN;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
props->device_cap_flags |= IB_DEVICE_XRC;
#ifdef CONFIG_KERNEL_419
props->max_send_sge = hr_dev->caps.max_sq_sg;
props->max_recv_sge = hr_dev->caps.max_rq_sg;
#else
props->max_sge = min(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
#endif
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
......@@ -215,10 +297,26 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_pd = hr_dev->caps.num_pds;
props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
props->atomic_cap = IB_ATOMIC_NONE;
props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
props->max_srq = hr_dev->caps.max_srqs;
props->max_srq_wr = hr_dev->caps.max_srq_wrs;
props->max_srq_sge = hr_dev->caps.max_srq_sges;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
props->max_mw = hr_dev->caps.num_mtpts;
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_WINDOW_TYPE_2B;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
return 0;
}
......@@ -292,6 +390,12 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
return IB_LINK_LAYER_ETHERNET;
}
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
union ib_gid *gid)
{
return 0;
}
static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
u16 *pkey)
{
......@@ -434,12 +538,11 @@ static int hns_roce_mmap(struct ib_ucontext *context,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
hr_dev->tptr_size) {
/* vm_pgoff: 1 -- TPTR */
} else if (vma->vm_pgoff == 1 && hr_dev->uar2_dma_addr &&
hr_dev->uar2_size) {
if (io_remap_pfn_range(vma, vma->vm_start,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
hr_dev->tptr_size,
hr_dev->uar2_dma_addr >> PAGE_SHIFT,
hr_dev->uar2_size,
vma->vm_page_prot))
return -EAGAIN;
} else
......@@ -508,7 +611,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
if (!strlen(ib_dev->name))
strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
......@@ -532,11 +636,18 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
(1ULL << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV) |
(1ULL << IB_USER_VERBS_CMD_CREATE_XSRQ);
#ifdef MODIFY_CQ_MASK
ib_dev->uverbs_ex_cmd_mask |=
(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
#endif
/* HCA||device||port */
ib_dev->modify_device = hns_roce_modify_device;
ib_dev->query_device = hns_roce_query_device;
......@@ -544,6 +655,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->modify_port = hns_roce_modify_port;
ib_dev->get_link_layer = hns_roce_get_link_layer;
ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
ib_dev->add_gid = hns_roce_add_gid;
ib_dev->del_gid = hns_roce_del_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
......@@ -559,6 +671,12 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->create_ah = hns_roce_create_ah;
ib_dev->query_ah = hns_roce_query_ah;
ib_dev->destroy_ah = hns_roce_destroy_ah;
/* SRQ */
ib_dev->create_srq = hns_roce_create_srq;
ib_dev->modify_srq = hr_dev->hw->modify_srq;
ib_dev->query_srq = hr_dev->hw->query_srq;
ib_dev->destroy_srq = hns_roce_destroy_srq;
ib_dev->post_srq_recv = hr_dev->hw->post_srq_recv;
/* QP */
ib_dev->create_qp = hns_roce_create_qp;
......@@ -584,11 +702,36 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
}
/* MW */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
ib_dev->alloc_mw = hns_roce_alloc_mw;
ib_dev->dealloc_mw = hns_roce_dealloc_mw;
ib_dev->uverbs_cmd_mask |=
(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
}
/* FRMR */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
ib_dev->alloc_mr = hns_roce_alloc_mr;
ib_dev->map_mr_sg = hns_roce_map_mr_sg;
}
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ib_dev->alloc_xrcd = hns_roce_ib_alloc_xrcd;
ib_dev->dealloc_xrcd = hns_roce_ib_dealloc_xrcd;
ib_dev->uverbs_cmd_mask |=
(1ULL << IB_USER_VERBS_CMD_OPEN_XRCD) |
(1ULL << IB_USER_VERBS_CMD_CLOSE_XRCD);
}
#ifdef CONFIG_NEW_KERNEL
ib_dev->driver_id = RDMA_DRIVER_HNS;
#endif
ret = ib_register_device(ib_dev, NULL);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
......@@ -689,8 +832,111 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
goto err_unmap_trrl;
}
if (hr_dev->caps.scc_ctx_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qp_table.scc_ctx_table,
HEM_TYPE_SCC_CTX,
hr_dev->caps.scc_ctx_entry_sz,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev,
"Failed to init SCC context memory, aborting.\n");
goto err_unmap_cq;
}
}
if (hr_dev->caps.qpc_timer_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qpc_timer_table.table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
hr_dev->caps.num_qpc_timer, 1);
if (ret) {
dev_err(dev,
"Failed to init QPC timer memory, aborting.\n");
goto err_unmap_ctx;
}
}
if (hr_dev->caps.cqc_timer_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->cqc_timer_table.table,
HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz,
hr_dev->caps.num_cqc_timer, 1);
if (ret) {
dev_err(dev,
"Failed to init CQC timer memory, aborting.\n");
goto err_unmap_qpc_timer;
}
}
if (hr_dev->caps.srqc_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
HEM_TYPE_SRQC,
hr_dev->caps.srqc_entry_sz,
hr_dev->caps.num_srqs, 1);
if (ret) {
dev_err(dev,
"Failed to init SRQ context memory, aborting.\n");
goto err_unmap_cqc_timer;
}
}
if (hr_dev->caps.num_srqwqe_segs) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->mr_table.mtt_srqwqe_table,
HEM_TYPE_SRQWQE,
hr_dev->caps.mtt_entry_sz,
hr_dev->caps.num_srqwqe_segs, 1);
if (ret) {
dev_err(dev,
"Failed to init MTT srqwqe memory, aborting.\n");
goto err_unmap_srq;
}
}
if (hr_dev->caps.num_idx_segs) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->mr_table.mtt_idx_table,
HEM_TYPE_IDX,
hr_dev->caps.idx_entry_sz,
hr_dev->caps.num_idx_segs, 1);
if (ret) {
dev_err(dev,
"Failed to init MTT idx memory, aborting.\n");
goto err_unmap_srqwqe;
}
}
return 0;
err_unmap_srqwqe:
if (hr_dev->caps.num_srqwqe_segs)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->mr_table.mtt_srqwqe_table);
err_unmap_srq:
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
err_unmap_cqc_timer:
if (hr_dev->caps.cqc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->cqc_timer_table.table);
err_unmap_qpc_timer:
if (hr_dev->caps.qpc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qpc_timer_table.table);
err_unmap_ctx:
if (hr_dev->caps.scc_ctx_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.scc_ctx_table);
err_unmap_cq:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
err_unmap_trrl:
if (hr_dev->caps.trrl_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
......@@ -752,10 +998,18 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_uar_alloc_free;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ret = hns_roce_init_xrcd_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to init protected domain table.\n");
goto err_pd_table_free;
}
}
ret = hns_roce_init_mr_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to init memory region table.\n");
goto err_pd_table_free;
goto err_xrcd_table_free;
}
ret = hns_roce_init_cq_table(hr_dev);
......@@ -770,14 +1024,31 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_cq_table_free;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ret = hns_roce_init_srq_table(hr_dev);
if (ret) {
dev_err(dev,
"Failed to init share receive queue table.\n");
goto err_qp_table_free;
}
}
return 0;
err_qp_table_free:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_qp_table(hr_dev);
err_cq_table_free:
hns_roce_cleanup_cq_table(hr_dev);
err_mr_table_free:
hns_roce_cleanup_mr_table(hr_dev);
err_xrcd_table_free:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
hns_roce_cleanup_xrcd_table(hr_dev);
err_pd_table_free:
hns_roce_cleanup_pd_table(hr_dev);
......@@ -861,6 +1132,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (ret)
goto error_failed_register_device;
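/* sysfs registration failure is non-fatal, so the return value is deliberately ignored */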
(void)hns_roce_register_sysfs(hr_dev);
return 0;
error_failed_register_device:
......@@ -900,7 +1173,6 @@ EXPORT_SYMBOL_GPL(hns_roce_init);
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
hns_roce_unregister_device(hr_dev);
if (hr_dev->hw->hw_exit)
hr_dev->hw->hw_exit(hr_dev);
hns_roce_cleanup_bitmap(hr_dev);
......
......@@ -30,6 +30,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
......@@ -184,12 +185,27 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
struct hns_roce_buddy *buddy;
int ret;
if (mtt_type == MTT_TYPE_WQE) {
switch (mtt_type) {
case MTT_TYPE_WQE:
buddy = &mr_table->mtt_buddy;
table = &mr_table->mtt_table;
} else {
break;
case MTT_TYPE_CQE:
buddy = &mr_table->mtt_cqe_buddy;
table = &mr_table->mtt_cqe_table;
break;
case MTT_TYPE_SRQWQE:
buddy = &mr_table->mtt_srqwqe_buddy;
table = &mr_table->mtt_srqwqe_table;
break;
case MTT_TYPE_IDX:
buddy = &mr_table->mtt_idx_buddy;
table = &mr_table->mtt_idx_table;
break;
default:
dev_err(hr_dev->dev, "Unsupport MTT table type: %d\n",
mtt_type);
return -EINVAL;
}
ret = hns_roce_buddy_alloc(buddy, order, seg);
......@@ -229,7 +245,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
/* Allocate MTT entry */
ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
mtt->mtt_type);
if (ret == -1)
if (ret != 0)
return -ENOMEM;
return 0;
......@@ -242,18 +258,40 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
if (mtt->order < 0)
return;
if (mtt->mtt_type == MTT_TYPE_WQE) {
switch (mtt->mtt_type) {
case MTT_TYPE_WQE:
hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
} else {
break;
case MTT_TYPE_CQE:
hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
break;
case MTT_TYPE_SRQWQE:
hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
break;
case MTT_TYPE_IDX:
hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
break;
default:
dev_err(hr_dev->dev,
"Unsupport mtt type %d, clean mtt failed\n",
mtt->mtt_type);
break;
}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
......@@ -329,7 +367,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
u64 bt_idx;
u64 size;
mhop_num = hr_dev->caps.pbl_hop_num;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
......@@ -351,7 +389,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_hop_num = mhop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
......@@ -511,7 +549,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->key = hw_index_to_key(index); /* MR key */
if (size == ~0ull) {
mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0;
/* PBL multi-hop addressing parameters */
......@@ -522,7 +559,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->pbl_l1_dma_addr = NULL;
mr->pbl_l0_dma_addr = 0;
} else {
mr->type = MR_TYPE_MR;
if (!hr_dev->caps.pbl_hop_num) {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
......@@ -548,9 +584,9 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
u32 mhop_num;
u64 bt_idx;
npages = ib_umem_page_count(mr->umem);
npages = mr->pbl_size;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
mhop_num = hr_dev->caps.pbl_hop_num;
mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return;
......@@ -636,7 +672,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
}
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
if (mr->type == MR_TYPE_MR)
npages = ib_umem_page_count(mr->umem);
if (!hr_dev->caps.pbl_hop_num)
dma_free_coherent(dev, (unsigned int)(npages * 8),
......@@ -674,7 +711,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_table;
}
ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
else
ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
......@@ -711,10 +751,26 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
u32 bt_page_size;
u32 i;
if (mtt->mtt_type == MTT_TYPE_WQE)
switch (mtt->mtt_type) {
case MTT_TYPE_WQE:
table = &hr_dev->mr_table.mtt_table;
bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
else
break;
case MTT_TYPE_CQE:
table = &hr_dev->mr_table.mtt_cqe_table;
bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_SRQWQE:
table = &hr_dev->mr_table.mtt_srqwqe_table;
bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_IDX:
table = &hr_dev->mr_table.mtt_idx_table;
bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
break;
default:
return -EINVAL;
}
/* All MTTs must fit in the same page */
if (start_index / (bt_page_size / sizeof(u64)) !=
......@@ -724,11 +780,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
return -EINVAL;
if (mtt->mtt_type == MTT_TYPE_WQE)
table = &hr_dev->mr_table.mtt_table;
else
table = &hr_dev->mr_table.mtt_cqe_table;
mtts = hns_roce_table_find(hr_dev, table,
mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
&dma_handle);
......@@ -757,10 +808,25 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
if (mtt->order < 0)
return -EINVAL;
if (mtt->mtt_type == MTT_TYPE_WQE)
switch (mtt->mtt_type) {
case MTT_TYPE_WQE:
bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
else
break;
case MTT_TYPE_CQE:
bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_SRQWQE:
bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
break;
case MTT_TYPE_IDX:
bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
break;
default:
dev_err(hr_dev->dev,
"Unsupport mtt type %d, write mtt failed\n",
mtt->mtt_type);
return -EINVAL;
}
while (npages > 0) {
chunk = min_t(int, bt_page_size / sizeof(u64), npages);
......@@ -826,8 +892,26 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
if (ret)
goto err_buddy_cqe;
}
ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
ilog2(hr_dev->caps.num_srqwqe_segs));
if (ret)
goto err_buddy_srqwqe;
ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
ilog2(hr_dev->caps.num_idx_segs));
if (ret)
goto err_buddy_idx;
return 0;
err_buddy_idx:
hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
err_buddy_srqwqe:
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
err_buddy_cqe:
hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
......@@ -840,6 +924,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
......@@ -855,6 +941,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
if (mr == NULL)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
/* Allocate memory region key */
ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
~0ULL, acc, 0, mr);
......@@ -893,8 +981,25 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
u32 bt_page_size;
u32 n;
order = mtt->mtt_type == MTT_TYPE_WQE ? hr_dev->caps.mtt_ba_pg_sz :
hr_dev->caps.cqe_ba_pg_sz;
switch (mtt->mtt_type) {
case MTT_TYPE_WQE:
order = hr_dev->caps.mtt_ba_pg_sz;
break;
case MTT_TYPE_CQE:
order = hr_dev->caps.cqe_ba_pg_sz;
break;
case MTT_TYPE_SRQWQE:
order = hr_dev->caps.srqwqe_ba_pg_sz;
break;
case MTT_TYPE_IDX:
order = hr_dev->caps.idx_ba_pg_sz;
break;
default:
dev_err(dev, "Unsupport mtt type %d, write mtt failed\n",
mtt->mtt_type);
return -EINVAL;
}
bt_page_size = 1 << (order + PAGE_SHIFT);
pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
......@@ -1017,20 +1122,22 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_umem;
}
} else {
int pbl_size = 1;
u64 pbl_size = 1;
bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
pbl_size *= bt_size;
if (n > pbl_size) {
dev_err(dev,
" MR len %lld err. MR page num is limited to %d!\n",
" MR len %lld err. MR page num is limited to %lld!\n",
length, pbl_size);
ret = -EINVAL;
goto err_umem;
}
}
mr->type = MR_TYPE_MR;
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
access_flags, n, mr);
if (ret)
......@@ -1201,3 +1308,194 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
return ret;
}
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
u64 length;
u32 page_size;
int ret;
page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
length = max_num_sg * page_size;
if (mr_type != IB_MR_TYPE_MEM_REG)
return ERR_PTR(-EINVAL);
if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
dev_err(dev, "max_num_sg larger than %d\n",
HNS_ROCE_FRMR_MAX_PA);
return ERR_PTR(-EINVAL);
}
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_FRMR;
/* Allocate memory region key */
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
0, max_num_sg, mr);
if (ret)
goto err_free;
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
goto err_free_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
mr->umem = NULL;
return &mr->ibmr;
err_free_mr:
hns_roce_mr_free(to_hr_dev(pd->device), mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
}
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);
return 0;
}
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
mr->npages = 0;
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
}
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mw *mw)
{
struct device *dev = hr_dev->dev;
int ret;
if (mw->enabled) {
ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
& (hr_dev->caps.num_mtpts - 1));
if (ret)
dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
key_to_hw_index(mw->rkey));
}
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
struct hns_roce_mw *mw)
{
unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
int ret;
/* prepare HEM entry memory */
ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
if (ret)
return ret;
/* allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_table;
}
ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
if (ret) {
dev_err(dev, "MW write mtpt fail!\n");
goto err_page;
}
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
goto err_page;
}
mw->enabled = 1;
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return 0;
err_page:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_table:
hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
return ret;
}
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
struct hns_roce_mw *mw;
unsigned long index = 0;
int ret;
mw = kmalloc(sizeof(*mw), GFP_KERNEL);
if (!mw)
return ERR_PTR(-ENOMEM);
/* Allocate a key for mw from bitmap */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
if (ret)
goto err_bitmap;
mw->rkey = hw_index_to_key(index);
mw->ibmw.rkey = mw->rkey;
mw->ibmw.type = type;
mw->pdn = to_hr_pd(ib_pd)->pdn;
mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
ret = hns_roce_mw_enable(hr_dev, mw);
if (ret)
goto err_mw;
return &mw->ibmw;
err_mw:
hns_roce_mw_free(hr_dev, mw);
err_bitmap:
kfree(mw);
return ERR_PTR(ret);
}
int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
struct hns_roce_mw *mw = to_hr_mw(ibmw);
hns_roce_mw_free(hr_dev, mw);
kfree(mw);
return 0;
}
......@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <linux/platform_device.h>
#include <linux/pci.h>
......@@ -37,7 +38,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
......@@ -45,6 +46,18 @@ static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
}
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev,
unsigned long *xrcdn)
{
return hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, xrcdn);
}
static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev,
unsigned long xrcdn)
{
hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn, BITMAP_NO_RR);
}
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
......@@ -57,6 +70,19 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
}
int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
{
return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap,
hr_dev->caps.num_xrcds,
hr_dev->caps.num_xrcds - 1,
hr_dev->caps.reserved_xrcds, 0);
}
void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
}
struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_ucontext *context,
struct ib_udata *udata)
......@@ -77,6 +103,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
return ERR_PTR(ret);
}
#ifdef CONFIG_NEW_KERNEL
if (context) {
struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};
......@@ -88,6 +115,17 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
}
}
#else
if (context) {
if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
kfree(pd);
return ERR_PTR(-EFAULT);
}
}
#endif
return &pd->ibpd;
}
EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
......@@ -101,6 +139,65 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)
}
EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);
struct ib_xrcd *hns_roce_ib_alloc_xrcd(struct ib_device *ib_dev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct ib_cq_init_attr cq_attr = {};
struct hns_roce_xrcd *xrcd;
int ret;
if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
return ERR_PTR(-EINVAL);
xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
if (!xrcd)
return ERR_PTR(-ENOMEM);
ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
if (ret) {
kfree(xrcd);
dev_err(hr_dev->dev,
"[alloc_xrcd]hns_roce_xrcd_alloc failed!\n");
return ERR_PTR(ret);
}
xrcd->pd = ib_alloc_pd(ib_dev, 0);
if (IS_ERR_OR_NULL(xrcd->pd)) {
ret = PTR_ERR(xrcd->pd);
goto err_dealloc_xrcd;
}
cq_attr.cqe = 1;
xrcd->cq = ib_create_cq(ib_dev, NULL, NULL, xrcd, &cq_attr);
if (IS_ERR_OR_NULL(xrcd->cq)) {
ret = PTR_ERR(xrcd->cq);
goto err_dealloc_pd;
}
return &xrcd->ibxrcd;
err_dealloc_pd:
ib_dealloc_pd(xrcd->pd);
err_dealloc_xrcd:
hns_roce_xrcd_free(hr_dev, xrcd->xrcdn);
kfree(xrcd);
return ERR_PTR(ret);
}
int hns_roce_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
ib_destroy_cq(to_hr_xrcd(xrcd)->cq);
ib_dealloc_pd(to_hr_xrcd(xrcd)->pd);
hns_roce_xrcd_free(to_hr_dev(xrcd->device), to_hr_xrcd(xrcd)->xrcdn);
kfree(xrcd);
return 0;
}
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
struct resource *res;
......
......@@ -31,6 +31,7 @@
* SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
......@@ -115,10 +116,7 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
base) ?
-ENOMEM :
0;
return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
......@@ -208,13 +206,23 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
}
}
if (hr_dev->caps.scc_ctx_entry_sz) {
/* Alloc memory for SCC CTX */
ret = hns_roce_table_get(hr_dev, &qp_table->scc_ctx_table,
hr_qp->qpn);
if (ret) {
dev_err(dev, "SCC CTX table get failed\n");
goto err_put_trrl;
}
}
spin_lock_irq(&qp_table->lock);
ret = radix_tree_insert(&hr_dev->qp_table_tree,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
dev_err(dev, "QPC radix_tree_insert failed\n");
goto err_put_trrl;
goto err_put_scc_ctx;
}
atomic_set(&hr_qp->refcount, 1);
......@@ -222,6 +230,11 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
return 0;
err_put_scc_ctx:
if (hr_dev->caps.scc_ctx_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->scc_ctx_table,
hr_qp->qpn);
err_put_trrl:
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
......@@ -257,6 +270,9 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
wait_for_completion(&hr_qp->free);
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
if (hr_dev->caps.scc_ctx_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->scc_ctx_table,
hr_qp->qpn);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
......@@ -279,7 +295,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, int is_user, int has_srq,
struct ib_qp_cap *cap, int is_user, int has_rq,
struct hns_roce_qp *hr_qp)
{
struct device *dev = hr_dev->dev;
......@@ -293,14 +309,12 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
/* If srq exit, set zero for relative number of rq */
if (has_srq) {
if (cap->max_recv_wr) {
dev_dbg(dev, "srq no need config max_recv_wr\n");
return -EINVAL;
}
hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
/* If the QP has no RQ (e.g. an SRQ is used), zero the RQ attributes */
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
} else {
if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
......@@ -345,6 +359,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
u8 max_sq_stride = ilog2(roundup_sq_stride);
u32 page_size;
u32 max_cnt;
u32 ex_sge_num;
/* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
......@@ -372,7 +387,22 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
if (hr_qp->sq.max_gs > 2)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
hr_qp->sq.max_gs);
if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(hr_dev->dev,
"The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
hr_qp->sge.sge_shift = 4;
ex_sge_num = hr_qp->sge.sge_cnt;
/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
......@@ -386,6 +416,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt =
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
......@@ -394,7 +426,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
if (hr_qp->sge.sge_cnt) {
if (ex_sge_num) {
hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
......@@ -465,6 +497,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
......@@ -472,6 +512,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
......@@ -503,7 +545,8 @@ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
if (attr->qp_type == IB_QPT_XRC_INI ||
attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
!attr->cap.max_recv_wr)
return 0;
return 1;
......@@ -538,13 +581,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);
ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
!!init_attr->srq, hr_qp);
hns_roce_qp_has_rq(init_attr), hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_rq_size failed\n");
goto err_out;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hns_roce_qp_has_rq(init_attr)) {
/* allocate recv inline buf */
hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
sizeof(struct hns_roce_rinl_wqe),
......@@ -651,6 +695,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
dev_err(dev, "rq record doorbell map failed!\n");
goto err_sq_dbmap;
}
/* indicate kernel supports rq record db */
resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
hr_qp->rdb_en = 1;
}
} else {
if (init_attr->create_flags &
......@@ -759,17 +807,20 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
else
hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
/* indicate kernel supports rq record db */
resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
if (ib_pd->uobject && (udata->outlen >= sizeof(resp))) {
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)
goto err_qp;
}
hr_qp->rdb_en = 1;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret) {
dev_err(hr_dev->dev, "qp flow control init failure!");
goto err_qp;
}
}
hr_qp->event = hns_roce_ib_qp_event;
return 0;
......@@ -838,13 +889,27 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_dev *hr_dev = pd ? to_hr_dev(pd->device) :
to_hr_dev(init_attr->xrcd->device);
struct device *dev = hr_dev->dev;
struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
u16 xrcdn = 0;
int ret;
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
return ERR_PTR(-EINVAL);
pd = to_hr_xrcd(init_attr->xrcd)->pd;
xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
init_attr->send_cq = to_hr_xrcd(init_attr->xrcd)->cq;
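/* fall through */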
case IB_QPT_XRC_INI:
if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
return ERR_PTR(-EINVAL);
init_attr->recv_cq = init_attr->send_cq;
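/* fall through */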
case IB_QPT_UD:
case IB_QPT_UC:
case IB_QPT_RC: {
hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
if (!hr_qp)
......@@ -859,7 +924,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
}
hr_qp->ibqp.qp_num = hr_qp->qpn;
hr_qp->xrcdn = xrcdn;
break;
}
case IB_QPT_GSI: {
......@@ -916,6 +981,8 @@ int to_hr_qp_type(int qp_type)
transport_type = SERV_TYPE_UD;
else if (qp_type == IB_QPT_GSI)
transport_type = SERV_TYPE_UD;
else if (qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT)
transport_type = SERV_TYPE_XRC;
else
transport_type = -1;
......@@ -923,46 +990,42 @@ int to_hr_qp_type(int qp_type)
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_qp_attr *attr, int attr_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
int p;
enum ib_mtu active_mtu;
int p;
mutex_lock(&hr_qp->mutex);
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
new_state = attr_mask & IB_QP_STATE ?
attr->qp_state : cur_state;
if (ibqp->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
if (hr_qp->sdb_en == 1) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
dev_warn(dev, "flush cqe is not supported in userspace!\n");
goto out;
}
if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
attr->path_mtu > hr_dev->caps.max_mtu) ||
attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
attr->path_mtu);
return -EINVAL;
}
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
goto out;
}
return 0;
}
static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = hr_dev->dev;
int ret = 0;
int p;
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
attr->port_num);
goto out;
return -EINVAL;
}
if (attr_mask & IB_QP_PKEY_INDEX) {
......@@ -970,40 +1033,72 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
attr->pkey_index);
goto out;
return -EINVAL;
}
}
if (attr_mask & IB_QP_PATH_MTU) {
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
attr->path_mtu > IB_MTU_4096) ||
(hr_dev->caps.max_mtu == IB_MTU_2048 &&
attr->path_mtu > IB_MTU_2048) ||
attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > active_mtu) {
dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
attr->path_mtu);
goto out;
}
ret = check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
if (ret)
return ret;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
attr->max_rd_atomic);
goto out;
return -EINVAL;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
attr->max_dest_rd_atomic);
return -EINVAL;
}
return ret;
}
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
mutex_lock(&hr_qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
new_state = attr_mask & IB_QP_STATE ?
attr->qp_state : cur_state;
if (ibqp->pd->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
if (hr_qp->sdb_en == 1) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
if (hr_qp->rdb_en == 1)
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
dev_warn(dev, "flush cqe is not supported in userspace!\n");
goto out;
}
}
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
goto out;
}
ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
if (ret)
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
if (hr_dev->caps.min_wqes) {
ret = -EPERM;
......@@ -1106,14 +1201,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
int reserved_from_top = 0;
int reserved_from_bot;
int ret;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
/* A port include two SQP, six port total 12 */
/* In hw v1, each port includes two SQPs, so six ports total 12 */
if (hr_dev->caps.max_sq_sg <= 2)
reserved_from_bot = SQP_NUM;
else
reserved_from_bot = hr_dev->caps.reserved_qps;
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
hr_dev->caps.num_qps - 1, SQP_NUM,
hr_dev->caps.num_qps - 1, reserved_from_bot,
reserved_from_top);
if (ret) {
dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
......
/*
* Copyright (c) 2018 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "roce_k_compat.h"
#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_srq *srq;
rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (hr_dev->caps.num_srqs - 1));
rcu_read_unlock();
if (srq) {
refcount_inc(&srq->refcount);
} else {
dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
return;
}
srq->event(srq, event_type);
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
}
EXPORT_SYMBOL_GPL(hns_roce_srq_event);
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
enum hns_roce_event event_type)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct ib_srq *ibsrq = &srq->ibsrq;
struct ib_event event;
if (ibsrq->event_handler) {
event.device = ibsrq->device;
event.element.srq = ibsrq;
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
event.event = IB_EVENT_SRQ_LIMIT_REACHED;
break;
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
event.event = IB_EVENT_SRQ_ERR;
break;
default:
dev_err(hr_dev->dev,
"hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
event_type, srq->srqn);
return;
}
ibsrq->event_handler(&event, ibsrq->srq_context);
}
}
static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
HNS_ROCE_CMD_SW2HW_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_cmd_mailbox *mailbox;
dma_addr_t dma_handle_wqe;
dma_addr_t dma_handle_idx;
u64 *mtts_wqe;
u64 *mtts_idx;
int ret;
/* Get the physical address of srq buf */
mtts_wqe = hns_roce_table_find(hr_dev,
&hr_dev->mr_table.mtt_srqwqe_table,
srq->mtt.first_seg,
&dma_handle_wqe);
if (!mtts_wqe) {
dev_err(hr_dev->dev,
"SRQ alloc.Failed to find srq buf addr.\n");
return -EINVAL;
}
/* Get physical address of idx que buf */
mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
srq->idx_que.mtt.first_seg,
&dma_handle_idx);
if (!mtts_idx) {
dev_err(hr_dev->dev,
"SRQ alloc.Failed to find idx que buf addr.\n");
return -EINVAL;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret == -1) {
dev_err(hr_dev->dev, "SRQ alloc.Failed to alloc index.\n");
return -ENOMEM;
}
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
if (ret)
goto err_out;
spin_lock_irq(&srq_table->lock);
ret = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (ret)
goto err_put;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_radix;
}
hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
mtts_wqe, mtts_idx, dma_handle_wqe,
dma_handle_idx);
ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
goto err_radix;
refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
return ret;
err_radix:
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
err_out:
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
return ret;
}
void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
if (ret)
dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
ret, srq->srqn);
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
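/* Drop the initial reference and wait for any async event handlers to release theirs */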
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
u32 page_shift)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
u32 bitmap_num;
int i;
idx_que->entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
bitmap_num = bitmap_num / (8 * sizeof(u64));
idx_que->buf_size = srq->max * idx_que->entry_sz;
if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
&idx_que->idx_buf, page_shift)) {
kfree(idx_que->bitmap);
return -ENOMEM;
}
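/* Set every bit so that all index queue entries start out available */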
for (i = 0; i < bitmap_num; i++)
idx_que->bitmap[i] = ~(0UL);
return 0;
}
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_srq *srq;
int srq_desc_size;
int srq_buf_size;
u32 page_shift;
int ret = 0;
u32 npages;
u16 xrcdn;
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return ERR_PTR(-EINVAL);
srq = kzalloc(sizeof(*srq), GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
srq->max_gs = srq_init_attr->attr.max_sge;
srq_desc_size = max(16, 16 * srq->max_gs);
srq->wqe_shift = ilog2(srq_desc_size);
srq_buf_size = srq->max * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
if (udata) {
struct hns_roce_ib_create_srq ucmd;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_srq;
}
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
srq_buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
}
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift,
&srq->mtt);
} else
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->umem),
srq->umem->page_shift,
&srq->mtt);
if (ret)
goto err_buf;
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret)
goto err_srq_mtt;
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(pd->uobject->context,
ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev,
"ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_srq_mtt;
}
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
if (hr_dev->caps.idx_buf_pg_sz) {
npages = (ib_umem_page_count(srq->idx_que.umem) +
(1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
(1 << hr_dev->caps.idx_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift, &srq->idx_que.mtt);
} else {
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->idx_que.umem),
srq->idx_que.umem->page_shift,
&srq->idx_que.mtt);
}
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_mtt_init error for idx que\n");
goto err_idx_mtt;
}
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
srq->idx_que.umem);
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_idx_buf;
}
} else {
u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
(1 << page_shift) * 2,
&srq->buf, page_shift)) {
ret = -ENOMEM;
goto err_buf;
}
srq->head = 0;
srq->tail = srq->max - 1;
srq->wqe_ctr = 0;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
srq->buf.page_shift, &srq->mtt);
if (ret)
goto err_buf;
ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret)
goto err_srq_mtt;
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(pd, srq, page_shift);
if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
ret);
goto err_srq_mtt;
}
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
/* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret)
goto err_create_idx;
/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret)
goto err_idx_buf;
srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_idx_buf;
}
}
cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
xrcdn = (srq_init_attr->srq_type == IB_SRQT_XRC) ?
to_hr_xrcd(srq_init_attr->ext.xrc.xrcd)->xrcdn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, xrcdn,
&srq->mtt, 0, srq);
if (ret)
goto err_wrid;
srq->event = hns_roce_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->srqn;
if (pd->uobject) {
if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
ret = -EFAULT;
goto err_wrid;
}
}
return &srq->ibsrq;
err_wrid:
kvfree(srq->wrid);
err_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_idx_mtt:
if (udata)
ib_umem_release(srq->idx_que.umem);
err_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
err_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_buf:
if (udata)
ib_umem_release(srq->umem);
else
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
err_srq:
kfree(srq);
return ERR_PTR(ret);
}
int hns_roce_destroy_srq(struct ib_srq *ibsrq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
hns_roce_srq_free(hr_dev, srq);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
if (ibsrq->uobject) {
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
ib_umem_release(srq->idx_que.umem);
ib_umem_release(srq->umem);
} else {
kvfree(srq->wrid);
hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
&srq->buf);
}
kfree(srq);
return 0;
}
struct hns_roce_srq *hns_roce_srq_lookup(struct hns_roce_dev *hr_dev, u32 srqn)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_srq *srq;
rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (hr_dev->caps.max_srqs - 1));
rcu_read_unlock();
return srq;
}
EXPORT_SYMBOL_GPL(hns_roce_srq_lookup);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
hr_dev->caps.num_srqs - 1,
hr_dev->caps.reserved_srqs, 0);
}
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}
(This file's diff has been collapsed.)
#ifndef _ROCE_K_COMPAT_H
#define _ROCE_K_COMPAT_H
#ifndef LINUX_VERSION_CODE
#include <linux/version.h>
#else
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#endif
#ifndef PCI_VENDOR_ID_HUAWEI
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/**
* OFED didn't provide a version code
* !!!!! This is a TEMPORARY solution !!!!!
*/
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
typedef unsigned long long __u64;
#if defined(__GNUC__)
typedef __u64 uint64_t;
#endif
typedef uint64_t u64;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
#undef pci_irq_vector
#define pci_irq_vector _kc_pci_irq_vector
#ifdef CONFIG_PCI_MSI
#include <linux/pci.h>
#include <linux/msi.h>
/**
* pci_irq_vector - return Linux IRQ number of a device vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
*/
static inline int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (dev->msix_enabled) {
struct msi_desc *entry;
int i = 0;
for_each_pci_msi_entry(entry, dev) {
if (i == nr)
return entry->irq;
i++;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
if (WARN_ON_ONCE(nr >= entry->nvec_used))
return -EINVAL;
} else {
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
}
return dev->irq + nr;
}
#else
static inline int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
return dev->irq;
}
#endif
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
#ifndef HAVE_LINUX_MM_H
#define HAVE_LINUX_MM_H
#endif
#ifndef HAVE_LINUX_SCHED_H
#define HAVE_LINUX_SCHED_H
#endif
/**
* struct refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at UINT_MAX and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
#undef refcount_set
#define refcount_set _kc_refcount_set
static inline void _kc_refcount_set(refcount_t *r, unsigned int n)
{
atomic_set(&r->refs, n);
}
#undef refcount_dec_and_test
#define refcount_dec_and_test _kc_refcount_dec_and_test
static inline __must_check bool _kc_refcount_dec_and_test(refcount_t *r)
{
return atomic_dec_and_test(&r->refs);
}
/*
* Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*/
static inline bool refcount_inc_not_zero(refcount_t *r)
{
unsigned int old, new, val = atomic_read(&r->refs);
for (;;) {
new = val + 1;
if (!val)
return false;
if (unlikely(!new))
return true;
old = atomic_cmpxchg_relaxed(&r->refs, val, new);
if (old == val)
break;
val = old;
}
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
return true;
}
/*
* Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller already has a
* reference on the object, will WARN when this is not so.
*/
static inline void refcount_inc(refcount_t *r)
{
WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
/*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
* when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
static inline void refcount_dec(refcount_t *r)
{
WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
/*
* No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
* success thereof.
*
* Like all decrement operations, it provides release memory order and provides
* a control dependency.
*
* It can be used like a try-delete operator; this explicit case is provided
* and not cmpxchg in generic, because that would allow implementing unsafe
* operations.
*/
static inline bool refcount_dec_if_one(refcount_t *r)
{
return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
}
/*
 * kvmalloc_array() is not available on this older kernel; fall back to
 * plain kmalloc_array().
 */
#undef kvmalloc_array
#define kvmalloc_array _kc_kvmalloc_array
static inline void *_kc_kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags);
}
#endif
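/*
 * Usage illustration (not part of the original header, compiled out): the
 * usual lifecycle of the refcount_t fallback above - initialise the count to
 * 1 when the object is created, take extra references with refcount_inc(),
 * and free the object when refcount_dec_and_test() sees the final put. The
 * structure and helper names are placeholders.
 */
#if 0
struct example_obj {
	refcount_t refcount;
	/* ... payload ... */
};

static struct example_obj *example_obj_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refcount, 1);
	return obj;
}

static void example_obj_get(struct example_obj *obj)
{
	refcount_inc(&obj->refcount);
}

static void example_obj_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->refcount))
		kfree(obj);
}
#endif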
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
#undef addrconf_addr_eui48_base
#define addrconf_addr_eui48_base _kc_addrconf_addr_eui48_base
static inline void _kc_addrconf_addr_eui48_base(u8 *eui,
const char *const addr)
{
memcpy(eui, addr, 3);
eui[3] = 0xFF;
eui[4] = 0xFE;
memcpy(eui + 5, addr + 3, 3);
}
#undef addrconf_addr_eui48
#define addrconf_addr_eui48 _kc_addrconf_addr_eui48
static inline void _kc_addrconf_addr_eui48(u8 *eui, const char *const addr)
{
addrconf_addr_eui48_base(eui, addr);
eui[0] ^= 2;
}
#endif
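/*
 * Usage illustration (not part of the original header, compiled out): RoCE
 * drivers commonly derive the interface-identifier half of a default GID
 * from the port MAC address. addrconf_addr_eui48() expands the 6-byte MAC
 * into a modified EUI-64 (FF:FE inserted in the middle, universal/local bit
 * flipped), which fills bytes 8..15 of the GID; the 8-byte prefix is assumed
 * to be set elsewhere. union ib_gid comes from <rdma/ib_verbs.h>.
 */
#if 0
static void example_mac_to_gid_interface_id(const u8 *mac, union ib_gid *gid)
{
	addrconf_addr_eui48(&gid->raw[8], mac);
}
#endif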
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
/*
* If one of a or b is a compile-time constant, this avoids a division.
*/
#define __unsigned_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a * __b; \
__builtin_constant_p(__b) ? \
__b > 0 && __a > type_max(typeof(__a)) / __b : \
__a > 0 && __b > type_max(typeof(__b)) / __a; \
})
/*
* Signed multiplication is rather hard. gcc always follows C99, so
* division is truncated towards 0. This means that we can write the
* overflow check like this:
*
* (a > 0 && (b > MAX/a || b < MIN/a)) ||
 * (a < -1 && (b > MIN/a || b < MAX/a)) ||
* (a == -1 && b == MIN)
*
* The redundant casts of -1 are to silence an annoying -Wtype-limits
* (included in -Wextra) warning: When the type is u8 or u16, the
* __b_c_e in check_mul_overflow obviously selects
* __unsigned_mul_overflow, but unfortunately gcc still parses this
* code and warns about the limited range of __b.
*/
#define __signed_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
typeof(a) __tmax = type_max(typeof(a)); \
typeof(a) __tmin = type_min(typeof(a)); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a * (u64)__b; \
(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
(__b == (typeof(__b))-1 && __a == __tmin); \
})
#define check_mul_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_mul_overflow(a, b, d), \
__unsigned_mul_overflow(a, b, d))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
#define __must_check __attribute__((warn_unused_result))
typedef unsigned long __kernel_ulong_t;
typedef __kernel_ulong_t __kernel_size_t;
typedef __kernel_size_t size_t;
#define SIZE_MAX (~(size_t)0)
#endif
/**
* array_size() - Calculate size of 2-dimensional array.
*
* @a: dimension one
* @b: dimension two
*
* Calculates size of 2-dimensional array: @a * @b.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
static inline __must_check size_t array_size(size_t a, size_t b)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
return bytes;
}
#endif
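/*
 * Usage illustration (not part of the original header, compiled out):
 * array_size() returns SIZE_MAX when the multiplication overflows, so the
 * allocation below fails with NULL instead of silently allocating a
 * truncated buffer. Combined with the kvmalloc_array() fallback earlier in
 * this header it reproduces the 4.18-era allocation idiom; the element type
 * and dimensions are placeholders.
 */
#if 0
static u64 *example_alloc_table(size_t rows, size_t cols)
{
	return kvmalloc_array(rows, array_size(cols, sizeof(u64)),
			      GFP_KERNEL | __GFP_ZERO);
}
#endif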
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 16, 0))
#define CONFIG_NEW_KERNEL
#define MODIFY_CQ_MASK
#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 18, 0))
#define CONFIG_KERNEL_419
#endif
#endif /*_ROCE_K_COMPAT_H*/