Commit 40bc5e5e authored by Yang Yingliang, committed by Xie XiuQi

driver: roce: update roce driver from driver team

driver inclusion
category: feature

-----------------------------------------

Based on 15c940a5062b17c9c2d30700194f1bd9c3bde72b
("RDMA/hns: Fix coding style related issues")
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 4645e3f4
@@ -113,16 +113,15 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
 				unsigned long obj, int cnt,
 				int rr)
 {
+	unsigned long base = obj & (bitmap->max + bitmap->reserved_top - 1);
 	int i;
 
-	obj &= bitmap->max + bitmap->reserved_top - 1;
-
 	spin_lock(&bitmap->lock);
 	for (i = 0; i < cnt; i++)
-		clear_bit(obj + i, bitmap->table);
+		clear_bit(base + i, bitmap->table);
 
 	if (!rr)
-		bitmap->last = min(bitmap->last, obj);
+		bitmap->last = min(bitmap->last, base);
 	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
 		       & bitmap->mask;
 	spin_unlock(&bitmap->lock);
@@ -186,7 +185,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 	u32 page_size = 1 << page_shift;
 	u32 order;
 
-	/* SQ/RQ buf lease than one page, SQ + RQ = 8K */
+	/* buf for SQ/RQ both at lease one page, SQ + RQ is 2 pages */
 	if (size <= max_direct) {
 		buf->nbufs = 1;
 		/* Npages calculated by page_size */
...
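The hns_roce_bitmap_free_range() hunk above computes the masked index once into a local `base` instead of overwriting the caller's `obj`. A toy, self-contained illustration of that masking (values invented; in the driver, bitmap->max + bitmap->reserved_top is a power of two):

#include <stdio.h>

/* Toy model of the hunk above: mask an object index into the bitmap range
 * once, without mutating the caller's value. 256 and 0 stand in for
 * bitmap->max and bitmap->reserved_top.
 */
static unsigned long masked_base(unsigned long obj, unsigned long max,
                                 unsigned long reserved_top)
{
    return obj & (max + reserved_top - 1);
}

int main(void)
{
    printf("%lu\n", masked_base(300, 256, 0)); /* prints 44: 300 & 255 */
    return 0;
}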
@@ -162,7 +162,7 @@ static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 				  u64 out_param, unsigned long in_modifier,
 				  u8 op_modifier, u16 op, unsigned long timeout)
 {
-	int ret = 0;
+	int ret;
 
 	down(&hr_dev->cmd.event_sem);
 	ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
...
@@ -124,7 +124,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 	/* Get CQC memory HEM(Hardware Entry Memory) table */
 	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
 	if (ret) {
-		dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+		dev_err(dev, "CQ(0x%lx) alloc.Failed to get context mem(%d).\n",
+			hr_cq->cqn, ret);
 		goto err_out;
 	}
@@ -134,7 +135,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 	ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
 	spin_unlock_irq(&cq_table->lock);
 	if (ret) {
-		dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n");
+		dev_err(dev, "CQ(0x%lx) alloc.Failed to radix_tree_insert.\n",
+			hr_cq->cqn);
 		goto err_put;
 	}
@@ -152,7 +154,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	if (ret) {
-		dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+		dev_err(dev, "CQ(0x%lx) alloc.Failed to cmd mailbox(%d).\n",
+			hr_cq->cqn, ret);
 		goto err_radix;
 	}
@@ -246,12 +249,16 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 					(*umem)->page_shift,
 					&buf->hr_mtt);
 	}
-	if (ret)
+	if (ret) {
+		dev_err(hr_dev->dev, "hns_roce_mtt_init error for create cq\n");
 		goto err_buf;
+	}
 
 	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
-	if (ret)
+	if (ret) {
+		dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error for create cq\n");
 		goto err_mtt;
+	}
 
 	return 0;
@@ -282,12 +289,16 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
 				buf->hr_buf.page_shift, &buf->hr_mtt);
-	if (ret)
+	if (ret) {
+		dev_err(hr_dev->dev, "hns_roce_mtt_init error for kernel create cq\n");
 		goto err_buf;
+	}
 
 	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
-	if (ret)
+	if (ret) {
+		dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error for kernel create cq\n");
 		goto err_mtt;
+	}
 
 	return 0;
@@ -365,10 +376,19 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
 	struct device *dev = hr_dev->dev;
 	int ret;
 
+	hr_cq->workq =
+		create_singlethread_workqueue("hns_roce_cq_workqueue");
+	if (!hr_cq->workq) {
+		dev_err(dev, "Failed to create cq workqueue!\n");
+		return -ENOMEM;
+	}
+
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
 		ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
-		if (ret)
-			return ret;
+		if (ret) {
+			dev_err(dev, "Failed to alloc db for cq.\n");
+			goto err_workq;
+		}
 
 		hr_cq->set_ci_db = hr_cq->db.db_record;
 		*hr_cq->set_ci_db = 0;
@@ -378,7 +398,7 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
 	/* Init mmt table and write buff address to mtt table */
 	ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
 	if (ret) {
-		dev_err(dev, "Failed to alloc_cq_buf.\n");
+		dev_err(dev, "Failed to alloc cq buf.\n");
 		goto err_db;
 	}
@@ -388,6 +408,9 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
 
 	return 0;
 
+err_workq:
+	destroy_workqueue(hr_cq->workq);
+
 err_db:
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 		hns_roce_free_db(hr_dev, &hr_cq->db);
@@ -457,13 +480,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		ret = create_user_cq(hr_dev, hr_cq, context, udata, &resp, uar,
 				     cq_entries);
 		if (ret) {
-			dev_err(dev, "Create cq fail in user mode!\n");
+			dev_err(dev, "Failed to create cq for user mode!\n");
 			goto err_cq;
 		}
 	} else {
 		ret = create_kernel_cq(hr_dev, hr_cq, uar, cq_entries);
 		if (ret) {
-			dev_err(dev, "Create cq fail in user mode!\n");
+			dev_err(dev, "Failed to create cq for kernel mode!\n");
 			goto err_cq;
 		}
 	}
@@ -545,6 +568,9 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
 				   ib_cq->cqe);
 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 			hns_roce_free_db(hr_dev, &hr_cq->db);
+
+		flush_workqueue(hr_cq->workq);
+		destroy_workqueue(hr_cq->workq);
 	}
 
 	kfree(hr_cq);
...
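The cq.c hunks above attach a single-threaded workqueue to every kernel-mode CQ: it is created in create_kernel_cq(), drained and destroyed in hns_roce_ib_destroy_cq(), and used to queue flush-CQE work (via init_flush_work(), declared later in hns_roce_device.h). A condensed sketch of just that lifecycle, with `struct demo_cq` standing in for `struct hns_roce_cq`:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_cq {
    struct workqueue_struct *workq;
};

static int demo_cq_create(struct demo_cq *cq)
{
    /* One ordered worker per CQ, so queued flush requests serialize. */
    cq->workq = create_singlethread_workqueue("hns_roce_cq_workqueue");
    if (!cq->workq)
        return -ENOMEM;
    return 0;
}

static void demo_cq_destroy(struct demo_cq *cq)
{
    /* Drain any pending flush work before the CQ memory goes away. */
    flush_workqueue(cq->workq);
    destroy_workqueue(cq->workq);
}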
@@ -79,7 +79,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
 	if (!pgdir)
 		return NULL;
 
-	bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2);
+	bitmap_fill(pgdir->order1,
+		    HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
 	pgdir->bits[0] = pgdir->order0;
 	pgdir->bits[1] = pgdir->order1;
 	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
@@ -117,7 +118,7 @@ static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
 	db->u.pgdir = pgdir;
 	db->index = i;
 	db->db_record = pgdir->page + db->index;
-	db->dma = pgdir->db_dma + db->index * 4;
+	db->dma = pgdir->db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
 	db->order = order;
 
 	return 0;
@@ -171,7 +172,8 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
 	i >>= o;
 	set_bit(i, db->u.pgdir->bits[o]);
 
-	if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
+	if (bitmap_full(db->u.pgdir->order1,
+			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
 		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
 				  db->u.pgdir->db_dma);
 		list_del(&db->u.pgdir->list);
...
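HNS_ROCE_DB_TYPE_COUNT and HNS_ROCE_DB_UNIT_SIZE, added to hns_roce_device.h below, name the former magic numbers 2 and 4: a doorbell record is 4 bytes and a doorbell page is shared between two record types, so the order-1 bitmap tracks records in pairs. The arithmetic, spelled out for 4 KiB pages:

/* Doorbell arithmetic for a 4 KiB page; mirrors the constants added in
 * hns_roce_device.h below.
 */
#define DEMO_DB_UNIT_SIZE   4   /* HNS_ROCE_DB_UNIT_SIZE */
#define DEMO_DB_TYPE_COUNT  2   /* HNS_ROCE_DB_TYPE_COUNT */
#define DEMO_DB_PER_PAGE    (4096 / DEMO_DB_UNIT_SIZE)

_Static_assert(DEMO_DB_PER_PAGE == 1024, "doorbell records per page");
_Static_assert(DEMO_DB_PER_PAGE / DEMO_DB_TYPE_COUNT == 512,
               "bits in the order-1 bitmap");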
@@ -37,9 +37,12 @@
 
 #define DRV_NAME "hns_roce"
 
+/* hip08 is a pci device, it includes two version according pci version id */
+#define PCI_REVISION_ID_HIP08_A		0x20
+#define PCI_REVISION_ID_HIP08_B		0x21
+
 #define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')
 
-#define MAC_ADDR_OCTET_NUM		6
 #define HNS_ROCE_MAX_MSG_LEN		0x80000000
 
 #define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
@@ -48,6 +51,10 @@
 
 #define HNS_ROCE_BA_SIZE		(32 * 4096)
 
+#define BA_BYTE_LEN			8
+
+#define BITS_PER_BYTE			8
+
 /* Hardware specification only for v1 engine */
 #define HNS_ROCE_MIN_CQE_NUM		0x40
 #define HNS_ROCE_MIN_WQE_NUM		0x20
@@ -55,6 +62,8 @@
 /* Hardware specification only for v1 engine */
 #define HNS_ROCE_MAX_INNER_MTPT_NUM	0x7
 #define HNS_ROCE_MAX_MTPT_PBL_NUM	0x100000
+#define HNS_ROCE_MAX_SGE_NUM		2
+
 #define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
 #define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
@@ -64,6 +73,9 @@
 
 #define HNS_ROCE_MAX_IRQ_NUM		128
 
+#define HNS_ROCE_SGE_IN_WQE		2
+#define HNS_ROCE_SGE_SHIFT		4
+
 #define EQ_ENABLE			1
 #define EQ_DISABLE			0
@@ -82,6 +94,8 @@
 #define HNS_ROCE_MAX_GID_NUM		16
 #define HNS_ROCE_GID_SIZE		16
 
+#define HNS_ROCE_SGE_SIZE		16
+
 #define HNS_ROCE_HOP_NUM_0		0xff
 
 #define BITMAP_NO_RR			0
@@ -114,6 +128,8 @@
 #define PAGES_SHIFT_24			24
 #define PAGES_SHIFT_32			32
 
+#define HNS_ROCE_PCI_BAR_NR		2
+
 #define HNS_ROCE_IDX_QUE_ENTRY_SZ	4
 #define HNS_ROCE_FRMR_MAX_PA		512
@@ -139,6 +155,12 @@ enum hns_roce_qp_state {
 	HNS_ROCE_QP_NUM_STATE,
 };
 
+enum queue_type {
+	HNS_ROCE_SQ,
+	HNS_ROCE_RQ,
+	HNS_ROCE_CQ,
+};
+
 enum hns_roce_event {
 	HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
 	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
@@ -220,6 +242,9 @@ enum hns_roce_mtt_type {
 	MTT_TYPE_IDX
 };
 
+#define HNS_ROCE_DB_TYPE_COUNT		2
+#define HNS_ROCE_DB_UNIT_SIZE		4
+
 enum {
 	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
 };
@@ -329,7 +354,7 @@ struct hns_roce_hem_table {
 	unsigned long	num_hem;
 	/* HEM entry record obj total num */
 	unsigned long	num_obj;
-	/*Single obj size */
+	/* Single obj size */
 	unsigned long	obj_size;
 	unsigned long	table_chunk_size;
 	int		lowmem;
@@ -368,25 +393,25 @@ struct hns_roce_mr {
 	u64		size; /* Address range of MR */
 	u32		key; /* Key of MR */
 	u32		pd; /* PD num of MR */
-	u32		access;/* Access permission of MR */
+	u32		access; /* Access permission of MR */
 	u32		npages;
 	int		enabled; /* MR's active status */
 	int		type; /* MR's register type */
-	u64		*pbl_buf;/* MR's PBL space */
+	u64		*pbl_buf; /* MR's PBL space */
 	dma_addr_t	pbl_dma_addr; /* MR's PBL space PA */
-	u32		pbl_size;/* PA number in the PBL */
-	u64		pbl_ba;/* page table address */
-	u32		l0_chunk_last_num;/* L0 last number */
-	u32		l1_chunk_last_num;/* L1 last number */
-	u64		**pbl_bt_l2;/* PBL BT L2 */
-	u64		**pbl_bt_l1;/* PBL BT L1 */
-	u64		*pbl_bt_l0;/* PBL BT L0 */
-	dma_addr_t	*pbl_l2_dma_addr;/* PBL BT L2 dma addr */
-	dma_addr_t	*pbl_l1_dma_addr;/* PBL BT L1 dma addr */
-	dma_addr_t	pbl_l0_dma_addr;/* PBL BT L0 dma addr */
-	u32		pbl_ba_pg_sz;/* BT chunk page size */
-	u32		pbl_buf_pg_sz;/* buf chunk page size */
-	u32		pbl_hop_num;/* multi-hop number */
+	u32		pbl_size; /* PA number in the PBL */
+	u64		pbl_ba; /* page table address */
+	u32		l0_chunk_last_num; /* L0 last number */
+	u32		l1_chunk_last_num; /* L1 last number */
+	u64		**pbl_bt_l2; /* PBL BT L2 */
+	u64		**pbl_bt_l1; /* PBL BT L1 */
+	u64		*pbl_bt_l0; /* PBL BT L0 */
+	dma_addr_t	*pbl_l2_dma_addr; /* PBL BT L2 dma addr */
+	dma_addr_t	*pbl_l1_dma_addr; /* PBL BT L1 dma addr */
+	dma_addr_t	pbl_l0_dma_addr; /* PBL BT L0 dma addr */
+	u32		pbl_ba_pg_sz; /* BT chunk page size */
+	u32		pbl_buf_pg_sz; /* buf chunk page size */
+	u32		pbl_hop_num; /* multi-hop number */
 };
 
 struct hns_roce_mr_table {
@@ -409,16 +434,17 @@ struct hns_roce_wq {
 	u32		max_post;
 	int		max_gs;
 	int		offset;
-	int		wqe_shift;/* WQE size */
+	int		wqe_shift; /* WQE size */
 	u32		head;
 	u32		tail;
 	void __iomem	*db_reg_l;
+	struct workqueue_struct *workq;
 };
 
 struct hns_roce_sge {
 	int		sge_cnt; /* SGE num */
 	int		offset;
-	int		sge_shift;/* SGE size */
+	int		sge_shift; /* SGE size */
 };
 
 struct hns_roce_buf_list {
@@ -437,8 +463,8 @@ struct hns_roce_buf {
 struct hns_roce_db_pgdir {
 	struct list_head	list;
 	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
-	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / 2);
-	unsigned long		*bits[2];
+	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
+	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
 	u32			*page;
 	dma_addr_t		db_dma;
 };
@@ -488,6 +514,7 @@ struct hns_roce_cq {
 	u32			vector;
 	atomic_t		refcount;
 	struct completion	free;
+	struct workqueue_struct	*workq;
 };
 
 struct hns_roce_idx_que {
@@ -575,7 +602,7 @@ struct hns_roce_av {
 	u8	hop_limit;
 	__le32	sl_tclass_flowlabel;
 	u8	dgid[HNS_ROCE_GID_SIZE];
-	u8	mac[6];
+	u8	mac[ETH_ALEN];
 	__le16	vlan;
 	bool	vlan_en;
 };
@@ -666,6 +693,8 @@ struct hns_roce_qp {
 	u8		sl;
 	u8		resp_depth;
 	u8		state;
+	u8		next_state; /* record for flush cqe */
+	int		attr_mask; /* record for flush cqe */
 	u32		access_flags;
 	u32		atomic_rd_en;
 	u32		pkey_index;
@@ -907,6 +936,13 @@ struct hns_roce_work {
 	int		event_type;
 	int		sub_type;
 };
 
+struct hns_roce_flush_work {
+	struct hns_roce_dev	*hr_dev;
+	struct work_struct	work;
+	struct hns_roce_qp	*hr_qp;
+};
+
 struct hns_roce_stat {
 	int cqn;
 	int srqn;
@@ -979,7 +1015,7 @@ struct hns_roce_hw {
 			 dma_addr_t dma_handle, int nent, u32 vector);
 	int (*set_hem)(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, int obj, int step_idx);
-	int (*clear_hem)(struct hns_roce_dev *hr_dev,
+	void (*clear_hem)(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_hem_table *table, int obj,
 			  int step_idx);
 	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
@@ -1048,7 +1084,7 @@ struct hns_roce_dev {
 	struct hns_roce_caps	caps;
 	struct radix_tree_root	qp_table_tree;
 
-	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
+	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
 	u64		sys_image_guid;
 	u32		vendor_id;
 	u32		vendor_part_id;
@@ -1267,6 +1303,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
 				 struct ib_udata *udata);
 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+		     struct hns_roce_cq *cq, enum queue_type type);
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
@@ -1314,6 +1352,16 @@ int hns_roce_fill_res_entry(struct sk_buff *msg,
 int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev);
 void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev);
 
+enum hns_phy_state {
+	HNS_ROCE_PHY_SLEEP	= 1,
+	HNS_ROCE_PHY_POLLING	= 2,
+	HNS_ROCE_PHY_DISABLED	= 3,
+	HNS_ROCE_PHY_TRAINING	= 4,
+	HNS_ROCE_PHY_LINKUP	= 5,
+	HNS_ROCE_PHY_LINKERR	= 6,
+	HNS_ROCE_PHY_TEST	= 7
+};
+
 #ifdef CONFIG_INFINIBAND_HNS_DFX
 enum {
 	RDFX_FUNC_MODIFY_DEVICE,
...
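The header above declares init_flush_work() and struct hns_roce_flush_work, but their implementation is not part of this diff. A minimal sketch of how such a work item might be allocated and queued, assuming GFP_ATOMIC (a flush can be requested from non-sleeping context) and a handler that does the QP-to-error transition; the real code may differ:

#include <linux/slab.h>
#include <linux/workqueue.h>

static void flush_work_handle(struct work_struct *work)
{
    struct hns_roce_flush_work *fwork =
        container_of(work, struct hns_roce_flush_work, work);

    /* The real handler would move fwork->hr_qp to the error state and
     * generate flush CQEs; omitted here.
     */
    kfree(fwork);
}

void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
                     struct hns_roce_cq *cq, enum queue_type type)
{
    struct hns_roce_flush_work *fwork;

    fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
    if (!fwork)
        return;

    fwork->hr_dev = hr_dev;
    fwork->hr_qp = qp;          /* 'type' would pick SQ/RQ/CQ handling */
    INIT_WORK(&fwork->work, flush_work_handle);
    queue_work(cq->workq, &fwork->work); /* per-CQ ordered workqueue */
}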
@@ -189,7 +189,7 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
 					     + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
 					     + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.mtt_hop_num;
 		break;
 	case HEM_TYPE_CQE:
@@ -197,7 +197,7 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
 					     + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
 					     + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.cqe_hop_num;
 		break;
 	case HEM_TYPE_SRQWQE:
@@ -205,7 +205,7 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
 					     + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
 					     + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
 		break;
 	case HEM_TYPE_IDX:
@@ -213,7 +213,7 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
 					     + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
 					     + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.idx_hop_num;
 		break;
 	default:
@@ -246,7 +246,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 	 * MTT/CQE alloc hem for bt pages.
 	 */
 	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
-	chunk_ba_num = mhop->bt_chunk_size / 8;
+	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
 			    mhop->bt_chunk_size;
 	table_idx = (*obj & (table->num_obj - 1)) /
@@ -286,6 +286,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 	struct scatterlist *mem;
 	int order;
 	void *buf;
+	int left;
 
 	WARN_ON(gfp_mask & __GFP_HIGHMEM);
@@ -298,8 +299,8 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 	INIT_LIST_HEAD(&hem->chunk_list);
 
 	order = get_order(hem_alloc_size);
+	left = npages;
 
-	while (npages > 0) {
+	while (left > 0) {
 		if (!chunk) {
 			chunk = kmalloc(sizeof(*chunk),
 				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
@@ -313,7 +314,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 			list_add_tail(&chunk->list, &hem->chunk_list);
 		}
 
-		while (1 << order > npages)
+		while (1 << order > left)
 			--order;
 
 		/*
@@ -331,7 +332,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 		++chunk->npages;
 		++chunk->nsg;
-		npages -= 1 << order;
+		left -= 1 << order;
 	}
 
 	return hem;
@@ -475,7 +476,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
 	buf_chunk_size = mhop.buf_chunk_size;
 	bt_chunk_size = mhop.bt_chunk_size;
 	hop_num = mhop.hop_num;
-	chunk_ba_num = bt_chunk_size / 8;
+	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
 
 	bt_num = hns_roce_get_bt_num(table->type, hop_num);
 	switch (bt_num) {
@@ -521,9 +522,11 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
 	/* set base address to hardware */
 	if (table->type < HEM_TYPE_MTT) {
 		step_idx = 0;
-		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+		if (ret) {
+			dev_err(dev, "set HEM base address to HW failed(%d), type = %d\n",
+				ret, table->type);
 			ret = -ENODEV;
-			dev_err(dev, "set HEM base address to HW failed!\n");
 			goto err_dma_alloc_l1;
 		}
 	}
@@ -545,9 +548,11 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
 
 		/* set base address to hardware */
 		step_idx = 1;
-		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+		if (ret) {
+			dev_err(dev, "set HEM base address to HW failed(%d), type = %d\n",
+				ret, table->type);
 			ret = -ENODEV;
-			dev_err(dev, "set HEM base address to HW failed!\n");
 			goto err_alloc_hem_buf;
 		}
 	}
@@ -585,9 +590,11 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
 		}
 
 		/* set HEM base address to hardware */
-		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+		if (ret) {
+			dev_err(dev, "set HEM base address to HW failed(%d), type = %d\n",
+				ret, table->type);
 			ret = -ENODEV;
-			dev_err(dev, "set HEM base address to HW failed!\n");
 			goto err_alloc_hem_buf;
 		}
 	} else if (hop_num == 2) {
@@ -685,7 +692,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 	bt_chunk_size = mhop.bt_chunk_size;
 	hop_num = mhop.hop_num;
-	chunk_ba_num = bt_chunk_size / 8;
+	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
 
 	bt_num = hns_roce_get_bt_num(table->type, hop_num);
 	switch (bt_num) {
@@ -713,17 +720,12 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 		return;
 	}
 
-	if (table->type < HEM_TYPE_MTT && hop_num == 1) {
-		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
-			dev_warn(dev, "Clear HEM base address failed.\n");
-	} else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
-		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
-			dev_warn(dev, "Clear HEM base address failed.\n");
-	} else if (table->type < HEM_TYPE_MTT &&
-		   hop_num == HNS_ROCE_HOP_NUM_0) {
-		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
-			dev_warn(dev, "Clear HEM base address failed.\n");
-	}
+	if (table->type < HEM_TYPE_MTT && hop_num == 1)
+		hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
+	else if (table->type < HEM_TYPE_MTT && hop_num == 2)
+		hr_dev->hw->clear_hem(hr_dev, table, obj, 2);
+	else if (table->type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0)
+		hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
 
 	/*
 	 * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCC_CTX.
@@ -736,9 +738,8 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 		start_idx = mhop.l0_idx * chunk_ba_num;
 		if (hns_roce_check_hem_null(table->hem, start_idx,
 					    chunk_ba_num)) {
-			if (table->type < HEM_TYPE_MTT &&
-			    hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
-				dev_warn(dev, "Clear HEM base address failed.\n");
+			if (table->type < HEM_TYPE_MTT)
+				hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
 
 			dma_free_coherent(dev, bt_chunk_size,
 					  table->bt_l0[mhop.l0_idx],
@@ -750,8 +751,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 			    mhop.l1_idx * chunk_ba_num;
 		if (hns_roce_check_hem_null(table->hem, start_idx,
 					    chunk_ba_num)) {
-			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
-				dev_warn(dev, "Clear HEM base address failed.\n");
+			hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
 
 			dma_free_coherent(dev, bt_chunk_size,
 					  table->bt_l1[bt_l1_idx],
@@ -761,9 +761,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 		start_idx = mhop.l0_idx * chunk_ba_num;
 		if (hns_roce_check_bt_null(table->bt_l1, start_idx,
 					   chunk_ba_num)) {
-			if (hr_dev->hw->clear_hem(hr_dev, table, obj,
-						  0))
-				dev_warn(dev, "Clear HEM base address failed.\n");
+			hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
 
 			dma_free_coherent(dev, bt_chunk_size,
 					  table->bt_l0[mhop.l0_idx],
@@ -779,7 +777,6 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 			struct hns_roce_hem_table *table, unsigned long obj)
 {
-	struct device *dev = hr_dev->dev;
 	unsigned long i;
 
 	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
@@ -794,8 +791,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 	if (--table->hem[i]->refcount == 0) {
 		/* Clear HEM base address */
-		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
-			dev_warn(dev, "Clear HEM base address failed.\n");
+		hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
 
 		hns_roce_free_hem(hr_dev, table->hem[i]);
 		table->hem[i] = NULL;
@@ -832,12 +828,14 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 		idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
 		dma_offset = offset = idx_offset * table->obj_size;
 	} else {
-		hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+		if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
+			goto out;
 		/* mtt mhop */
 		i = mhop.l0_idx;
 		j = mhop.l1_idx;
 		if (mhop.hop_num == 2)
-			hem_idx = i * (mhop.bt_chunk_size / 8) + j;
+			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
 		else if (mhop.hop_num == 1 ||
 			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
 			hem_idx = i;
@@ -886,7 +884,9 @@ int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
 	int ret;
 
 	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
-		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+		ret = get_hem_table_config(hr_dev, &mhop, table->type);
+		if (ret)
+			return ret;
 		inc = mhop.bt_chunk_size / table->obj_size;
 	}
@@ -916,7 +916,8 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
 	unsigned long i;
 
 	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
-		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+		if (get_hem_table_config(hr_dev, &mhop, table->type))
+			return;
 		inc = mhop.bt_chunk_size / table->obj_size;
 	}
@@ -958,7 +959,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 		obj_per_chunk = buf_chunk_size / obj_size;
 		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
-		bt_chunk_num = bt_chunk_size / 8;
+		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
 		if (type >= HEM_TYPE_MTT)
 			num_bt_l0 = bt_chunk_num;
@@ -1035,18 +1036,21 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
 {
 	struct hns_roce_hem_mhop mhop;
 	u32 buf_chunk_size;
-	int i;
+	int ret;
 	u64 obj;
+	int i;
 
-	hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
-	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
-			mhop.bt_chunk_size;
+	ret = get_hem_table_config(hr_dev, &mhop, table->type);
+	if (!ret) {
+		buf_chunk_size = table->type < HEM_TYPE_MTT ?
+				 mhop.buf_chunk_size : mhop.bt_chunk_size;
 
-	for (i = 0; i < table->num_hem; ++i) {
-		obj = i * buf_chunk_size / table->obj_size;
-		if (table->hem[i])
-			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
-	}
+		for (i = 0; i < table->num_hem; ++i) {
+			obj = i * buf_chunk_size / table->obj_size;
+			if (table->hem[i])
+				hns_roce_table_mhop_put(hr_dev, table, obj, 0);
+		}
+	}
 
 	kfree(table->hem);
 	table->hem = NULL;
@@ -1063,7 +1067,6 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 				struct hns_roce_hem_table *table)
 {
-	struct device *dev = hr_dev->dev;
 	unsigned long i;
 
 	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
@@ -1073,9 +1076,8 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 	for (i = 0; i < table->num_hem; ++i)
 		if (table->hem[i]) {
-			if (hr_dev->hw->clear_hem(hr_dev, table,
-			    i * table->table_chunk_size / table->obj_size, 0))
-				dev_err(dev, "Clear HEM base address failed.\n");
+			hr_dev->hw->clear_hem(hr_dev, table,
+				i * table->table_chunk_size / table->obj_size, 0);
 
 			hns_roce_free_hem(hr_dev, table->hem[i]);
 		}
...
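BA_BYTE_LEN makes the repeated "/ 8" in hem.c self-describing: each entry in a base-address table chunk is one 8-byte DMA address, so a chunk indexes chunk_size / BA_BYTE_LEN lower-level chunks. Spelled out for a 4 KiB chunk:

#define DEMO_BA_BYTE_LEN 8 /* BA_BYTE_LEN: one 8-byte DMA address per entry */

_Static_assert(4096 / DEMO_BA_BYTE_LEN == 512,
               "a 4 KiB BT chunk holds 512 base addresses");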
@@ -62,16 +62,16 @@ enum {
 	 (sizeof(struct scatterlist)))
 
 #define check_whether_bt_num_3(type, hop_num) \
-	(type < HEM_TYPE_MTT && hop_num == 2)
+	((type) < HEM_TYPE_MTT && (hop_num) == 2)
 
 #define check_whether_bt_num_2(type, hop_num) \
-	((type < HEM_TYPE_MTT && hop_num == 1) || \
-	(type >= HEM_TYPE_MTT && hop_num == 2))
+	(((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == 2))
 
 #define check_whether_bt_num_1(type, hop_num) \
-	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
-	(type >= HEM_TYPE_MTT && hop_num == 1) || \
-	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+	(((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
 
 enum {
 	HNS_ROCE_HEM_PAGE_SHIFT = 12,
...
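The hem.h hunk parenthesizes every macro parameter. Without the parentheses, an argument containing a lower-precedence operator mis-parses, which the following stand-in macros demonstrate:

#define DEMO_HEM_TYPE_MTT 3 /* stand-in value for the demonstration */

#define CHECK_OLD(type, hop_num) (type < DEMO_HEM_TYPE_MTT && hop_num == 2)
#define CHECK_NEW(type, hop_num) ((type) < DEMO_HEM_TYPE_MTT && (hop_num) == 2)

/* With a conditional expression as the first argument,
 *   CHECK_OLD(is_cqe ? 4 : 1, n)
 * expands to
 *   (is_cqe ? 4 : 1 < DEMO_HEM_TYPE_MTT && n == 2)
 * where everything after the ':' binds to the else branch, so the result is
 * the truthy constant 4 whenever is_cqe is set. CHECK_NEW expands to
 *   ((is_cqe ? 4 : 1) < DEMO_HEM_TYPE_MTT && (n) == 2)
 * and evaluates the argument as a whole.
 */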
@@ -53,17 +53,15 @@ int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev,
 {
 	struct hns_roce_v2_mpt_entry *mpt_ctx;
 	struct hns_roce_cmd_mailbox *mailbox;
+	int key = hr_dev->hr_stat.key;
+	int cur_len = 0;
+	char *out = buf;
 	u64 bt0_ba = 0;
 	u64 bt1_ba = 0;
 	int *mpt;
 	int ret;
 	int i;
-	char *buff;
-	int key = hr_dev->hr_stat.key;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -98,43 +96,41 @@ int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev,
 	else
 		goto err_mailbox;
 
-	*desc += sprintf(buff + *desc, "MPT(0x%x) BT0: 0x%llx\n", key, bt0_ba);
-	*desc += sprintf(buff + *desc, "MPT(0x%x) BT1: 0x%llx\n", key, bt1_ba);
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"MPT(0x%x) BT0: 0x%llx\n", key, bt0_ba);
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"MPT(0x%x) BT1: 0x%llx\n", key, bt1_ba);
 
 	mpt = (int *)mpt_ctx;
 	for (i = 0; i < (sizeof(*mpt_ctx) >> 2); i += 8) {
-		*desc += sprintf(buff + *desc,
+		hns_roce_v2_sysfs_print(out, cur_len,
 			"MPT(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
 			key, *mpt, *(mpt + 1), *(mpt + 2),
 			*(mpt + 3), *(mpt + 4), *(mpt + 5),
 			*(mpt + 6), *(mpt + 7));
 		mpt += 8;
 	}
 
-	memcpy(buf, buff, *desc);
+	*desc += cur_len;
 
 err_mailbox:
 	kfree(mpt_ctx);
 err_cmd:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-	kfree(buff);
 
 	return ret;
 }
 
 int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev,
 				char *buf, int *desc)
 {
-	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_srq_context *srq_context;
+	struct hns_roce_cmd_mailbox *mailbox;
+	int srqn = hr_dev->hr_stat.srqn;
+	int cur_len = 0;
+	char *out = buf;
 	u64 bt0_ba = 0;
 	u64 bt1_ba = 0;
 	int *srqc;
-	int ret;
 	int i = 0;
-	char *buff;
-	int srqn = hr_dev->hr_stat.srqn;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
+	int ret;
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR(mailbox))
@@ -162,26 +158,25 @@ int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev,
 	else
 		goto err_mailbox;
 
-	*desc += sprintf(buff + *desc,
-			"SRQC(0x%x) BT0: 0x%llx\n", srqn, bt0_ba);
-	*desc += sprintf(buff + *desc,
-			"SRQC(0x%x) BT1: 0x%llx\n", srqn, bt1_ba);
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"SRQC(0x%x) BT0: 0x%llx\n", srqn, bt0_ba);
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"SRQC(0x%x) BT1: 0x%llx\n", srqn, bt1_ba);
 
 	srqc = (int *)srq_context;
 	for (i = 0; i < (sizeof(*srq_context) >> 2); i += 8) {
-		*desc += sprintf(buff + *desc,
+		hns_roce_v2_sysfs_print(out, cur_len,
 			"SRQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
 			srqn, *srqc, *(srqc + 1), *(srqc + 2),
 			*(srqc + 3), *(srqc + 4), *(srqc + 5),
 			*(srqc + 6), *(srqc + 7));
 		srqc += 8;
 	}
 
-	memcpy(buf, buff, *desc);
+	*desc += cur_len;
 
 err_mailbox:
 	kfree(srq_context);
 err_cmd:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-	kfree(buff);
 
 	return ret;
 }
@@ -190,17 +185,14 @@ int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev,
 {
 	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_v2_qp_context *qp_context;
+	int qpn = hr_dev->hr_stat.qpn;
+	int cur_len = 0;
+	char *out = buf;
 	u64 bt0_ba = 0;
 	u64 bt1_ba = 0;
 	int *qpc;
-	int ret;
 	int i = 0;
-	char *buff;
-	int qpn = hr_dev->hr_stat.qpn;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
+	int ret;
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR(mailbox))
@@ -236,24 +228,25 @@ int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev,
 	else
 		goto err_mailbox;
 
-	*desc += sprintf(buff + *desc, "QPC(0x%x) BT0: 0x%llx\n", qpn, bt0_ba);
-	*desc += sprintf(buff + *desc, "QPC(0x%x) BT1: 0x%llx\n", qpn, bt1_ba);
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"QPC(0x%x) BT0: 0x%llx\n", qpn, bt0_ba);
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"QPC(0x%x) BT1: 0x%llx\n", qpn, bt1_ba);
 
 	qpc = (int *)qp_context;
 	for (i = 0; i < (sizeof(*qp_context) >> 2); i += 8) {
-		*desc += sprintf(buff + *desc,
+		hns_roce_v2_sysfs_print(out, cur_len,
 			"QPC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
 			qpn, *qpc, *(qpc + 1), *(qpc + 2),
 			*(qpc + 3), *(qpc + 4), *(qpc + 5),
 			*(qpc + 6), *(qpc + 7));
 		qpc += 8;
 	}
 
-	memcpy(buf, buff, *desc);
+	*desc += cur_len;
 
 err_mailbox:
 	kfree(qp_context);
 err_cmd:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-	kfree(buff);
 
 	return ret;
 }
@@ -261,24 +254,18 @@ int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev,
 int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev,
 				char *buf, int *desc)
 {
-	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_eq_context *eq_context;
+	struct hns_roce_cmd_mailbox *mailbox;
+	int aeqn = hr_dev->hr_stat.aeqn;
+	int cur_len = 0;
+	char *out = buf;
+	int i = 0;
 	int *aeqc;
 	int ret;
-	int i = 0;
-	char *buff;
-	int aeqn;
-
-	aeqn = hr_dev->hr_stat.aeqn;
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
-	if (IS_ERR(mailbox)) {
-		ret = PTR_ERR(mailbox);
-		goto err_aeqc_buff;
-	}
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
 
 	eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL);
 	if (!eq_context) {
@@ -296,23 +283,20 @@ int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev,
 	aeqc = (int *)eq_context;
 	for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) {
-		*desc += sprintf(buff + *desc,
+		hns_roce_v2_sysfs_print(out, cur_len,
 			"AEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
 			aeqn, *aeqc, *(aeqc + 1), *(aeqc + 2),
 			*(aeqc + 3), *(aeqc + 4), *(aeqc + 5),
 			*(aeqc + 6), *(aeqc + 7));
 		aeqc += 8;
 	}
 
-	memcpy(buf, buff, *desc);
+	*desc += cur_len;
 
 err_mailbox:
 	kfree(eq_context);
 err_context:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-err_aeqc_buff:
-	kfree(buff);
 
 	return ret;
 }
 
 #define CMD_NUM_QUERY_PKT_CNT		(8)
@@ -330,13 +314,11 @@ int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev,
 	struct hns_roce_cmq_desc desc_cnp_tx = {0};
 	struct rdfx_query_cnp_tx_cnt *resp_cnp_tx =
 			(struct rdfx_query_cnp_tx_cnt *)desc_cnp_tx.data;
+	int cur_len = 0;
+	char *out = buf;
 	int status;
 	int i;
-	char *buff;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
 
 	for (i = 0; i < CMD_NUM_QUERY_PKT_CNT; i++) {
 		hns_roce_cmq_setup_basic_desc(&desc[i],
 					      HNS_ROCE_OPC_QUEYR_PKT_CNT, true);
@@ -358,7 +340,7 @@ int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev,
 	if (status)
 		return status;
 
-	if (hr_dev->pci_dev->revision == 0x21) {
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
 		hns_roce_cmq_setup_basic_desc(&desc_cnp_rx,
 				HNS_ROCE_OPC_QUEYR_CNP_RX_CNT, true);
 		status = hns_roce_cmq_send(hr_dev, &desc_cnp_rx, 1);
@@ -372,71 +354,69 @@ int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev,
 			return status;
 	}
 
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"RX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[0]->rc_pkt_num, resp_query[1]->rc_pkt_num,
 		resp_query[2]->rc_pkt_num, resp_query[3]->rc_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"RX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[0]->uc_pkt_num, resp_query[1]->uc_pkt_num,
 		resp_query[2]->uc_pkt_num, resp_query[3]->uc_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"RX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[0]->ud_pkt_num, resp_query[1]->ud_pkt_num,
 		resp_query[2]->ud_pkt_num, resp_query[3]->ud_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"RX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[0]->xrc_pkt_num, resp_query[1]->xrc_pkt_num,
 		resp_query[2]->xrc_pkt_num, resp_query[3]->xrc_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"RX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[0]->total_pkt_num, resp_query[1]->total_pkt_num,
 		resp_query[2]->total_pkt_num, resp_query[3]->total_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"RX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[0]->error_pkt_num, resp_query[1]->error_pkt_num,
 		resp_query[2]->error_pkt_num, resp_query[3]->error_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"TX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[4]->rc_pkt_num, resp_query[5]->rc_pkt_num,
 		resp_query[6]->rc_pkt_num, resp_query[7]->rc_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"TX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[4]->uc_pkt_num, resp_query[5]->uc_pkt_num,
 		resp_query[6]->uc_pkt_num, resp_query[7]->uc_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"TX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[4]->ud_pkt_num, resp_query[5]->ud_pkt_num,
 		resp_query[6]->ud_pkt_num, resp_query[7]->ud_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"TX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[4]->xrc_pkt_num, resp_query[5]->xrc_pkt_num,
 		resp_query[6]->xrc_pkt_num, resp_query[7]->xrc_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
		"TX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"TX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[4]->total_pkt_num, resp_query[5]->total_pkt_num,
 		resp_query[6]->total_pkt_num, resp_query[7]->total_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"TX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_query[4]->error_pkt_num, resp_query[5]->error_pkt_num,
 		resp_query[6]->error_pkt_num, resp_query[7]->error_pkt_num);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"CQE : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_cqe->port0_cqe, resp_cqe->port1_cqe,
 		resp_cqe->port2_cqe, resp_cqe->port3_cqe);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"CNP RX : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_cnp_rx->port0_cnp_rx, resp_cnp_rx->port1_cnp_rx,
 		resp_cnp_rx->port2_cnp_rx, resp_cnp_rx->port3_cnp_rx);
-	*buff_size += sprintf(buff + *buff_size,
+	hns_roce_v2_sysfs_print(out, cur_len,
 		"CNP TX : 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		resp_cnp_tx->port0_cnp_tx, resp_cnp_tx->port1_cnp_tx,
 		resp_cnp_tx->port2_cnp_tx, resp_cnp_tx->port3_cnp_tx);
 
-	memcpy(buf, buff, *buff_size);
-	kfree(buff);
+	*buff_size += cur_len;
 
 	return status;
 }
 
 int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev,
@@ -444,21 +424,16 @@ int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev,
 {
 	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_eq_context *eq_context;
+	int ceqn = hr_dev->hr_stat.ceqn;
+	int cur_len = 0;
+	char *out = buf;
 	int *ceqc;
-	int ret;
 	int i = 0;
-	char *buff;
-	int ceqn = hr_dev->hr_stat.ceqn;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
+	int ret;
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
-
-	if (IS_ERR(mailbox)) {
-		ret = PTR_ERR(mailbox);
-		goto err_ceqc_buff;
-	}
+	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
 
 	eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL);
 	if (!eq_context) {
@@ -475,22 +450,19 @@ int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev,
 		goto err_mailbox;
 
 	ceqc = (int *)eq_context;
 	for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) {
-		*desc += sprintf(buff + *desc,
+		hns_roce_v2_sysfs_print(out, cur_len,
 			"CEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
 			ceqn, *ceqc, *(ceqc + 1), *(ceqc + 2),
 			*(ceqc + 3), *(ceqc + 4), *(ceqc + 5),
 			*(ceqc + 6), *(ceqc + 7));
 		ceqc += 8;
 	}
 
-	memcpy(buf, buff, *desc);
+	*desc += cur_len;
 
 err_mailbox:
 	kfree(eq_context);
 err_context:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-err_ceqc_buff:
-	kfree(buff);
 
 	return ret;
 }
@@ -501,12 +473,10 @@ int hns_roce_v2_query_cmd_stat(struct hns_roce_dev *hr_dev,
 	struct hns_roce_query_mbdb_cnt *resp_cnt =
 		(struct hns_roce_query_mbdb_cnt *)desc_cnt.data;
 	struct hns_roce_cmq_desc desc_dfx;
+	int cur_len = 0;
+	char *out = buf;
 	int status;
-	char *buff;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
 
 	hns_roce_cmq_setup_basic_desc(&desc_cnt,
 				      HNS_ROCE_OPC_QUEYR_MBDB_CNT, true);
 	status = hns_roce_cmq_send(hr_dev, &desc_cnt, 1);
@@ -519,21 +489,19 @@ int hns_roce_v2_query_cmd_stat(struct hns_roce_dev *hr_dev,
 	if (status)
 		return status;
 
-	*desc += sprintf(buff + *desc, "MB ISSUE CNT : 0x%08x\n",
+	hns_roce_v2_sysfs_print(out, cur_len, "MB ISSUE CNT : 0x%08x\n",
 			 resp_cnt->mailbox_issue_cnt);
-	*desc += sprintf(buff + *desc, "MB EXEC CNT : 0x%08x\n",
+	hns_roce_v2_sysfs_print(out, cur_len, "MB EXEC CNT : 0x%08x\n",
 			 resp_cnt->mailbox_exe_cnt);
-	*desc += sprintf(buff + *desc, "DB ISSUE CNT : 0x%08x\n",
+	hns_roce_v2_sysfs_print(out, cur_len, "DB ISSUE CNT : 0x%08x\n",
 			 resp_cnt->doorbell_issue_cnt);
-	*desc += sprintf(buff + *desc, "DB EXEC CNT : 0x%08x\n",
+	hns_roce_v2_sysfs_print(out, cur_len, "DB EXEC CNT : 0x%08x\n",
 			 resp_cnt->doorbell_exe_cnt);
-	*desc += sprintf(buff + *desc, "EQDB ISSUE CNT : 0x%08x\n",
+	hns_roce_v2_sysfs_print(out, cur_len, "EQDB ISSUE CNT : 0x%08x\n",
 			 resp_cnt->eq_doorbell_issue_cnt);
-	*desc += sprintf(buff + *desc, "EQDB EXEC CNT : 0x%08x\n",
+	hns_roce_v2_sysfs_print(out, cur_len, "EQDB EXEC CNT : 0x%08x\n",
 			 resp_cnt->eq_doorbell_exe_cnt);
-	memcpy(buf, buff, *desc);
-	kfree(buff);
+	*desc += cur_len;
 
 	return status;
 }
@@ -583,11 +551,13 @@ int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev,
 				char *buf, int *desc)
 {
 	struct hns_roce_v2_cq_context *cq_context;
+	int cqn = hr_dev->hr_stat.cqn;
+	int cur_len = 0;
+	char *out = buf;
 	u64 bt0_ba = 0;
 	u64 bt1_ba = 0;
 	int *cqc;
 	int i, ret;
-	int cqn = hr_dev->hr_stat.cqn;
 
 	cq_context = kzalloc(sizeof(*cq_context), GFP_KERNEL);
 	if (!cq_context)
@@ -596,18 +566,22 @@ int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev,
 	ret = hns_roce_v2_query_cqc(hr_dev, &bt0_ba, &bt1_ba, cqn, cq_context);
 	if (ret)
 		goto out;
-	*desc += sprintf(buf + *desc, "CQC(0x%x) BT0: 0x%llx\n", cqn, bt0_ba);
-	*desc += sprintf(buf + *desc, "CQC(0x%x) BT1: 0x%llx\n", cqn, bt1_ba);
+
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"CQC(0x%x) BT0: 0x%llx\n", cqn, bt0_ba);
+	hns_roce_v2_sysfs_print(out, cur_len,
+				"CQC(0x%x) BT1: 0x%llx\n", cqn, bt1_ba);
 
 	cqc = (int *)cq_context;
 	for (i = 0; i < (sizeof(*cq_context) >> 2); i += 8) {
-		*desc += sprintf(buf + *desc,
+		hns_roce_v2_sysfs_print(out, cur_len,
"CQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", "CQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n",
cqn, *cqc, *(cqc + 1), *(cqc + 2), cqn, *cqc, *(cqc + 1), *(cqc + 2),
*(cqc + 3), *(cqc + 4), *(cqc + 5), *(cqc + 3), *(cqc + 4), *(cqc + 5),
*(cqc + 6), *(cqc + 7)); *(cqc + 6), *(cqc + 7));
cqc += 8; cqc += 8;
} }
*desc += cur_len;
out: out:
kfree(cq_context); kfree(cq_context);
return ret; return ret;
......
...@@ -825,7 +825,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) ...@@ -825,7 +825,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
attr.dest_qp_num = hr_qp->qpn; attr.dest_qp_num = hr_qp->qpn;
memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
hr_dev->dev_addr[port], hr_dev->dev_addr[port],
MAC_ADDR_OCTET_NUM); ETH_ALEN);
memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
...@@ -2482,7 +2482,7 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) ...@@ -2482,7 +2482,7 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return ret; return ret;
} }
static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, static void hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, struct hns_roce_hem_table *table, int obj,
int step_idx) int step_idx)
{ {
...@@ -2513,9 +2513,9 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, ...@@ -2513,9 +2513,9 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
break; break;
case HEM_TYPE_SRQC: case HEM_TYPE_SRQC:
dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n"); dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
return -EINVAL; return;
default: default:
return 0; return;
} }
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
...@@ -2533,7 +2533,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, ...@@ -2533,7 +2533,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
flags); flags);
return -EBUSY; return;
} }
} else { } else {
break; break;
...@@ -2548,7 +2548,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, ...@@ -2548,7 +2548,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
return 0; return;
} }
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
......
...@@ -75,20 +75,15 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, ...@@ -75,20 +75,15 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
struct hns_roce_mr *mr = to_hr_mr(wr->mr); struct hns_roce_mr *mr = to_hr_mr(wr->mr);
/* use ib_access_flags */ /* use ib_access_flags */
roce_set_bit(rc_sq_wqe->byte_4, roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
wr->access & IB_ACCESS_MW_BIND ? 1 : 0); wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4, roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4, roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
V2_RC_FRMR_WQE_BYTE_4_RR_S,
wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0); wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4, roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
V2_RC_FRMR_WQE_BYTE_4_RW_S,
wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0); wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
roce_set_bit(rc_sq_wqe->byte_4, roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
V2_RC_FRMR_WQE_BYTE_4_LW_S,
wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0); wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
/* Data structure reuse may lead to confusion */ /* Data structure reuse may lead to confusion */
...@@ -105,8 +100,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, ...@@ -105,8 +100,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M, V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S, V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_bit(fseg->mode_buf_pg_sz, roce_set_bit(fseg->mode_buf_pg_sz, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S,
V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0); 0);
} }
#ifdef CONFIG_KERNEL_419 #ifdef CONFIG_KERNEL_419
...@@ -206,7 +201,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -206,7 +201,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (wr->opcode == IB_WR_RDMA_READ) { if (wr->opcode == IB_WR_RDMA_READ) {
*bad_wr = wr; *bad_wr = wr;
dev_err(hr_dev->dev, "Not support inline data!\n"); dev_err(hr_dev->dev, "Not support inline data in rdma read!\n");
return -EINVAL; return -EINVAL;
} }
...@@ -250,11 +245,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -250,11 +245,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
return 0; return 0;
} }
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
#ifdef CONFIG_KERNEL_419 #ifdef CONFIG_KERNEL_419
static int hns_roce_v2_post_send(struct ib_qp *ibqp, static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr, const struct ib_send_wr *wr,
...@@ -273,13 +263,11 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -273,13 +263,11 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct hns_roce_wqe_frmr_seg *fseg; struct hns_roce_wqe_frmr_seg *fseg;
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db; struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr; unsigned int sge_ind;
unsigned int sge_ind = 0;
unsigned int owner_bit; unsigned int owner_bit;
unsigned long flags; unsigned long flags;
unsigned int ind; unsigned int ind;
void *wqe = NULL; void *wqe = NULL;
int attr_mask;
u32 tmp_len; u32 tmp_len;
int ret = 0; int ret = 0;
u32 hr_op; u32 hr_op;
...@@ -292,14 +280,16 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -292,14 +280,16 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ibqp->qp_type != IB_QPT_UD) && ibqp->qp_type != IB_QPT_UD) &&
(ibqp->qp_type != IB_QPT_XRC_INI) && (ibqp->qp_type != IB_QPT_XRC_INI) &&
(ibqp->qp_type != IB_QPT_XRC_TGT)) { (ibqp->qp_type != IB_QPT_XRC_TGT)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type); dev_err(dev, "Not supported QP type, type-0x%x, qpn-0x%x!\n",
ibqp->qp_type, ibqp->qp_num);
*bad_wr = wr; *bad_wr = wr;
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT || if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
qp->state == IB_QPS_RTR)) { qp->state == IB_QPS_RTR)) {
dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state); dev_err(dev, "Post WQE fail, QP(0x%x) state %d err!\n",
ibqp->qp_num, qp->state);
*bad_wr = wr; *bad_wr = wr;
return -EINVAL; return -EINVAL;
} }
...@@ -314,6 +304,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -314,6 +304,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
ret = -ENOMEM; ret = -ENOMEM;
*bad_wr = wr; *bad_wr = wr;
dev_err(dev, "qp(0x%x): wq overflow, nreq=0x%x\n",
ibqp->qp_num, nreq);
goto out; goto out;
} }
...@@ -598,7 +590,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -598,7 +590,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ind++; ind++;
} else { } else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); dev_err(dev, "Illegal qp(0x%x) type:0x%x\n",
ibqp->qp_num, ibqp->qp_type);
spin_unlock_irqrestore(&qp->sq.lock, flags); spin_unlock_irqrestore(&qp->sq.lock, flags);
*bad_wr = wr; *bad_wr = wr;
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -629,18 +622,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -629,18 +622,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qp->sq_next_wqe = ind; qp->sq_next_wqe = ind;
qp->next_sge = sge_ind; qp->next_sge = sge_ind;
if (qp->state == IB_QPS_ERR) { if (qp->state == IB_QPS_ERR)
attr_mask = IB_QP_STATE; init_flush_work(hr_dev, qp, NULL, HNS_ROCE_SQ);
attr.qp_state = IB_QPS_ERR;
ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
qp->state, IB_QPS_ERR);
if (ret) {
spin_unlock_irqrestore(&qp->sq.lock, flags);
*bad_wr = wr;
return ret;
}
}
} }
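init_flush_work(), which replaces the inline modify-QP-to-error sequence above, is defined elsewhere in the patch. A sketch of the likely shape, assuming a work item that drives the QP to IB_QPS_ERR in process context instead of issuing the mailbox command under the SQ lock (the struct and handler names here are illustrative, not the driver's):

struct hns_roce_flush_work {			/* hypothetical container */
	struct hns_roce_dev *hr_dev;
	struct hns_roce_qp *hr_qp;
	struct work_struct work;
};

static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_flush_work *fw =
		container_of(work, struct hns_roce_flush_work, work);
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	/* the modify-QP mailbox command now runs outside sq.lock/rq.lock */
	if (hns_roce_v2_modify_qp(&fw->hr_qp->ibqp, &attr, IB_QP_STATE,
				  fw->hr_qp->state, IB_QPS_ERR))
		dev_warn(fw->hr_dev->dev, "QP(0x%lx) flush to ERR failed\n",
			 fw->hr_qp->qpn);

	kfree(fw);
}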
rdfx_inc_sq_db_cnt(hr_dev, ibqp->qp_num); rdfx_inc_sq_db_cnt(hr_dev, ibqp->qp_num);
rdfx_put_rdfx_qp(hr_dev, ibqp->qp_num); rdfx_put_rdfx_qp(hr_dev, ibqp->qp_num);
...@@ -664,10 +647,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -664,10 +647,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list; struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct ib_qp_attr attr;
unsigned long flags; unsigned long flags;
void *wqe = NULL; void *wqe = NULL;
int attr_mask;
int ret = 0; int ret = 0;
int nreq; int nreq;
int ind; int ind;
...@@ -694,7 +675,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -694,7 +675,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
} }
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", dev_err(dev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs); wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL; ret = -EINVAL;
*bad_wr = wr; *bad_wr = wr;
...@@ -742,19 +723,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -742,19 +723,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
if (hr_qp->state == IB_QPS_ERR) { if (hr_qp->state == IB_QPS_ERR)
attr_mask = IB_QP_STATE; init_flush_work(hr_dev, hr_qp, NULL, HNS_ROCE_RQ);
attr.qp_state = IB_QPS_ERR;
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
attr_mask, hr_qp->state,
IB_QPS_ERR);
if (ret) {
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
*bad_wr = wr;
return ret;
}
}
rdfx_inc_rq_db_cnt(hr_dev, hr_qp->qpn); rdfx_inc_rq_db_cnt(hr_dev, hr_qp->qpn);
} }
...@@ -1083,6 +1054,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, ...@@ -1083,6 +1054,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
spin_lock_bh(&csq->lock); spin_lock_bh(&csq->lock);
if (num > hns_roce_cmq_space(csq)) { if (num > hns_roce_cmq_space(csq)) {
dev_err(hr_dev->dev, "cmq num(%d) is out of space %p\n",
num, csq);
spin_unlock_bh(&csq->lock); spin_unlock_bh(&csq->lock);
return -EBUSY; return -EBUSY;
} }
...@@ -1187,8 +1160,10 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) ...@@ -1187,8 +1160,10 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true); hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1); ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) if (ret) {
dev_warn(hr_dev->dev, "query hw version failed(%d)\n", ret);
return ret; return ret;
}
resp = (struct hns_roce_query_version *)desc.data; resp = (struct hns_roce_query_version *)desc.data;
hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version); hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
...@@ -1236,7 +1211,7 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval, ...@@ -1236,7 +1211,7 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
if (reset_cnt != hr_dev->reset_cnt) { if (reset_cnt != hr_dev->reset_cnt) {
hr_dev->dis_db = true; hr_dev->dis_db = true;
hr_dev->is_reset = true; hr_dev->is_reset = true;
dev_warn(hr_dev->dev, "Func clear success after reset.\n"); dev_info(hr_dev->dev, "Func clear success after reset.\n");
} else if (hw_resetting) { } else if (hw_resetting) {
hr_dev->dis_db = true; hr_dev->dis_db = true;
...@@ -1246,11 +1221,11 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval, ...@@ -1246,11 +1221,11 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
while (time_before(jiffies, end)) { while (time_before(jiffies, end)) {
if (!ops->get_hw_reset_stat(handle)) { if (!ops->get_hw_reset_stat(handle)) {
hr_dev->is_reset = true; hr_dev->is_reset = true;
dev_warn(hr_dev->dev, dev_info(hr_dev->dev,
"Func clear success after reset.\n"); "Func clear success after reset.\n");
return; return;
} }
msleep(20); msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
} }
dev_warn(hr_dev->dev, "Func clear failed.\n"); dev_warn(hr_dev->dev, "Func clear failed.\n");
...@@ -1264,14 +1239,14 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval, ...@@ -1264,14 +1239,14 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
if (ops->ae_dev_reset_cnt(handle) != if (ops->ae_dev_reset_cnt(handle) !=
hr_dev->reset_cnt) { hr_dev->reset_cnt) {
hr_dev->is_reset = true; hr_dev->is_reset = true;
dev_warn(hr_dev->dev, dev_info(hr_dev->dev,
"Func clear success after reset.\n"); "Func clear success after sw reset\n");
return; return;
} }
msleep(20); msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
} }
dev_warn(hr_dev->dev, "Func clear failed.\n"); dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
} else { } else {
if (retval && !flag) if (retval && !flag)
dev_warn(hr_dev->dev, dev_warn(hr_dev->dev,
...@@ -1283,9 +1258,9 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval, ...@@ -1283,9 +1258,9 @@ static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
static void hns_roce_query_func_num(struct hns_roce_dev *hr_dev) static void hns_roce_query_func_num(struct hns_roce_dev *hr_dev)
{ {
int ret = 0;
struct hns_roce_cmq_desc desc; struct hns_roce_cmq_desc desc;
struct hns_roce_pf_func_num *resp; struct hns_roce_pf_func_num *resp;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_VF_NUM, true); hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_VF_NUM, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1); ret = hns_roce_cmq_send(hr_dev, &desc, 1);
...@@ -1325,7 +1300,7 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id) ...@@ -1325,7 +1300,7 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id)
end = msecs_to_jiffies(HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS) + jiffies; end = msecs_to_jiffies(HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS) + jiffies;
msleep(40); msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
while (time_before(jiffies, end)) { while (time_before(jiffies, end)) {
if (hns_roce_func_clr_chk_rst(hr_dev)) if (hns_roce_func_clr_chk_rst(hr_dev))
goto out; goto out;
...@@ -1336,7 +1311,7 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id) ...@@ -1336,7 +1311,7 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id)
ret = hns_roce_cmq_send(hr_dev, &desc, 1); ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) { if (ret) {
msleep(20); msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
continue; continue;
} }
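The named waits introduced in these hunks are not defined in this excerpt, but the msleep() literals they replace pin their values down. Presumably, in the v2 header:

/* Values inferred from the msleep() literals they replace (sketch). */
#define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT		20	/* ms */
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL	40	/* ms */
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT	20	/* ms */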
...@@ -1355,9 +1330,9 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id) ...@@ -1355,9 +1330,9 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id)
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{ {
int i; int i;
int vf_num = 0;/*should be (hr_dev->func_num-1) when enable ROCE VF*/ int vf_num = 0;/* should be (hr_dev->func_num - 1) when RoCE VF is enabled */
/* Clear vf first, then clear pf*/ /* Clear vf first, then clear pf */
for (i = vf_num; i >= 0; i--) for (i = vf_num; i >= 0; i--)
hns_roce_clear_func(hr_dev, i); hns_roce_clear_func(hr_dev, i);
} }
...@@ -1401,13 +1376,13 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev) ...@@ -1401,13 +1376,13 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_cmq_desc desc[2]; struct hns_roce_cmq_desc desc[QUERY_PF_RES_CMDQ_DESC_NUM];
struct hns_roce_pf_res_a *req_a; struct hns_roce_pf_res_a *req_a;
struct hns_roce_pf_res_b *req_b; struct hns_roce_pf_res_b *req_b;
int ret; int ret;
int i; int i;
for (i = 0; i < 2; i++) { for (i = 0; i < QUERY_PF_RES_CMDQ_DESC_NUM; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_QUERY_PF_RES, true); HNS_ROCE_OPC_QUERY_PF_RES, true);
...@@ -1417,7 +1392,7 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) ...@@ -1417,7 +1392,7 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
} }
ret = hns_roce_cmq_send(hr_dev, desc, 2); ret = hns_roce_cmq_send(hr_dev, desc, QUERY_PF_RES_CMDQ_DESC_NUM);
if (ret) if (ret)
return ret; return ret;
...@@ -1449,12 +1424,12 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) ...@@ -1449,12 +1424,12 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev) static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_cmq_desc desc[2]; struct hns_roce_cmq_desc desc[QUERY_PF_TIMER_RES_CMDQ_DESC_NUM];
struct hns_roce_pf_timer_res_a *req_a; struct hns_roce_pf_timer_res_a *req_a;
int ret; int ret;
int i; int i;
for (i = 0; i < 2; i++) { for (i = 0; i < QUERY_PF_TIMER_RES_CMDQ_DESC_NUM; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_QUERY_PF_TIMER_RES, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
true); true);
...@@ -1465,7 +1440,7 @@ static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev) ...@@ -1465,7 +1440,7 @@ static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
} }
ret = hns_roce_cmq_send(hr_dev, desc, 2); ret = hns_roce_cmq_send(hr_dev, desc, QUERY_PF_TIMER_RES_CMDQ_DESC_NUM);
if (ret) if (ret)
return ret; return ret;
...@@ -1512,7 +1487,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, ...@@ -1512,7 +1487,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_cmq_desc desc[2]; struct hns_roce_cmq_desc desc[ALLOC_VF_RES_CMDQ_DESC_NUM];
struct hns_roce_vf_res_a *req_a; struct hns_roce_vf_res_a *req_a;
struct hns_roce_vf_res_b *req_b; struct hns_roce_vf_res_b *req_b;
int d; int d;
...@@ -1523,7 +1498,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1523,7 +1498,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
req_b = (struct hns_roce_vf_res_b *)desc[1].data; req_b = (struct hns_roce_vf_res_b *)desc[1].data;
memset(req_a, 0, sizeof(*req_a)); memset(req_a, 0, sizeof(*req_a));
memset(req_b, 0, sizeof(*req_b)); memset(req_b, 0, sizeof(*req_b));
for (i = 0; i < 2; i++) { for (i = 0; i < ALLOC_VF_RES_CMDQ_DESC_NUM; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_ALLOC_VF_RES, false); HNS_ROCE_OPC_ALLOC_VF_RES, false);
...@@ -1607,7 +1582,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) ...@@ -1607,7 +1582,7 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
} }
} }
return hns_roce_cmq_send(hr_dev, desc, 2); return hns_roce_cmq_send(hr_dev, desc, ALLOC_VF_RES_CMDQ_DESC_NUM);
} }
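Every multi-descriptor command in this patch follows the same chaining rule: each descriptor except the last keeps HNS_ROCE_CMD_FLAG_NEXT set so the firmware treats the set as a single request. Condensed to its generic shape (a sketch; the hunks above hard-code the descriptor count per opcode):

/* Generic form of the two-descriptor setup used above (sketch). */
for (i = 0; i < desc_num; i++) {
	hns_roce_cmq_setup_basic_desc(&desc[i], opcode, is_read);
	if (i < desc_num - 1)
		desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	else
		desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
}
ret = hns_roce_cmq_send(hr_dev, desc, desc_num);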
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
...@@ -1718,7 +1693,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1718,7 +1693,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
hns_roce_query_func_num(hr_dev); hns_roce_query_func_num(hr_dev);
if (hr_dev->pci_dev->revision == 0x21) { if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
ret = hns_roce_query_pf_timer_resource(hr_dev); ret = hns_roce_query_pf_timer_resource(hr_dev);
if (ret) { if (ret) {
dev_err(hr_dev->dev, dev_err(hr_dev->dev,
...@@ -1735,7 +1710,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1735,7 +1710,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret; return ret;
} }
if (hr_dev->pci_dev->revision == 0x21) { if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
ret = hns_roce_set_vf_switch_param(hr_dev, 0); ret = hns_roce_set_vf_switch_param(hr_dev, 0);
if (ret) { if (ret) {
dev_err(hr_dev->dev, dev_err(hr_dev->dev,
...@@ -1784,7 +1759,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1784,7 +1759,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ; caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ; caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->idx_entry_sz = 4; caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE; caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0; caps->reserved_lkey = 0;
...@@ -1808,7 +1783,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1808,7 +1783,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->mpt_ba_pg_sz = 0; caps->mpt_ba_pg_sz = 0;
caps->mpt_buf_pg_sz = 0; caps->mpt_buf_pg_sz = 0;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->pbl_ba_pg_sz = 2; caps->pbl_ba_pg_sz = HNS_ROCE_MEM_PAGE_SUPPORT_8K;
caps->pbl_buf_pg_sz = 0; caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->mtt_ba_pg_sz = 0; caps->mtt_ba_pg_sz = 0;
...@@ -1844,7 +1819,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1844,7 +1819,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR; caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE; caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
if (hr_dev->pci_dev->revision == 0x21) { if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
caps->flags |= (HNS_ROCE_CAP_FLAG_XRC | HNS_ROCE_CAP_FLAG_SRQ | caps->flags |= (HNS_ROCE_CAP_FLAG_XRC | HNS_ROCE_CAP_FLAG_SRQ |
HNS_ROCE_CAP_FLAG_MW | HNS_ROCE_CAP_FLAG_MW |
HNS_ROCE_CAP_FLAG_FRMR | HNS_ROCE_CAP_FLAG_FRMR |
...@@ -1880,7 +1855,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ...@@ -1880,7 +1855,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
enum hns_roce_link_table_type type) enum hns_roce_link_table_type type)
{ {
struct hns_roce_cmq_desc desc[2]; struct hns_roce_cmq_desc desc[CONFIG_LLM_CMDQ_DESC_NUM];
struct hns_roce_cfg_llm_a *req_a = struct hns_roce_cfg_llm_a *req_a =
(struct hns_roce_cfg_llm_a *)desc[0].data; (struct hns_roce_cfg_llm_a *)desc[0].data;
struct hns_roce_cfg_llm_b *req_b = struct hns_roce_cfg_llm_b *req_b =
...@@ -1902,6 +1877,8 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, ...@@ -1902,6 +1877,8 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM; opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
break; break;
default: default:
dev_err(hr_dev->dev, "Not supported link table type: 0x%x!\n",
type);
return -EINVAL; return -EINVAL;
} }
...@@ -1910,7 +1887,7 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, ...@@ -1910,7 +1887,7 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
memset(req_a, 0, sizeof(*req_a)); memset(req_a, 0, sizeof(*req_a));
memset(req_b, 0, sizeof(*req_b)); memset(req_b, 0, sizeof(*req_b));
for (i = 0; i < 2; i++) { for (i = 0; i < CONFIG_LLM_CMDQ_DESC_NUM; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false); hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
if (i == 0) if (i == 0)
...@@ -1942,6 +1919,7 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, ...@@ -1942,6 +1919,7 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
CFG_LLM_TAIL_BA_H_S, CFG_LLM_TAIL_BA_H_S,
entry[page_num - 1].blk_ba1_nxt_ptr & entry[page_num - 1].blk_ba1_nxt_ptr &
HNS_ROCE_LINK_TABLE_BA1_M); HNS_ROCE_LINK_TABLE_BA1_M);
/* (page_num - 2) refers to the second-to-last page */
roce_set_field(req_b->tail_ptr, roce_set_field(req_b->tail_ptr,
CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_M,
CFG_LLM_TAIL_PTR_S, CFG_LLM_TAIL_PTR_S,
...@@ -1953,7 +1931,7 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, ...@@ -1953,7 +1931,7 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
roce_set_field(req_a->depth_pgsz_init_en, roce_set_field(req_a->depth_pgsz_init_en,
CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1); CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
return hns_roce_cmq_send(hr_dev, desc, 2); return hns_roce_cmq_send(hr_dev, desc, CONFIG_LLM_CMDQ_DESC_NUM);
} }
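The descriptor-count and revision constants introduced across these hunks are all recoverable from the literals they replace; a sketch of the presumed definitions (the real header may group or name them differently):

/* Inferred from the magic numbers they replace (sketch). */
#define QUERY_PF_RES_CMDQ_DESC_NUM		2
#define QUERY_PF_TIMER_RES_CMDQ_DESC_NUM	2
#define ALLOC_VF_RES_CMDQ_DESC_NUM		2
#define CONFIG_LLM_CMDQ_DESC_NUM		2
#define PCI_REVISION_ID_HIP08_B			0x21	/* was the bare 0x21 */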
static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
...@@ -1976,16 +1954,23 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, ...@@ -1976,16 +1954,23 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
case TSQ_LINK_TABLE: case TSQ_LINK_TABLE:
link_tbl = &priv->tsq; link_tbl = &priv->tsq;
buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT); buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz; pg_num_a = hr_dev->caps.num_qps * QP_EX_DB_SIZE / buf_chk_sz;
/*
* every transport service queue (tsq) needs 2 pages plus 1 reserved
* page; it includes a tx queue and an rx queue.
*/
pg_num_b = hr_dev->caps.sl_num * 4 + 2; pg_num_b = hr_dev->caps.sl_num * 4 + 2;
break; break;
case TPQ_LINK_TABLE: case TPQ_LINK_TABLE:
link_tbl = &priv->tpq; link_tbl = &priv->tpq;
buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT); buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz; pg_num_a = hr_dev->caps.num_cqs * CQ_EX_DB_SIZE / buf_chk_sz;
pg_num_b = 2 * 4 * func_num + 2; /* every function needs 2 pages plus 2 reserved pages */
pg_num_b = 2 * TIMEOUT_POLL_QUEUE_NUM * func_num + 2;
break; break;
default: default:
dev_err(hr_dev->dev, "Not supported link table type: 0x%x\n",
type);
return -EINVAL; return -EINVAL;
} }
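The per-entry sizes behind the new names follow from the old literals (QP_EX_DB_SIZE = 8, CQ_EX_DB_SIZE = 4, TIMEOUT_POLL_QUEUE_NUM = 4; inferred, not quoted from the header). A worked example for the TSQ table, assuming num_qps = 0x40000, sl_num = 8, 4KB pages and tsq_buf_pg_sz = 0:

/* Worked example under the stated assumptions. */
buf_chk_sz = 1 << (0 + 12);		/* tsq_buf_pg_sz = 0 -> 4096 bytes */
pg_num_a   = 0x40000 * 8 / 4096;	/* = 512 ex-doorbell data pages */
pg_num_b   = 8 * 4 + 2;			/* = 34 pages: 4 per SL + 2 reserved */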
...@@ -1998,6 +1983,8 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, ...@@ -1998,6 +1983,8 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
if (!link_tbl->table.buf) if (!link_tbl->table.buf)
goto out; goto out;
memset(link_tbl->table.buf, 0, size);
link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list), link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
GFP_KERNEL); GFP_KERNEL);
if (!link_tbl->pg_list) if (!link_tbl->pg_list)
...@@ -2071,11 +2058,13 @@ static int hns_roce_v2_uar_init(struct hns_roce_dev *hr_dev) ...@@ -2071,11 +2058,13 @@ static int hns_roce_v2_uar_init(struct hns_roce_dev *hr_dev)
struct hns_roce_buf_list *uar = &priv->uar; struct hns_roce_buf_list *uar = &priv->uar;
struct device *dev = &hr_dev->pci_dev->dev; struct device *dev = &hr_dev->pci_dev->dev;
uar->buf = dma_zalloc_coherent(dev, HNS_ROCE_V2_UAR_BUF_SIZE, &uar->map, uar->buf = dma_alloc_coherent(dev, HNS_ROCE_V2_UAR_BUF_SIZE, &uar->map,
GFP_KERNEL); GFP_KERNEL);
if (!uar->buf) if (!uar->buf)
return -ENOMEM; return -ENOMEM;
memset(uar->buf, 0, HNS_ROCE_V2_UAR_BUF_SIZE);
hr_dev->uar2_dma_addr = uar->map; hr_dev->uar2_dma_addr = uar->map;
hr_dev->uar2_size = HNS_ROCE_V2_UAR_BUF_SIZE; hr_dev->uar2_size = HNS_ROCE_V2_UAR_BUF_SIZE;
...@@ -2118,7 +2107,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) ...@@ -2118,7 +2107,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
goto err_tpq_init_failed; goto err_tpq_init_failed;
} }
/* Alloc memory for QPC Timer buffer space chunk*/ /* Alloc memory for QPC Timer buffer space chunk */
for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
qpc_count++) { qpc_count++) {
ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table.table, ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table.table,
...@@ -2129,7 +2118,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) ...@@ -2129,7 +2118,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
} }
} }
/* Alloc memory for CQC Timer buffer space chunk*/ /* Alloc memory for CQC Timer buffer space chunk */
for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
cqc_count++) { cqc_count++) {
ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table.table, ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table.table,
...@@ -2165,7 +2154,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) ...@@ -2165,7 +2154,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_v2_priv *priv = hr_dev->priv;
if (hr_dev->pci_dev->revision == 0x21) if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B)
hns_roce_function_clear(hr_dev); hns_roce_function_clear(hr_dev);
hns_roce_free_link_table(hr_dev, &priv->tpq); hns_roce_free_link_table(hr_dev, &priv->tpq);
...@@ -2209,6 +2198,11 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -2209,6 +2198,11 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
{ {
struct hns_roce_cmq_desc desc; struct hns_roce_cmq_desc desc;
struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data; struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
struct hns_roce_qp *qp;
unsigned long sq_flags;
unsigned long rq_flags;
bool to_be_err_state = false;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
...@@ -2216,10 +2210,26 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -2216,10 +2210,26 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
mb->in_param_h = cpu_to_le64(in_param) >> 32; mb->in_param_h = cpu_to_le64(in_param) >> 32;
mb->out_param_l = cpu_to_le64(out_param); mb->out_param_l = cpu_to_le64(out_param);
mb->out_param_h = cpu_to_le64(out_param) >> 32; mb->out_param_h = cpu_to_le64(out_param) >> 32;
mb->cmd_tag = in_modifier << 8 | op; mb->cmd_tag = in_modifier << HNS_ROCE_MB_TAG_S | op;
mb->token_event_en = event << 16 | token; mb->token_event_en = event << HNS_ROCE_MB_EVENT_EN_S | token;
return hns_roce_cmq_send(hr_dev, &desc, 1); qp = __hns_roce_qp_lookup(hr_dev, in_modifier);
if (qp && !qp->ibqp.uobject &&
(qp->attr_mask & IB_QP_STATE) && qp->next_state == IB_QPS_ERR) {
spin_lock_irqsave(&qp->sq.lock, sq_flags);
spin_lock_irqsave(&qp->rq.lock, rq_flags);
to_be_err_state = true;
}
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (to_be_err_state) {
spin_unlock_irqrestore(&qp->rq.lock, rq_flags);
spin_unlock_irqrestore(&qp->sq.lock, sq_flags);
}
return ret;
} }
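The new shift names in hns_roce_mbox_post() replace the bare 8 and 16 of the old packing on the left-hand side, so their values follow directly (sketch of the presumed defines):

#define HNS_ROCE_MB_TAG_S	8	/* cmd_tag = in_modifier << 8 | op */
#define HNS_ROCE_MB_EVENT_EN_S	16	/* token_event_en = event << 16 | token */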
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
...@@ -2252,7 +2262,7 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev, ...@@ -2252,7 +2262,7 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
unsigned long timeout) unsigned long timeout)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
unsigned long end = 0; unsigned long end;
u32 status; u32 status;
end = msecs_to_jiffies(timeout) + jiffies; end = msecs_to_jiffies(timeout) + jiffies;
...@@ -2265,7 +2275,7 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev, ...@@ -2265,7 +2275,7 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
} }
status = hns_roce_v2_cmd_complete(hr_dev); status = hns_roce_v2_cmd_complete(hr_dev);
if (status != 0x1) { if (unlikely(status != HNS_ROCE_CMD_SUCCESS)) {
if (status == CMD_RST_PRC_EBUSY) if (status == CMD_RST_PRC_EBUSY)
return status; return status;
...@@ -2303,7 +2313,7 @@ static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev, ...@@ -2303,7 +2313,7 @@ static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
p = (u32 *)&gid->raw[8]; p = (u32 *)&gid->raw[8];
sgid_tb->vf_sgid_mh = cpu_to_le32(*p); sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
p = (u32 *)&gid->raw[0xc]; p = (u32 *)&gid->raw[12];
sgid_tb->vf_sgid_h = cpu_to_le32(*p); sgid_tb->vf_sgid_h = cpu_to_le32(*p);
return hns_roce_cmq_send(hr_dev, &desc, 1); return hns_roce_cmq_send(hr_dev, &desc, 1);
...@@ -2319,7 +2329,7 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, ...@@ -2319,7 +2329,7 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
const struct ib_gid_attr *attr) const struct ib_gid_attr *attr)
#endif #endif
{ {
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1; enum hns_roce_sgid_type sgid_type;
int ret; int ret;
if (!gid || !attr) if (!gid || !attr)
...@@ -2374,9 +2384,10 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, ...@@ -2374,9 +2384,10 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
struct scatterlist *sg; struct scatterlist *sg;
u64 page_addr; u64 page_addr;
u64 *pages; u64 *pages;
int i, j;
int len;
int entry; int entry;
int len;
int i;
int j;
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
...@@ -2395,7 +2406,7 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, ...@@ -2395,7 +2406,7 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
page_addr = sg_dma_address(sg) + page_addr = sg_dma_address(sg) +
(j << mr->umem->page_shift); (j << mr->umem->page_shift);
pages[i] = page_addr >> 6; pages[i] = page_addr >> 6;
/* Record the first 2 entry directly to MTPT table*/ /* Record the first 2 entry directly to MTPT table */
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1) if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
goto found; goto found;
i++; i++;
...@@ -2441,7 +2452,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, ...@@ -2441,7 +2452,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
V2_MPT_BYTE_4_PD_S, mr->pd); V2_MPT_BYTE_4_PD_S, mr->pd);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S, roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
(mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
...@@ -2634,8 +2645,8 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index) ...@@ -2634,8 +2645,8 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
/* always called with interrupts disabled. */ /* always called with interrupts disabled. */
spin_lock(&srq->lock); spin_lock(&srq->lock);
bitmap_num = wqe_index / (sizeof(u64) * 8); bitmap_num = wqe_index / BITS_PER_LONG_LONG;
bit_num = wqe_index % (sizeof(u64) * 8); bit_num = wqe_index % BITS_PER_LONG_LONG;
srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num); srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
srq->tail++; srq->tail++;
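With BITS_PER_LONG_LONG = 64 the index math is plain division and modulo; for example:

/* wqe_index = 130: word 130 / 64 = 2, bit 130 % 64 = 2 */
bitmap_num = 130 / BITS_PER_LONG_LONG;		/* 2 */
bit_num    = 130 % BITS_PER_LONG_LONG;		/* 2 */
srq->idx_que.bitmap[bitmap_num] |= 1ULL << bit_num;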
...@@ -2804,8 +2815,8 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, ...@@ -2804,8 +2815,8 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL; V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
/* /*
* flags = 0; Notification Flag = 1, next * flags is 0; Notification Flag is 1, next
* flags = 1; Notification Flag = 0, solocited * flags is 1; Notification Flag is 0, solicited
*/ */
roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S, roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
hr_cq->cqn); hr_cq->cqn);
...@@ -2829,9 +2840,13 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe, ...@@ -2829,9 +2840,13 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
struct ib_wc *wc) struct ib_wc *wc)
{ {
struct hns_roce_rinl_sge *sge_list; struct hns_roce_rinl_sge *sge_list;
u32 wr_num, wr_cnt, sge_num;
u32 sge_cnt, data_len, size;
void *wqe_buf; void *wqe_buf;
u32 data_len;
u32 sge_num;
u32 sge_cnt;
u32 wr_num;
u32 wr_cnt;
u32 size;
wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M, wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff; V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
...@@ -2861,13 +2876,11 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe, ...@@ -2861,13 +2876,11 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc) struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
struct hns_roce_srq *srq = NULL; struct hns_roce_srq *srq = NULL;
struct hns_roce_dev *hr_dev;
struct hns_roce_v2_cqe *cqe; struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp; struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq; struct hns_roce_wq *wq;
struct ib_qp_attr attr;
int attr_mask;
int is_send; int is_send;
u16 wqe_ctr; u16 wqe_ctr;
u32 opcode; u32 opcode;
...@@ -2891,7 +2904,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, ...@@ -2891,7 +2904,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
V2_CQE_BYTE_16_LCL_QPN_S); V2_CQE_BYTE_16_LCL_QPN_S);
if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) { if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
hr_dev = to_hr_dev(hr_cq->ib_cq.device);
hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (unlikely(!hr_qp)) { if (unlikely(!hr_qp)) {
dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n", dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
...@@ -2986,13 +2998,12 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, ...@@ -2986,13 +2998,12 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
} }
/* flush cqe if wc status is error, excluding flush error */ /* flush cqe if wc status is error, excluding flush error */
if ((wc->status != IB_WC_SUCCESS) && if (wc->status != IB_WC_SUCCESS &&
(wc->status != IB_WC_WR_FLUSH_ERR)) { wc->status != IB_WC_WR_FLUSH_ERR) {
attr_mask = IB_QP_STATE; dev_err(hr_dev->dev, "error cqe status is: 0x%x\n",
attr.qp_state = IB_QPS_ERR; status & HNS_ROCE_V2_CQE_STATUS_MASK);
return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp, init_flush_work(hr_dev, *cur_qp, hr_cq, HNS_ROCE_CQ);
&attr, attr_mask, return 0;
(*cur_qp)->state, IB_QPS_ERR);
} }
if (wc->status == IB_WC_WR_FLUSH_ERR) if (wc->status == IB_WC_WR_FLUSH_ERR)
...@@ -3110,7 +3121,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, ...@@ -3110,7 +3121,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->port_num = roce_get_field(cqe->byte_32, wc->port_num = roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S); V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
wc->pkey_index = 0; wc->pkey_index = 0;
memcpy(wc->smac, cqe->smac, 4); memcpy(wc->smac, cqe->smac, sizeof(u32));
wc->smac[4] = roce_get_field(cqe->byte_28, wc->smac[4] = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_SMAC_4_M, V2_CQE_BYTE_28_SMAC_4_M,
V2_CQE_BYTE_28_SMAC_4_S); V2_CQE_BYTE_28_SMAC_4_S);
...@@ -3222,7 +3233,6 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, ...@@ -3222,7 +3233,6 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_mhop mhop; struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem; struct hns_roce_hem *hem;
unsigned long mhop_obj = obj; unsigned long mhop_obj = obj;
int i, j, k;
int ret = 0; int ret = 0;
u64 hem_idx = 0; u64 hem_idx = 0;
u64 l1_idx = 0; u64 l1_idx = 0;
...@@ -3230,6 +3240,9 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, ...@@ -3230,6 +3240,9 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
u32 chunk_ba_num; u32 chunk_ba_num;
u32 hop_num; u32 hop_num;
int op; int op;
int i;
int j;
int k;
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) if (!hns_roce_check_whether_mhop(hr_dev, table->type))
return 0; return 0;
...@@ -3239,7 +3252,7 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, ...@@ -3239,7 +3252,7 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
j = mhop.l1_idx; j = mhop.l1_idx;
k = mhop.l2_idx; k = mhop.l2_idx;
hop_num = mhop.hop_num; hop_num = mhop.hop_num;
chunk_ba_num = mhop.bt_chunk_size / 8; chunk_ba_num = mhop.bt_chunk_size / BA_BYTE_LEN;
if (hop_num == 2) { if (hop_num == 2) {
hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num + hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
...@@ -3259,6 +3272,9 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, ...@@ -3259,6 +3272,9 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox)) if (IS_ERR(mailbox))
return PTR_ERR(mailbox); return PTR_ERR(mailbox);
if (table->type == HEM_TYPE_SCC_CTX)
obj = mhop.l0_idx;
if (check_whether_last_step(hop_num, step_idx)) { if (check_whether_last_step(hop_num, step_idx)) {
hem = table->hem[hem_idx]; hem = table->hem[hem_idx];
for (hns_roce_hem_first(hem, &iter); for (hns_roce_hem_first(hem, &iter);
...@@ -3285,17 +3301,16 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, ...@@ -3285,17 +3301,16 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
return ret; return ret;
} }
static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, static void hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, struct hns_roce_hem_table *table, int obj,
int step_idx) int step_idx)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox; struct hns_roce_cmd_mailbox *mailbox;
int ret = 0;
u16 op = 0xff; u16 op = 0xff;
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) if (!hns_roce_check_whether_mhop(hr_dev, table->type))
return 0; return;
switch (table->type) { switch (table->type) {
case HEM_TYPE_QPC: case HEM_TYPE_QPC:
...@@ -3311,27 +3326,28 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, ...@@ -3311,27 +3326,28 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
case HEM_TYPE_QPC_TIMER: case HEM_TYPE_QPC_TIMER:
case HEM_TYPE_CQC_TIMER: case HEM_TYPE_CQC_TIMER:
/* there is no need to destroy these ctx */ /* there is no need to destroy these ctx */
return 0; return;
case HEM_TYPE_SRQC: case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_DESTROY_SRQC_BT0; op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
break; break;
default: default:
dev_warn(dev, "Table %d not to be destroyed by mailbox!\n", dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
table->type); table->type);
return 0; return;
} }
op += step_idx; op += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) if (IS_ERR(mailbox))
return PTR_ERR(mailbox); return;
/* configure the tag and op */ /* configure the tag and op */
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op, if (hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS))
dev_warn(dev, "Failed to clear HEM.\n");
hns_roce_free_cmd_mailbox(hr_dev, mailbox); hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret; return;
} }
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
...@@ -3348,6 +3364,10 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, ...@@ -3348,6 +3364,10 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox)) if (IS_ERR(mailbox))
return PTR_ERR(mailbox); return PTR_ERR(mailbox);
/*
* The context includes the qp context and the qp mask context;
* the two must be contiguous in memory
*/
memcpy(mailbox->buf, context, sizeof(*context) * 2); memcpy(mailbox->buf, context, sizeof(*context) * 2);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
...@@ -3469,7 +3489,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, ...@@ -3469,7 +3489,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(context->byte_4_sqpn_tst, roce_set_field(context->byte_4_sqpn_tst,
V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, V2_QPC_BYTE_4_SGE_SHIFT_S,
hr_qp->sq.max_gs > 2 ? hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
...@@ -3559,6 +3579,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, ...@@ -3559,6 +3579,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0); V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
} }
/*
* the hardware needs 63 bits of the record db address: first,
* right-shift by 1 bit and fill the low 31 bits into the low
* positions of the rq_db_record_address field; then right-shift
* by 32 bits and fill the high 32 bits into the high positions
* of rq_db_record_address
*/
roce_set_field(context->byte_68_rq_db, roce_set_field(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
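Spelled out as plain shifts, the split the comment describes is the following (a sketch; the field name hr_qp->rdb.dma is an assumption, and GENMASK(30, 0) keeps address bits 31:1 after the shift):

u64 db = hr_qp->rdb.dma;			/* record db dma address */
u32 lo = (u32)(db >> 1) & GENMASK(30, 0);	/* bits 31:1 -> low field */
u32 hi = (u32)(db >> 32);			/* bits 63:32 -> high field */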
...@@ -3811,7 +3838,9 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, ...@@ -3811,7 +3838,9 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
else else
roce_set_field(context->byte_4_sqpn_tst, roce_set_field(context->byte_4_sqpn_tst,
V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ? V2_QPC_BYTE_4_SGE_SHIFT_S,
hr_qp->sq.max_gs >
HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
...@@ -3918,7 +3947,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -3918,7 +3947,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle); hr_qp->mtt.first_seg, &dma_handle);
if (!mtts) { if (!mtts) {
dev_err(dev, "qp buf pa find failed\n"); dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
return -EINVAL; return -EINVAL;
} }
...@@ -3926,7 +3955,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -3926,7 +3955,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
hr_qp->qpn, &dma_handle_2); hr_qp->qpn, &dma_handle_2);
if (!mtts_2) { if (!mtts_2) {
dev_err(dev, "qp irrl_table find failed\n"); dev_err(dev, "qp(0x%lx) irrl_table find failed\n", hr_qp->qpn);
return -EINVAL; return -EINVAL;
} }
...@@ -3934,7 +3963,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -3934,7 +3963,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
hr_qp->qpn, &dma_handle_3); hr_qp->qpn, &dma_handle_3);
if (!mtts_3) { if (!mtts_3) {
dev_err(dev, "qp trrl_table find failed\n"); dev_err(dev, "qp(0x%lx) trrl_table find failed\n", hr_qp->qpn);
return -EINVAL; return -EINVAL;
} }
...@@ -3969,7 +3998,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -3969,7 +3998,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, V2_QPC_BYTE_20_SGE_HOP_NUM_S,
(((ibqp->qp_type == IB_QPT_GSI) || (((ibqp->qp_type == IB_QPT_GSI) ||
ibqp->qp_type == IB_QPT_UD) || hr_qp->sq.max_gs > 2) ? ibqp->qp_type == IB_QPT_UD) ||
hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
hr_dev->caps.mtt_hop_num : 0); hr_dev->caps.mtt_hop_num : 0);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx, roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_M,
...@@ -4081,13 +4111,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -4081,13 +4111,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_20_smac_sgid_idx, roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S, 0); V2_QPC_BYTE_20_SGID_IDX_S, 0);
memcpy(&(context->dmac), dmac, 4); memcpy(&(context->dmac), dmac, sizeof(u32));
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4]))); V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
qpc_mask->dmac = 0; qpc_mask->dmac = 0;
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0); V2_QPC_BYTE_52_DMAC_S, 0);
/* mtu * (2^LP_PKTN_INI) must not exceed the max message length of 64KB */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 4); V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
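The value 4 written to LP_PKTN_INI above satisfies the bound in the comment at the largest MTU; a quick check, assuming a 4KB path MTU:

/* mtu * 2^LP_PKTN_INI = 4096 * 16 = 65536 bytes = 64KB, the max message */
BUILD_BUG_ON(4096 * (1 << 4) != 64 * 1024);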
...@@ -4129,6 +4160,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -4129,6 +4160,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M, roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0); V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
/* rocee sends 2^lp_sgen_ini segments each time */
roce_set_field(context->byte_168_irrl_idx, roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_LP_SGEN_INI_M, V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 3); V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
...@@ -4155,7 +4187,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, ...@@ -4155,7 +4187,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle); hr_qp->mtt.first_seg, &dma_handle);
if (!mtts) { if (!mtts) {
dev_err(dev, "qp buf pa find failed\n"); dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
return -EINVAL; return -EINVAL;
} }
...@@ -4185,14 +4217,16 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, ...@@ -4185,14 +4217,16 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI || context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI ||
ibqp->qp_type == IB_QPT_UD) || ibqp->qp_type == IB_QPT_UD) ||
hr_qp->sq.max_gs > 2) ? hr_qp->sq.max_gs >
HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
((u32)(mtts[hr_qp->sge.offset / page_size] ((u32)(mtts[hr_qp->sge.offset / page_size]
>> PAGE_ADDR_SHIFT)) : 0; >> PAGE_ADDR_SHIFT)) : 0;
roce_set_field(context->byte_184_irrl_idx, roce_set_field(context->byte_184_irrl_idx,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
((ibqp->qp_type == IB_QPT_GSI || ((ibqp->qp_type == IB_QPT_GSI ||
ibqp->qp_type == IB_QPT_UD) || hr_qp->sq.max_gs > 2) ? ibqp->qp_type == IB_QPT_UD) ||
hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
(mtts[hr_qp->sge.offset / page_size] >> (mtts[hr_qp->sge.offset / page_size] >>
(32 + PAGE_ADDR_SHIFT)) : 0); (32 + PAGE_ADDR_SHIFT)) : 0);
qpc_mask->sq_cur_sge_blk_addr = 0; qpc_mask->sq_cur_sge_blk_addr = 0;
...@@ -4327,7 +4361,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, ...@@ -4327,7 +4361,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
} }
if (status) { if (status) {
dev_err(hr_dev->dev, "get gid during modifing QP failed\n"); dev_err(hr_dev->dev,
"get gid during modifing QP(0x%x) failed, status %d\n",
ibqp->qp_num, status);
return -EAGAIN; return -EAGAIN;
} }
#endif #endif
...@@ -4377,10 +4413,10 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, ...@@ -4377,10 +4413,10 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0); V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
#ifdef CONFIG_KERNEL_419 #ifdef CONFIG_KERNEL_419
if (hr_dev->pci_dev->revision == 0x21 && if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B &&
gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#else #else
if (hr_dev->pci_dev->revision == 0x21 && if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B &&
gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#endif #endif
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
...@@ -4437,7 +4473,8 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, ...@@ -4437,7 +4473,8 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
/* Nothing */ /* Nothing */
; ;
} else { } else {
dev_err(hr_dev->dev, "Illegal state for QP!\n"); dev_err(hr_dev->dev, "Illegal state for QP(0x%x),cur state-%d, new_state-%d!\n",
ibqp->qp_num, cur_state, new_state);
ret = -EAGAIN; ret = -EAGAIN;
goto out; goto out;
} }
...@@ -4537,7 +4574,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, ...@@ -4537,7 +4574,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
roce_set_field(context->byte_224_retry_msg, roce_set_field(context->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M, V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S, V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
attr->sq_psn >> 16); attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
roce_set_field(qpc_mask->byte_224_retry_msg, roce_set_field(qpc_mask->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M, V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0); V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
...@@ -4639,25 +4676,34 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -4639,25 +4676,34 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context *context; struct hns_roce_v2_qp_context cmd_qpc[2];
struct hns_roce_v2_qp_context *qpc_mask; struct hns_roce_v2_qp_context *context = &cmd_qpc[0];
struct hns_roce_v2_qp_context *qpc_mask = &cmd_qpc[1];
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int ret = -EINVAL; int ret;
context = kcalloc(2, sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
qpc_mask = context + 1;
/* /*
* In v2 engine, software pass context and context mask to hardware * In v2 engine, software pass context and context mask to hardware
* when modifying qp. If software need modify some fields in context, * when modifying qp. If software need modify some fields in context,
* we should set all bits of the relevant fields in context mask to * we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1. * 0 at the same time, else set them to 0x1.
*/ */
memset(context, 0, sizeof(*context));
memset(qpc_mask, 0xff, sizeof(*qpc_mask)); memset(qpc_mask, 0xff, sizeof(*qpc_mask));
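A minimal standalone model of this context/mask convention; set_field() below is a toy stand-in for roce_set_field(), not the driver's helper:

	#include <stdint.h>
	#include <stdio.h>

	/* Write val into the bits selected by mask, starting at shift. */
	static void set_field(uint32_t *word, uint32_t mask, int shift,
			      uint32_t val)
	{
		*word = (*word & ~mask) | ((val << shift) & mask);
	}

	int main(void)
	{
		uint32_t ctx = 0;		/* new values */
		uint32_t ctx_mask = 0xffffffff;	/* 1 = keep, 0 = take new */
		const uint32_t FIELD_M = 0x000000f0;	/* example 4-bit field */
		const int FIELD_S = 4;

		set_field(&ctx, FIELD_M, FIELD_S, 0x4);	   /* desired value */
		set_field(&ctx_mask, FIELD_M, FIELD_S, 0); /* mark as valid */
		printf("ctx=0x%08x mask=0x%08x\n", ctx, ctx_mask);
		return 0;
	}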
/* Configure the mandatory fields */
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state, ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
new_state, context, qpc_mask); new_state, context, qpc_mask);
if (ret) {
dev_err(dev, "set fields for modify qp(0x%x) from state %d to state %d failed, ret = %d\n",
ibqp->qp_num, to_hns_roce_qp_st(cur_state),
to_hns_roce_qp_st(new_state), ret);
goto out;
}
/* Configure the optional fields */
ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret) if (ret)
goto out; goto out;
...@@ -4682,12 +4728,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -4682,12 +4728,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
} }
} }
/* Configure the optional fields */
ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S, roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
((ibqp->srq || ((ibqp->srq ||
(to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC)) ? (to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC)) ?
...@@ -4707,7 +4747,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -4707,7 +4747,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
to_hns_roce_qp_st(new_state), to_hns_roce_qp_st(new_state),
context, hr_qp); context, hr_qp);
if (ret) { if (ret) {
dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); dev_err(dev, "modify qp(0x%x) from state %d to state %d failed, ret = %d\n",
ibqp->qp_num, to_hns_roce_qp_st(cur_state),
to_hns_roce_qp_st(new_state), ret);
goto out; goto out;
} }
...@@ -4737,7 +4779,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -4737,7 +4779,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state); rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state);
out: out:
kfree(context);
return ret; return ret;
} }
...@@ -4798,7 +4839,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ...@@ -4798,7 +4839,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context); ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
if (ret) { if (ret) {
dev_err(dev, "query qpc error\n"); dev_err(dev, "query qpc(0x%x) error, ret = %d\n",
ibqp->qp_num, ret);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -4831,11 +4873,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ...@@ -4831,11 +4873,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S); V2_QPC_BYTE_56_DQPN_S);
qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en, qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RRE_S)) << 2) | V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
((roce_get_bit(context->byte_76_srqn_op_en, ((roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RWE_S)) << 1) | V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
((roce_get_bit(context->byte_76_srqn_op_en, ((roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_ATE_S)) << 3); V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
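The shifts used here are defined later in this patch's header hunk as V2_QP_RWE_S = 1, V2_QP_RRE_S = 2 and V2_QP_ATE_S = 3, matching the literals they replace and the IB_ACCESS_REMOTE_WRITE/READ/ATOMIC bit positions. A standalone sketch of the mapping (toy code, not the driver's):

	#include <stdint.h>
	#include <stdio.h>

	enum { QP_RWE_S = 1, QP_RRE_S = 2, QP_ATE_S = 3 };

	static uint32_t qp_access_flags(int rre, int rwe, int ate)
	{
		return ((uint32_t)rre << QP_RRE_S) |	/* remote read   -> bit 2 */
		       ((uint32_t)rwe << QP_RWE_S) |	/* remote write  -> bit 1 */
		       ((uint32_t)ate << QP_ATE_S);	/* remote atomic -> bit 3 */
	}

	int main(void)
	{
		/* read + write enabled -> 0x6 in the IB_ACCESS_* encoding */
		printf("0x%x\n", qp_access_flags(1, 1, 0));
		return 0;
	}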
if (hr_qp->ibqp.qp_type == IB_QPT_RC || if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
hr_qp->ibqp.qp_type == IB_QPT_UC) { hr_qp->ibqp.qp_type == IB_QPT_UC) {
struct ib_global_route *grh = struct ib_global_route *grh =
...@@ -4914,7 +4957,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ...@@ -4914,7 +4957,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET); hr_qp->state, IB_QPS_RESET);
if (ret) { if (ret) {
dev_err(dev, "modify QP to Reset failed.\n"); dev_err(dev,
"modify QP %06lx to Reset failed, ret = %d.\n",
hr_qp->qpn, ret);
return ret; return ret;
} }
} }
...@@ -4989,10 +5034,15 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp) ...@@ -4989,10 +5034,15 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
return ret; return ret;
} }
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
kfree(hr_to_hr_sqp(hr_qp)); kfree(hr_to_hr_sqp(hr_qp));
else } else {
flush_workqueue(hr_qp->rq.workq);
destroy_workqueue(hr_qp->rq.workq);
flush_workqueue(hr_qp->sq.workq);
destroy_workqueue(hr_qp->sq.workq);
kfree(hr_qp); kfree(hr_qp);
}
return 0; return 0;
} }
...@@ -5088,45 +5138,12 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) ...@@ -5088,45 +5138,12 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox); hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) if (ret)
dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n"); dev_err(hr_dev->dev, "MODIFY CQ(0x%lx) cmd process error.\n",
hr_cq->cqn);
return ret; return ret;
} }
static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
{
struct hns_roce_qp *qp;
struct ib_qp_attr attr;
int attr_mask;
int ret;
qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (!qp) {
dev_warn(hr_dev->dev, "no qp can be found!\n");
return;
}
if (qp->ibqp.pd->uobject) {
if (qp->sdb_en == 1) {
qp->sq.head = *(int *)(qp->sdb.virt_addr);
if (qp->rdb_en == 1)
qp->rq.head = *(int *)(qp->rdb.virt_addr);
} else {
dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
return;
}
}
attr_mask = IB_QP_STATE;
attr.qp_state = IB_QPS_ERR;
ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
qp->state, IB_QPS_ERR);
if (ret)
dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
qpn);
}
static void hns_roce_irq_work_handle(struct work_struct *work) static void hns_roce_irq_work_handle(struct work_struct *work)
{ {
struct hns_roce_work *irq_work = struct hns_roce_work *irq_work =
...@@ -5149,18 +5166,16 @@ static void hns_roce_irq_work_handle(struct work_struct *work) ...@@ -5149,18 +5166,16 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
dev_warn(dev, "Send queue drained.\n"); dev_warn(dev, "Send queue drained.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
dev_err(dev, "Local work queue catastrophic error, sub_event type is: %d\n", dev_err(dev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
irq_work->sub_type); qpn, irq_work->sub_type);
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
break; break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
dev_err(dev, "Invalid request local work queue error.\n"); dev_err(dev, "Invalid request local work queue 0x%x error.\n",
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); qpn);
break; break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
dev_err(dev, "Local access violation work queue error, sub_event type is: %d\n", dev_err(dev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
irq_work->sub_type); qpn, irq_work->sub_type);
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
break; break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
dev_warn(dev, "SRQ limit reach.\n"); dev_warn(dev, "SRQ limit reach.\n");
...@@ -5291,6 +5306,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, ...@@ -5291,6 +5306,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
int aeqe_found = 0; int aeqe_found = 0;
int event_type; int event_type;
int sub_type; int sub_type;
u32 ci_max;
u32 srqn; u32 srqn;
u32 qpn; u32 qpn;
u32 cqn; u32 cqn;
...@@ -5360,8 +5376,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, ...@@ -5360,8 +5376,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index; ++eq->cons_index;
aeqe_found = 1; aeqe_found = 1;
if (eq->cons_index > (2 * eq->entries - 1)) { ci_max = 2 * eq->entries - 1;
dev_warn(dev, "cons_index overflow, set back to 0.\n"); if (eq->cons_index > ci_max) {
dev_info(dev, "aeq cons_index overflow, set back to 0.\n");
eq->cons_index = 0; eq->cons_index = 0;
} }
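The consumer index deliberately counts through two passes of the queue (0 .. 2 * entries - 1), presumably so the pass (owner) bit can tell fresh entries from stale ones, and only wraps to 0 past that range. A toy model of the wrap rule, with an invented queue depth:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint32_t entries = 4;	/* illustrative EQ depth */
		const uint32_t ci_max = 2 * entries - 1;
		uint32_t ci = 0;

		for (int ev = 0; ev < 10; ev++) {
			ci++;
			if (ci > ci_max)	/* same wrap rule as above */
				ci = 0;
			printf("event %d -> cons_index %u (slot %u)\n",
			       ev, ci, ci & (entries - 1));
		}
		return 0;
	}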
hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn); hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
...@@ -5419,6 +5436,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, ...@@ -5419,6 +5436,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_ceqe *ceqe; struct hns_roce_ceqe *ceqe;
int ceqe_found = 0; int ceqe_found = 0;
u32 ci_max;
u32 cqn; u32 cqn;
while ((ceqe = next_ceqe_sw_v2(eq))) { while ((ceqe = next_ceqe_sw_v2(eq))) {
...@@ -5437,8 +5455,9 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, ...@@ -5437,8 +5455,9 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index; ++eq->cons_index;
ceqe_found = 1; ceqe_found = 1;
if (eq->cons_index > (2 * eq->entries - 1)) { ci_max = 2 * eq->entries - 1;
dev_warn(dev, "cons_index overflow, set back to 0.\n"); if (eq->cons_index > ci_max) {
dev_info(dev, "ceq cons_index overflow, set back to 0.\n");
eq->cons_index = 0; eq->cons_index = 0;
} }
...@@ -5571,7 +5590,8 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) ...@@ -5571,7 +5590,8 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
} }
if (ret) if (ret)
dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); dev_err(dev, "[mailbox cmd] destroy eqc(0x%x) failed(%d).\n",
eqn, ret);
} }
static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
...@@ -5591,14 +5611,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, ...@@ -5591,14 +5611,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
/* hop_num = 0 */
if (mhop_num == HNS_ROCE_HOP_NUM_0) { if (mhop_num == HNS_ROCE_HOP_NUM_0) {
dma_free_coherent(dev, (unsigned int)(eq->entries * dma_free_coherent(dev, (unsigned int)(eq->entries *
eq->eqe_size), eq->bt_l0, eq->l0_dma); eq->eqe_size), eq->bt_l0, eq->l0_dma);
return; return;
} }
/* hop_num = 1 or hop = 2 */
dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
if (mhop_num == 1) { if (mhop_num == 1) {
for (i = 0; i < eq->l0_last_num; i++) { for (i = 0; i < eq->l0_last_num; i++) {
...@@ -5617,8 +5635,8 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, ...@@ -5617,8 +5635,8 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
eq->l1_dma[i]); eq->l1_dma[i]);
for (j = 0; j < bt_chk_sz / 8; j++) { for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
idx = i * (bt_chk_sz / 8) + j; idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
if ((i == eq->l0_last_num - 1) if ((i == eq->l0_last_num - 1)
&& j == eq->l1_last_num - 1) { && j == eq->l1_last_num - 1) {
eqe_alloc = (buf_chk_sz / eq->eqe_size) eqe_alloc = (buf_chk_sz / eq->eqe_size)
...@@ -5664,7 +5682,7 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev, ...@@ -5664,7 +5682,7 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq, struct hns_roce_eq *eq,
void *mb_buf) struct hns_roce_eq_context *mb_buf)
{ {
struct hns_roce_eq_context *eqc; struct hns_roce_eq_context *eqc;
unsigned int eq_period = HNS_ROCE_V2_EQ_DEFAULT_INTERVAL; unsigned int eq_period = HNS_ROCE_V2_EQ_DEFAULT_INTERVAL;
...@@ -5844,11 +5862,10 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, ...@@ -5844,11 +5862,10 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) / ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
buf_chk_sz; buf_chk_sz);
bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8); bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
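Worked numbers for the two divisions above, taking 4 KB buffer and BT chunks as an example: one BT chunk holds 4096 / 8 = 512 base addresses (BA_BYTE_LEN is 8, the size of one 64-bit bus address), so an EQ of 65536 entries with 64 B EQEs (4 MB of buffer) needs ba_num = 1024 buffer chunks and bt_num = DIV_ROUND_UP(1024, 512) = 2 BT chunks.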
/* hop_num = 0 */
if (mhop_num == HNS_ROCE_HOP_NUM_0) { if (mhop_num == HNS_ROCE_HOP_NUM_0) {
if (eq->entries > buf_chk_sz / eq->eqe_size) { if (eq->entries > buf_chk_sz / eq->eqe_size) {
dev_err(dev, "eq entries %d is larger than buf_pg_sz!", dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
...@@ -5890,13 +5907,15 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, ...@@ -5890,13 +5907,15 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
if (!eq->bt_l0) if (!eq->bt_l0)
goto err_dma_alloc_l0; goto err_dma_alloc_l0;
memset(eq->bt_l0, 0, bt_chk_sz);
if (mhop_num == 1) { if (mhop_num == 1) {
if (ba_num > (bt_chk_sz / 8)) if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
dev_err(dev, "ba_num %d is too large for 1 hop\n", dev_err(dev, "ba_num %d is too large for 1 hop\n",
ba_num); ba_num);
/* alloc buf */ /* alloc buf */
for (i = 0; i < bt_chk_sz / 8; i++) { for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
if (eq_buf_cnt + 1 < ba_num) { if (eq_buf_cnt + 1 < ba_num) {
size = buf_chk_sz; size = buf_chk_sz;
} else { } else {
...@@ -5921,16 +5940,19 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, ...@@ -5921,16 +5940,19 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
} else if (mhop_num == 2) { } else if (mhop_num == 2) {
/* alloc L1 BT and buf */ /* alloc L1 BT and buf */
for (i = 0; i < bt_chk_sz / 8; i++) { for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz, eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
&(eq->l1_dma[i]), &(eq->l1_dma[i]),
GFP_KERNEL); GFP_KERNEL);
if (!eq->bt_l1[i]) if (!eq->bt_l1[i])
goto err_dma_alloc_l1; goto err_dma_alloc_l1;
memset(eq->bt_l1[i], 0, bt_chk_sz);
*(eq->bt_l0 + i) = eq->l1_dma[i]; *(eq->bt_l0 + i) = eq->l1_dma[i];
for (j = 0; j < bt_chk_sz / 8; j++) { for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
idx = i * bt_chk_sz / 8 + j; idx = i * bt_chk_sz / BA_BYTE_LEN + j;
if (eq_buf_cnt + 1 < ba_num) { if (eq_buf_cnt + 1 < ba_num) {
size = buf_chk_sz; size = buf_chk_sz;
} else { } else {
...@@ -5976,8 +5998,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, ...@@ -5976,8 +5998,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
eq->l1_dma[i]); eq->l1_dma[i]);
for (j = 0; j < bt_chk_sz / 8; j++) { for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
idx = i * bt_chk_sz / 8 + j; idx = i * bt_chk_sz / BA_BYTE_LEN + j;
dma_free_coherent(dev, buf_chk_sz, eq->buf[idx], dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
eq->buf_dma[idx]); eq->buf_dma[idx]);
} }
...@@ -6000,11 +6022,11 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, ...@@ -6000,11 +6022,11 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
eq->l1_dma[i]); eq->l1_dma[i]);
for (j = 0; j < bt_chk_sz / 8; j++) { for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
if (i == record_i && j >= record_j) if (i == record_i && j >= record_j)
break; break;
idx = i * bt_chk_sz / 8 + j; idx = i * bt_chk_sz / BA_BYTE_LEN + j;
dma_free_coherent(dev, buf_chk_sz, dma_free_coherent(dev, buf_chk_sz,
eq->buf[idx], eq->buf[idx],
eq->buf_dma[idx]); eq->buf_dma[idx]);
...@@ -6074,12 +6096,14 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, ...@@ -6074,12 +6096,14 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
rdfx_alloc_rdfx_ceq(hr_dev, eq->eqn, eq_cmd); rdfx_alloc_rdfx_ceq(hr_dev, eq->eqn, eq_cmd);
hns_roce_config_eqc(hr_dev, eq, mailbox->buf); hns_roce_config_eqc(hr_dev, eq,
(struct hns_roce_eq_context *)mailbox->buf);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) { if (ret) {
dev_err(dev, "[mailbox cmd] create eqc failed.\n"); dev_err(dev, "[mailbox cmd] create eqc(0x%x) failed(%d).\n",
eq->eqn, ret);
goto err_cmd_mbox; goto err_cmd_mbox;
} }
...@@ -6121,7 +6145,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, ...@@ -6121,7 +6145,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
} }
} }
/* irq contains: abnormal + AEQ + CEQ*/ /* irq contains: abnormal + AEQ + CEQ */
for (j = 0; j < irq_num; j++) for (j = 0; j < irq_num; j++)
if (j < other_num) if (j < other_num)
snprintf((char *)hr_dev->irq_names[j], snprintf((char *)hr_dev->irq_names[j],
...@@ -6152,7 +6176,8 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, ...@@ -6152,7 +6176,8 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
0, hr_dev->irq_names[j - comp_num], 0, hr_dev->irq_names[j - comp_num],
&eq_table->eq[j - other_num]); &eq_table->eq[j - other_num]);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Request irq error!\n"); dev_err(hr_dev->dev, "Request irq error, ret = %d\n",
ret);
goto err_request_failed; goto err_request_failed;
} }
} }
...@@ -6245,7 +6270,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) ...@@ -6245,7 +6270,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd); ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
if (ret) { if (ret) {
dev_err(dev, "eq create failed.\n"); dev_err(dev, "eq(0x%x) create failed(%d).\n", eq->eqn,
ret);
goto err_create_eq_fail; goto err_create_eq_fail;
} }
} }
...@@ -6446,7 +6472,8 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, ...@@ -6446,7 +6472,8 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
hns_roce_free_cmd_mailbox(hr_dev, mailbox); hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) { if (ret) {
dev_err(hr_dev->dev, dev_err(hr_dev->dev,
"MODIFY SRQ Failed to cmd mailbox.\n"); "MODIFY SRQ(0x%lx) cmd process error(%d).\n",
srq->srqn, ret);
return ret; return ret;
} }
} }
...@@ -6472,7 +6499,8 @@ int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) ...@@ -6472,7 +6499,8 @@ int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
HNS_ROCE_CMD_QUERY_SRQC, HNS_ROCE_CMD_QUERY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n"); dev_err(hr_dev->dev, "QUERY SRQ(0x%lx) cmd process error(%d).\n",
srq->srqn, ret);
goto out; goto out;
} }
...@@ -6502,7 +6530,7 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que) ...@@ -6502,7 +6530,7 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que)
bit_num = __ffs64(idx_que->bitmap[i]) + 1; bit_num = __ffs64(idx_que->bitmap[i]) + 1;
idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1)); idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
return i * sizeof(u64) * 8 + (bit_num - 1); return i * BITS_PER_LONG_LONG + (bit_num - 1);
} }
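Each bitmap word is a u64, so a set (free) bit j in word i names entry i * 64 + j; sizeof(u64) * 8 and BITS_PER_LONG_LONG are both 64, the macro merely spells it out. A runnable toy of the same index math, with __builtin_ctzll standing in for __ffs64:

	#include <stdint.h>
	#include <stdio.h>

	#define BITS_PER_U64 64

	static int find_empty(uint64_t *bm, int nwords)
	{
		for (int i = 0; i < nwords; i++) {
			if (bm[i]) {
				int bit = __builtin_ctzll(bm[i]);
				bm[i] &= ~(1ULL << bit); /* claim the entry */
				return i * BITS_PER_U64 + bit;
			}
		}
		return -1;
	}

	int main(void)
	{
		uint64_t bm[2] = { 0, 1ULL << 5 };
		printf("free entry: %d\n", find_empty(bm, 2)); /* 64 + 5 = 69 */
		return 0;
	}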
static void fill_idx_queue(struct hns_roce_idx_que *idx_que, static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
...@@ -6542,12 +6570,17 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -6542,12 +6570,17 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
ind = srq->head & (srq->max - 1); ind = srq->head & (srq->max - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) { for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->max_gs)) { if (unlikely(wr->num_sge > srq->max_gs)) {
dev_err(hr_dev->dev,
"srq(0x%lx) wr sge num(%d) exceed the max num %d.\n",
srq->srqn, wr->num_sge, srq->max_gs);
ret = -EINVAL; ret = -EINVAL;
*bad_wr = wr; *bad_wr = wr;
break; break;
} }
if (unlikely(srq->head == srq->tail)) { if (unlikely(srq->head == srq->tail)) {
dev_err(hr_dev->dev, "srq(0x%lx) head equals tail\n",
srq->srqn);
ret = -ENOMEM; ret = -ENOMEM;
*bad_wr = wr; *bad_wr = wr;
break; break;
...@@ -6583,7 +6616,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -6583,7 +6616,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
*/ */
wmb(); wmb();
srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn; srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
(srq->srqn & V2_DB_BYTE_4_TAG_M);
srq_db.parameter = srq->head; srq_db.parameter = srq->head;
hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l); hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
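A standalone sketch of the doorbell packing above. V2_DB_BYTE_4_CMD_S is taken as 24 to match the literal it replaces; the tag mask and the command value below are illustrative assumptions, not the driver's definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define DB_CMD_S	24		/* matches the old "<< 24" */
	#define DB_TAG_M	0x00ffffffu	/* assumed low-24-bit tag mask */
	#define SRQ_DB_CMD	0x6		/* illustrative command code */

	int main(void)
	{
		uint32_t srqn = 0x12345;
		uint32_t byte_4 = (SRQ_DB_CMD << DB_CMD_S) | (srqn & DB_TAG_M);

		printf("doorbell byte_4 = 0x%08x\n", byte_4);
		return 0;
	}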
...@@ -6717,13 +6751,13 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) ...@@ -6717,13 +6751,13 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
ret = hns_roce_hw_v2_get_cfg(hr_dev, handle); ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Get Configuration failed!\n"); dev_err(hr_dev->dev, "Get Configuration failed(%d)!\n", ret);
goto error_failed_get_cfg; goto error_failed_get_cfg;
} }
ret = hns_roce_init(hr_dev); ret = hns_roce_init(hr_dev);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "RoCE Engine init failed!\n"); dev_err(hr_dev->dev, "RoCE Engine init failed(%d)!\n", ret);
goto error_failed_get_cfg; goto error_failed_get_cfg;
} }
...@@ -6809,7 +6843,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) ...@@ -6809,7 +6843,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
msleep(20); msleep(20);
if (!ops->ae_dev_resetting(handle)) if (!ops->ae_dev_resetting(handle))
dev_warn(&handle->pdev->dev, "Device completed reset.\n"); dev_info(&handle->pdev->dev, "Device completed reset.\n");
else { else {
dev_warn(&handle->pdev->dev, dev_warn(&handle->pdev->dev,
"Device is still resetting! timeout!\n"); "Device is still resetting! timeout!\n");
...@@ -6854,7 +6888,7 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, ...@@ -6854,7 +6888,7 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
msleep(20); msleep(20);
if (!ops->ae_dev_resetting(handle)) if (!ops->ae_dev_resetting(handle))
dev_warn(&handle->pdev->dev, dev_info(&handle->pdev->dev,
"Device completed reset.\n"); "Device completed reset.\n");
else { else {
dev_warn(&handle->pdev->dev, dev_warn(&handle->pdev->dev,
......
...@@ -86,6 +86,7 @@ ...@@ -86,6 +86,7 @@
#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64 #define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64 #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_IDX_ENTRY_SZ 4
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32 #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_SCC_CTX_ENTRY_SZ 32 #define HNS_ROCE_V2_SCC_CTX_ENTRY_SZ 32
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096 #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
...@@ -100,6 +101,8 @@ ...@@ -100,6 +101,8 @@
/* Time out for hardware to complete reset */ /* Time out for hardware to complete reset */
#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
#define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT 20
/* The longest time for software reset process in NIC subsystem, if a timeout /* The longest time for software reset process in NIC subsystem, if a timeout
* occurs, it indicates that the network subsystem has encountered a serious * occurs, it indicates that the network subsystem has encountered a serious
* error and cannot be recovered from the reset processing. * error and cannot be recovered from the reset processing.
...@@ -114,6 +117,7 @@ ...@@ -114,6 +117,7 @@
#define HNS_ROCE_PBL_HOP_NUM 2 #define HNS_ROCE_PBL_HOP_NUM 2
#define HNS_ROCE_EQE_HOP_NUM 2 #define HNS_ROCE_EQE_HOP_NUM 2
#define HNS_ROCE_IDX_HOP_NUM 1 #define HNS_ROCE_IDX_HOP_NUM 1
#define HNS_ROCE_MEM_PAGE_SUPPORT_8K 2
#define HNS_ROCE_V2_GID_INDEX_NUM(d) (d ? (8) : (256)) #define HNS_ROCE_V2_GID_INDEX_NUM(d) (d ? (8) : (256))
...@@ -401,10 +405,10 @@ struct hns_roce_srq_context { ...@@ -401,10 +405,10 @@ struct hns_roce_srq_context {
__le32 byte_8_limit_wl; __le32 byte_8_limit_wl;
__le32 byte_12_xrcd; __le32 byte_12_xrcd;
__le32 byte_16_pi_ci; __le32 byte_16_pi_ci;
__le32 wqe_bt_ba; __le32 wqe_bt_ba; /* Aligned to 8 B, so stores ba >> 3 */
__le32 byte_24_wqe_bt_ba; __le32 byte_24_wqe_bt_ba;
__le32 byte_28_rqws_pd; __le32 byte_28_rqws_pd;
__le32 idx_bt_ba; __le32 idx_bt_ba; /* Aligned to 8 B, so stores ba >> 3 */
__le32 rsv_idx_bt_ba; __le32 rsv_idx_bt_ba;
__le32 idx_cur_blk_addr; __le32 idx_cur_blk_addr;
__le32 byte_44_idxbufpgsz_addr; __le32 byte_44_idxbufpgsz_addr;
...@@ -499,7 +503,7 @@ enum hns_roce_v2_qp_state { ...@@ -499,7 +503,7 @@ enum hns_roce_v2_qp_state {
struct hns_roce_v2_qp_context { struct hns_roce_v2_qp_context {
__le32 byte_4_sqpn_tst; __le32 byte_4_sqpn_tst;
__le32 wqe_sge_ba; __le32 wqe_sge_ba; /* Aligned to 8 B, so stores ba >> 3 */
__le32 byte_12_sq_hop; __le32 byte_12_sq_hop;
__le32 byte_16_buf_ba_pg_sz; __le32 byte_16_buf_ba_pg_sz;
__le32 byte_20_smac_sgid_idx; __le32 byte_20_smac_sgid_idx;
...@@ -527,7 +531,7 @@ struct hns_roce_v2_qp_context { ...@@ -527,7 +531,7 @@ struct hns_roce_v2_qp_context {
__le32 rx_rkey_pkt_info; __le32 rx_rkey_pkt_info;
__le64 rx_va; __le64 rx_va;
__le32 byte_132_trrl; __le32 byte_132_trrl;
__le32 trrl_ba; __le32 trrl_ba; /* Aligned to 64 B, but stores ba >> 4 */
__le32 byte_140_raq; __le32 byte_140_raq;
__le32 byte_144_raq; __le32 byte_144_raq;
__le32 byte_148_raq; __le32 byte_148_raq;
...@@ -544,7 +548,7 @@ struct hns_roce_v2_qp_context { ...@@ -544,7 +548,7 @@ struct hns_roce_v2_qp_context {
__le32 byte_192_ext_sge; __le32 byte_192_ext_sge;
__le32 byte_196_sq_psn; __le32 byte_196_sq_psn;
__le32 byte_200_sq_max; __le32 byte_200_sq_max;
__le32 irrl_ba; __le32 irrl_ba; /* Aligned to 64 B, so stores ba >> 6 */
__le32 byte_208_irrl; __le32 byte_208_irrl;
__le32 byte_212_lsn; __le32 byte_212_lsn;
__le32 sq_timer; __le32 sq_timer;
...@@ -927,6 +931,10 @@ struct hns_roce_v2_qp_context { ...@@ -927,6 +931,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16 #define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16) #define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
#define V2_QP_RWE_S 1 /* rdma write enable */
#define V2_QP_RRE_S 2 /* rdma read enable */
#define V2_QP_ATE_S 3 /* rdma atomic enable */
struct hns_roce_v2_cqe { struct hns_roce_v2_cqe {
__le32 byte_4; __le32 byte_4;
union { union {
...@@ -1003,7 +1011,7 @@ struct hns_roce_v2_mpt_entry { ...@@ -1003,7 +1011,7 @@ struct hns_roce_v2_mpt_entry {
__le32 va_l; __le32 va_l;
__le32 va_h; __le32 va_h;
__le32 pbl_size; __le32 pbl_size;
__le32 pbl_ba_l; __le32 pbl_ba_l; /* Aligned to 8 B, so stores ba >> 3 */
__le32 byte_48_mode_ba; __le32 byte_48_mode_ba;
__le32 pa0_l; __le32 pa0_l;
__le32 byte_56_pa0_h; __le32 byte_56_pa0_h;
...@@ -1284,6 +1292,18 @@ struct hns_roce_pf_func_num { ...@@ -1284,6 +1292,18 @@ struct hns_roce_pf_func_num {
#define FUNC_CLEAR_RST_FUN_DONE_S 0 #define FUNC_CLEAR_RST_FUN_DONE_S 0
#define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (512 * 100) #define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (512 * 100)
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT 20
#define QUERY_PF_RES_CMDQ_DESC_NUM 2
#define QUERY_PF_TIMER_RES_CMDQ_DESC_NUM 2
#define ALLOC_VF_RES_CMDQ_DESC_NUM 2
#define CONFIG_LLM_CMDQ_DESC_NUM 2
/* TSQ and RAQ each account for 4B */
#define QP_EX_DB_SIZE 8
#define CQ_EX_DB_SIZE 4
#define TIMEOUT_POLL_QUEUE_NUM 4
struct hns_roce_cfg_llm_a { struct hns_roce_cfg_llm_a {
__le32 base_addr_l; __le32 base_addr_l;
...@@ -1620,6 +1640,9 @@ struct hns_roce_cmq_desc { ...@@ -1620,6 +1640,9 @@ struct hns_roce_cmq_desc {
#define HNS_ROCE_HW_RUN_BIT_SHIFT 31 #define HNS_ROCE_HW_RUN_BIT_SHIFT 31
#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF #define HNS_ROCE_HW_MB_STATUS_MASK 0xFF
#define HNS_ROCE_MB_TAG_S 8
#define HNS_ROCE_MB_EVENT_EN_S 16
struct hns_roce_v2_cmq_ring { struct hns_roce_v2_cmq_ring {
dma_addr_t desc_dma_addr; dma_addr_t desc_dma_addr;
struct hns_roce_cmq_desc *desc; struct hns_roce_cmq_desc *desc;
...@@ -1654,7 +1677,7 @@ struct hns_roce_link_table { ...@@ -1654,7 +1677,7 @@ struct hns_roce_link_table {
}; };
struct hns_roce_link_table_entry { struct hns_roce_link_table_entry {
u32 blk_ba0; u32 blk_ba0; /* Aligned to 4 KB regardless of kernel page size */
u32 blk_ba1_nxt_ptr; u32 blk_ba1_nxt_ptr;
}; };
#define HNS_ROCE_LINK_TABLE_BA1_S 0 #define HNS_ROCE_LINK_TABLE_BA1_S 0
...@@ -1906,6 +1929,15 @@ struct rdfx_query_cnp_tx_cnt { ...@@ -1906,6 +1929,15 @@ struct rdfx_query_cnp_tx_cnt {
__le32 rsv[2]; __le32 rsv[2];
}; };
#define HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE 1024
#define hns_roce_v2_sysfs_print(out, cur, fmt, ...) do {\
if (cur < HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE) { \
cur += snprintf(out + cur, \
HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE - cur,\
fmt, ##__VA_ARGS__); \
} \
} while (0)
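A hypothetical usage of the macro above (buffer and field values invented): cur accumulates the running length, and the guard stops printing once the 1 KB budget is spent:

	#include <stdio.h>

	#define HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE 1024
	#define hns_roce_v2_sysfs_print(out, cur, fmt, ...) do {\
		if (cur < HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE) { \
			cur += snprintf(out + cur, \
				HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE - cur,\
				fmt, ##__VA_ARGS__); \
		} \
	} while (0)

	int main(void)
	{
		char buf[HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE];
		int cur = 0;

		hns_roce_v2_sysfs_print(buf, cur, "qpn: 0x%x\n", 0x8);
		hns_roce_v2_sysfs_print(buf, cur, "state: %d\n", 3);
		fputs(buf, stdout);
		return 0;
	}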
int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev, int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev,
char *buf, int *desc); char *buf, int *desc);
int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev, int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev,
...@@ -1936,6 +1968,7 @@ void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, ...@@ -1936,6 +1968,7 @@ void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
bool is_read); bool is_read);
int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num); struct hns_roce_cmq_desc *desc, int num);
#ifdef CONFIG_INFINIBAND_HNS_DFX #ifdef CONFIG_INFINIBAND_HNS_DFX
#ifdef CONFIG_KERNEL_419 #ifdef CONFIG_KERNEL_419
void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
......
...@@ -77,10 +77,10 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr) ...@@ -77,10 +77,10 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
u8 phy_port; u8 phy_port;
u32 i = 0; u32 i = 0;
if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM)) if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
return 0; return 0;
for (i = 0; i < MAC_ADDR_OCTET_NUM; i++) for (i = 0; i < ETH_ALEN; i++)
hr_dev->dev_addr[port][i] = addr[i]; hr_dev->dev_addr[port][i] = addr[i];
phy_port = hr_dev->iboe.phy_port[port]; phy_port = hr_dev->iboe.phy_port[port];
...@@ -114,6 +114,11 @@ static int hns_roce_add_gid(const union ib_gid *gid, ...@@ -114,6 +114,11 @@ static int hns_roce_add_gid(const union ib_gid *gid,
ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, ret = hr_dev->hw->set_gid(hr_dev, port, attr->index,
(union ib_gid *)gid, attr); (union ib_gid *)gid, attr);
#endif #endif
if (ret)
dev_err(hr_dev->dev, "set gid failed(%d), index = %d", ret,
attr->index);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
return ret; return ret;
...@@ -127,12 +132,19 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) ...@@ -127,12 +132,19 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
unsigned long flags; unsigned long flags;
int ret; int ret;
if (port >= hr_dev->caps.num_ports) if (port >= hr_dev->caps.num_ports) {
dev_err(hr_dev->dev,
"Port num %d id large than max port num %d.\n",
port, hr_dev->caps.num_ports);
return -EINVAL; return -EINVAL;
}
spin_lock_irqsave(&hr_dev->iboe.lock, flags); spin_lock_irqsave(&hr_dev->iboe.lock, flags);
ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr); ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
if (ret)
dev_warn(hr_dev->dev, "del gid failed(%d), index = %d", ret,
attr->index);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
...@@ -161,6 +173,9 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num, ...@@ -161,6 +173,9 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid, ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
attr); attr);
if (ret)
dev_err(hr_dev->dev, "set gid failed(%d), index = %d",
ret, index);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
...@@ -179,12 +194,19 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num, ...@@ -179,12 +194,19 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
rdfx_func_cnt(hr_dev, RDFX_FUNC_DEL_GID); rdfx_func_cnt(hr_dev, RDFX_FUNC_DEL_GID);
if (port >= hr_dev->caps.num_ports) if (port >= hr_dev->caps.num_ports) {
dev_err(hr_dev->dev,
"Port num %d id large than max port num %d.\n",
port, hr_dev->caps.num_ports);
return -EINVAL; return -EINVAL;
}
spin_lock_irqsave(&hr_dev->iboe.lock, flags); spin_lock_irqsave(&hr_dev->iboe.lock, flags);
ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, &zattr); ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, &zattr);
if (ret)
dev_warn(hr_dev->dev, "del gid failed(%d), index = %d", ret,
index);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
...@@ -211,6 +233,9 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port, ...@@ -211,6 +233,9 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
case NETDEV_REGISTER: case NETDEV_REGISTER:
case NETDEV_CHANGEADDR: case NETDEV_CHANGEADDR:
ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr); ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
if (ret)
dev_err(dev, "set mac failed(%d), event = %ld\n",
ret, event);
break; break;
case NETDEV_DOWN: case NETDEV_DOWN:
/* /*
...@@ -229,10 +254,10 @@ static int hns_roce_netdev_event(struct notifier_block *self, ...@@ -229,10 +254,10 @@ static int hns_roce_netdev_event(struct notifier_block *self,
unsigned long event, void *ptr) unsigned long event, void *ptr)
{ {
struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct hns_roce_ib_iboe *iboe = NULL; struct hns_roce_ib_iboe *iboe;
struct hns_roce_dev *hr_dev = NULL; struct hns_roce_dev *hr_dev;
u8 port = 0; u8 port;
int ret = 0; int ret;
hr_dev = container_of(self, struct hns_roce_dev, iboe.nb); hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
iboe = &hr_dev->iboe; iboe = &hr_dev->iboe;
...@@ -260,9 +285,12 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev) ...@@ -260,9 +285,12 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
hr_dev->caps.max_mtu); hr_dev->caps.max_mtu);
ret = hns_roce_set_mac(hr_dev, i, ret = hns_roce_set_mac(hr_dev, i,
hr_dev->iboe.netdevs[i]->dev_addr); hr_dev->iboe.netdevs[i]->dev_addr);
if (ret) if (ret) {
dev_err(hr_dev->dev, "Port %d set mac failed(%d)\n",
i, ret);
return ret; return ret;
} }
}
return 0; return 0;
} }
...@@ -361,7 +389,11 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, ...@@ -361,7 +389,11 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_PORT); rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_PORT);
assert(port_num > 0); if (port_num < 1) {
dev_err(dev, "invalid port num!\n");
return -EINVAL;
}
port = port_num - 1; port = port_num - 1;
/* props being zeroed by the caller, avoid zeroing it here */ /* props being zeroed by the caller, avoid zeroing it here */
...@@ -389,7 +421,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, ...@@ -389,7 +421,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256; props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN; IB_PORT_ACTIVE : IB_PORT_DOWN;
props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3; props->phys_state = (props->state == IB_PORT_ACTIVE) ?
HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED;
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
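The two new names presumably carry the literals they replace, i.e. the IB physical-port states LinkUp and Disabled; a sketch of the assumed definitions:

	/* Assumed values, matching the replaced literals 5 and 3. */
	#define HNS_ROCE_PHY_LINKUP	5
	#define HNS_ROCE_PHY_DISABLED	3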
...@@ -452,13 +485,15 @@ static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask, ...@@ -452,13 +485,15 @@ static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int ret = 0; int ret;
struct hns_roce_ucontext *context; struct hns_roce_ucontext *context;
struct hns_roce_ib_alloc_ucontext_resp resp = {}; struct hns_roce_ib_alloc_ucontext_resp resp = {};
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
if (!hr_dev->active) if (!hr_dev->active) {
dev_err(hr_dev->dev, "alloc uncontext failed, hr_dev is not active\n");
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
}
rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_UCONTEXT); rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_UCONTEXT);
...@@ -558,8 +593,10 @@ static int hns_roce_mmap(struct ib_ucontext *context, ...@@ -558,8 +593,10 @@ static int hns_roce_mmap(struct ib_ucontext *context,
rdfx_func_cnt(hr_dev, RDFX_FUNC_MMAP); rdfx_func_cnt(hr_dev, RDFX_FUNC_MMAP);
if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) {
dev_err(hr_dev->dev, "mmap failed, unexpected vm area size.\n");
return -EINVAL; return -EINVAL;
}
if (vma->vm_pgoff == 0) { if (vma->vm_pgoff == 0) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
...@@ -574,8 +611,10 @@ static int hns_roce_mmap(struct ib_ucontext *context, ...@@ -574,8 +611,10 @@ static int hns_roce_mmap(struct ib_ucontext *context,
hr_dev->uar2_size, hr_dev->uar2_size,
vma->vm_page_prot)) vma->vm_page_prot))
return -EAGAIN; return -EAGAIN;
} else } else {
dev_err(hr_dev->dev, "mmap failed, vm_pgoff is unsupported.\n");
return -EINVAL; return -EINVAL;
}
return hns_roce_set_vma_data(vma, to_hr_ucontext(context)); return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
} }
...@@ -589,8 +628,11 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num, ...@@ -589,8 +628,11 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_PORT_IMMUTABLE); rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_PORT_IMMUTABLE);
ret = ib_query_port(ib_dev, port_num, &attr); ret = ib_query_port(ib_dev, port_num, &attr);
if (ret) if (ret) {
dev_err(to_hr_dev(ib_dev)->dev, "ib_query_port failed(%d)!\n",
ret);
return ret; return ret;
}
immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len;
...@@ -633,10 +675,10 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev) ...@@ -633,10 +675,10 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
static int hns_roce_register_device(struct hns_roce_dev *hr_dev) static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{ {
int ret;
struct hns_roce_ib_iboe *iboe = NULL;
struct ib_device *ib_dev = NULL;
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_ib_iboe *iboe;
struct ib_device *ib_dev;
int ret;
iboe = &hr_dev->iboe; iboe = &hr_dev->iboe;
spin_lock_init(&iboe->lock); spin_lock_init(&iboe->lock);
...@@ -772,7 +814,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ...@@ -772,7 +814,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ret = hns_roce_setup_mtu_mac(hr_dev); ret = hns_roce_setup_mtu_mac(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "setup_mtu_mac failed!\n"); dev_err(dev, "setup_mtu_mac failed, ret = %d\n", ret);
goto error_failed_setup_mtu_mac; goto error_failed_setup_mtu_mac;
} }
...@@ -1014,45 +1056,46 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -1014,45 +1056,46 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_uar_table(hr_dev); ret = hns_roce_init_uar_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to initialize uar table. aborting\n"); dev_err(dev, "Failed to init uar table(%d). aborting\n", ret);
return ret; return ret;
} }
ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar); ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
if (ret) { if (ret) {
dev_err(dev, "Failed to allocate priv_uar.\n"); dev_err(dev, "Failed to allocate priv_uar(%d).\n", ret);
goto err_uar_table_free; goto err_uar_table_free;
} }
ret = hns_roce_init_pd_table(hr_dev); ret = hns_roce_init_pd_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to init protected domain table.\n"); dev_err(dev, "Failed to init pd table(%d).\n", ret);
goto err_uar_alloc_free; goto err_uar_alloc_free;
} }
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ret = hns_roce_init_xrcd_table(hr_dev); ret = hns_roce_init_xrcd_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to init protected domain table.\n"); dev_err(dev, "Failed to init xrcd table(%d).\n",
ret);
goto err_pd_table_free; goto err_pd_table_free;
} }
} }
ret = hns_roce_init_mr_table(hr_dev); ret = hns_roce_init_mr_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to init memory region table.\n"); dev_err(dev, "Failed to init mr table(%d).\n", ret);
goto err_xrcd_table_free; goto err_xrcd_table_free;
} }
ret = hns_roce_init_cq_table(hr_dev); ret = hns_roce_init_cq_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to init completion queue table.\n"); dev_err(dev, "Failed to init cq table(%d).\n", ret);
goto err_mr_table_free; goto err_mr_table_free;
} }
ret = hns_roce_init_qp_table(hr_dev); ret = hns_roce_init_qp_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to init queue pair table.\n"); dev_err(dev, "Failed to init qp table(%d).\n", ret);
goto err_cq_table_free; goto err_cq_table_free;
} }
...@@ -1060,7 +1103,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -1060,7 +1103,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_srq_table(hr_dev); ret = hns_roce_init_srq_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, dev_err(dev,
"Failed to init share receive queue table.\n"); "Failed to init srq table(%d).\n", ret);
goto err_qp_table_free; goto err_qp_table_free;
} }
} }
...@@ -1115,60 +1158,62 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) ...@@ -1115,60 +1158,62 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
ret = hns_roce_reset(hr_dev); ret = hns_roce_reset(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Reset RoCE engine failed!\n"); dev_err(dev, "Reset RoCE engine failed(%d)!\n", ret);
return ret; return ret;
} }
if (hr_dev->hw->cmq_init) { if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev); ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Init RoCE Command Queue failed!\n"); dev_err(dev, "Init RoCE cmq failed(%d)!\n", ret);
goto error_failed_cmq_init; goto error_failed_cmq_init;
} }
} }
ret = hr_dev->hw->hw_profile(hr_dev); ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Get RoCE engine profile failed!\n"); dev_err(dev, "Get RoCE engine profile failed(%d)!\n", ret);
goto error_failed_cmd_init; goto error_failed_cmd_init;
} }
ret = hns_roce_cmd_init(hr_dev); ret = hns_roce_cmd_init(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "cmd init failed!\n"); dev_err(dev, "Cmd init failed(%d)!\n", ret);
goto error_failed_cmd_init; goto error_failed_cmd_init;
} }
ret = hr_dev->hw->init_eq(hr_dev); ret = hr_dev->hw->init_eq(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "eq init failed!\n"); dev_err(dev, "Eq init failed(%d)!\n", ret);
goto error_failed_eq_table; goto error_failed_eq_table;
} }
if (hr_dev->cmd_mod) { if (hr_dev->cmd_mod) {
ret = hns_roce_cmd_use_events(hr_dev); ret = hns_roce_cmd_use_events(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Switch to event-driven cmd failed!\n"); dev_err(dev, "Switch to event-driven cmd failed(%d)!\n",
ret);
goto error_failed_use_event; goto error_failed_use_event;
} }
} }
ret = hns_roce_init_hem(hr_dev); ret = hns_roce_init_hem(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n"); dev_err(dev, "Init HEM(Hardware Entry Memory) failed(%d)!\n",
ret);
goto error_failed_init_hem; goto error_failed_init_hem;
} }
ret = hns_roce_setup_hca(hr_dev); ret = hns_roce_setup_hca(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "setup hca failed!\n"); dev_err(dev, "Setup hca failed(%d)!\n", ret);
goto error_failed_setup_hca; goto error_failed_setup_hca;
} }
if (hr_dev->hw->hw_init) { if (hr_dev->hw->hw_init) {
ret = hr_dev->hw->hw_init(hr_dev); ret = hr_dev->hw->hw_init(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "hw_init failed!\n"); dev_err(dev, "Hw_init failed!\n");
goto error_failed_engine_init; goto error_failed_engine_init;
} }
} }
......
...@@ -319,11 +319,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, ...@@ -319,11 +319,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]); mr->pbl_l1_dma_addr[i]);
for (j = 0; j < pbl_bt_sz / 8; j++) { for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
if (i == loop_i && j >= loop_j) if (i == loop_i && j >= loop_j)
break; break;
bt_idx = i * pbl_bt_sz / 8 + j; bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
dma_free_coherent(dev, pbl_bt_sz, dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx], mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]); mr->pbl_l2_dma_addr[bt_idx]);
...@@ -334,8 +334,8 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, ...@@ -334,8 +334,8 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]); mr->pbl_l1_dma_addr[i]);
for (j = 0; j < pbl_bt_sz / 8; j++) { for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
bt_idx = i * pbl_bt_sz / 8 + j; bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
dma_free_coherent(dev, pbl_bt_sz, dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx], mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]); mr->pbl_l2_dma_addr[bt_idx]);
...@@ -360,12 +360,12 @@ static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -360,12 +360,12 @@ static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
if (npages > pbl_bt_sz / 8) { if (npages > pbl_bt_sz / BA_BYTE_LEN) {
dev_err(dev, "npages %d is larger than buf_pg_sz!", dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages); npages);
return -EINVAL; return -EINVAL;
} }
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, mr->pbl_buf = dma_alloc_coherent(dev, npages * BA_BYTE_LEN,
&(mr->pbl_dma_addr), &(mr->pbl_dma_addr),
GFP_KERNEL); GFP_KERNEL);
if (!mr->pbl_buf) if (!mr->pbl_buf)
...@@ -385,21 +385,21 @@ static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -385,21 +385,21 @@ static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz) struct hns_roce_mr *mr, u32 pbl_bt_sz)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int npages_allocated; int npages_alloced;
u64 pbl_last_bt_num; u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0; u64 pbl_bt_cnt = 0;
u64 size; u64 size;
int i; int i;
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); pbl_last_bt_num = DIV_ROUND_UP(npages, pbl_bt_sz / BA_BYTE_LEN);
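Worked numbers for the rounding above, again with a 4 KB BT chunk (4096 / 8 = 512 base addresses per BT): npages = 1000 gives pbl_last_bt_num = DIV_ROUND_UP(1000, 512) = 2, exactly what the old open-coded (1000 + 511) / 512 computed.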
/* alloc L1 BT */ /* alloc L1 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) { for (i = 0; i < pbl_bt_sz / BA_BYTE_LEN; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) { if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz; size = pbl_bt_sz;
} else { } else {
npages_allocated = i * (pbl_bt_sz / 8); npages_alloced = i * (pbl_bt_sz / BA_BYTE_LEN);
size = (npages - npages_allocated) * 8; size = (npages - npages_alloced) * BA_BYTE_LEN;
} }
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
&(mr->pbl_l1_dma_addr[i]), &(mr->pbl_l1_dma_addr[i]),
@@ -426,7 +426,7 @@ static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
 {
     struct device *dev = hr_dev->dev;
     int mr_alloc_done = 0;
-    int npages_allocated;
+    int npages_alloced;
     u64 pbl_last_bt_num;
     u64 pbl_bt_cnt = 0;
     u64 bt_idx;
@@ -434,7 +434,7 @@ static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
     int i;
     int j = 0;

-    pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
+    pbl_last_bt_num = DIV_ROUND_UP(npages, pbl_bt_sz / BA_BYTE_LEN);

     mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
                       sizeof(*mr->pbl_l2_dma_addr),
@@ -449,7 +449,7 @@ static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
         goto err_kcalloc_bt_l2;

     /* alloc L1, L2 BT */
-    for (i = 0; i < pbl_bt_sz / 8; i++) {
+    for (i = 0; i < pbl_bt_sz / BA_BYTE_LEN; i++) {
         mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
                          &(mr->pbl_l1_dma_addr[i]),
                          GFP_KERNEL);
@@ -460,15 +460,15 @@ static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
         *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

-        for (j = 0; j < pbl_bt_sz / 8; j++) {
-            bt_idx = i * pbl_bt_sz / 8 + j;
+        for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
+            bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;

             if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
                 size = pbl_bt_sz;
             } else {
-                npages_allocated = bt_idx *
-                           (pbl_bt_sz / 8);
-                size = (npages - npages_allocated) * 8;
+                npages_alloced = bt_idx *
+                         (pbl_bt_sz / BA_BYTE_LEN);
+                size = (npages - npages_alloced) * BA_BYTE_LEN;
             }
             mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
                     dev, size,
@@ -525,17 +525,16 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
     if (mhop_num == HNS_ROCE_HOP_NUM_0)
         return 0;

-    /* hop_num = 1 */
     if (mhop_num == 1)
         return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);

-    mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
+    mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / BA_BYTE_LEN,
                       sizeof(*mr->pbl_l1_dma_addr),
                       GFP_KERNEL);
     if (!mr->pbl_l1_dma_addr)
         return -ENOMEM;

-    mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+    mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / BA_BYTE_LEN, sizeof(*mr->pbl_bt_l1),
                 GFP_KERNEL);
     if (!mr->pbl_bt_l1)
         goto err_kcalloc_bt_l1;
@@ -583,7 +582,7 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
 {
     struct device *dev = hr_dev->dev;
     unsigned long index = 0;
-    int ret = 0;
+    int ret;

     /* Allocate a key for mr from mr_table */
     ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -609,7 +608,8 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
         mr->pbl_l0_dma_addr = 0;
     } else {
         if (!hr_dev->caps.pbl_hop_num) {
-            mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+            mr->pbl_buf = dma_alloc_coherent(dev,
                             npages * BA_BYTE_LEN,
                              &(mr->pbl_dma_addr),
                              GFP_KERNEL);
             if (!mr->pbl_buf)
@@ -626,7 +626,7 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
                    struct hns_roce_mr *mr)
 {
     struct device *dev = hr_dev->dev;
-    int npages_allocated;
+    int npages_alloced;
     int npages;
     int i, j;
     u32 pbl_bt_sz;
@@ -640,9 +640,8 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
     if (mhop_num == HNS_ROCE_HOP_NUM_0)
         return;

-    /* hop_num = 1 */
     if (mhop_num == 1) {
-        dma_free_coherent(dev, (unsigned int)(npages * 8),
+        dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN),
                   mr->pbl_buf, mr->pbl_dma_addr);
         return;
     }
@@ -653,12 +652,11 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
     if (mhop_num == 2) {
         for (i = 0; i < mr->l0_chunk_last_num; i++) {
             if (i == mr->l0_chunk_last_num - 1) {
-                npages_allocated = i * (pbl_bt_sz / 8);
+                npages_alloced = i * (pbl_bt_sz / BA_BYTE_LEN);

                 dma_free_coherent(dev,
-                          (npages - npages_allocated) * 8,
-                          mr->pbl_bt_l1[i],
-                          mr->pbl_l1_dma_addr[i]);
+                    (npages - npages_alloced) * BA_BYTE_LEN,
+                    mr->pbl_bt_l1[i], mr->pbl_l1_dma_addr[i]);

                 break;
             }
@@ -671,16 +669,16 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
             dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
                       mr->pbl_l1_dma_addr[i]);

-            for (j = 0; j < pbl_bt_sz / 8; j++) {
-                bt_idx = i * (pbl_bt_sz / 8) + j;
+            for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
+                bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j;

                 if ((i == mr->l0_chunk_last_num - 1)
                     && j == mr->l1_chunk_last_num - 1) {
-                    npages_allocated = bt_idx *
-                               (pbl_bt_sz / 8);
+                    npages_alloced = bt_idx *
+                             (pbl_bt_sz / BA_BYTE_LEN);
                     dma_free_coherent(dev,
-                        (npages - npages_allocated) * 8,
+                        (npages - npages_alloced) * BA_BYTE_LEN,
                         mr->pbl_bt_l2[bt_idx],
                         mr->pbl_l2_dma_addr[bt_idx]);
@@ -725,7 +723,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
         npages = ib_umem_page_count(mr->umem);

         if (!hr_dev->caps.pbl_hop_num)
-            dma_free_coherent(dev, (unsigned int)(npages * 8),
+            dma_free_coherent(dev,
+                      (unsigned int)(npages * BA_BYTE_LEN),
                       mr->pbl_buf, mr->pbl_dma_addr);
         else
             hns_roce_mhop_free(hr_dev, mr);
@@ -750,9 +749,11 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
     /* Prepare HEM entry memory */
     ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
-    if (ret)
+    if (ret) {
+        dev_err(dev, "get mtpt table(0x%lx) failed, ret = %d",
+            mtpt_idx, ret);
         return ret;
+    }

     /* Allocate mailbox memory */
     mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
     if (IS_ERR(mailbox)) {
@@ -772,7 +773,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
     ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
                  mtpt_idx & (hr_dev->caps.num_mtpts - 1));
     if (ret) {
-        dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+        dev_err(dev, "SW2HW_MPT(0x%lx) failed (%d)\n", mtpt_idx, ret);
         goto err_page;
     }
@@ -818,6 +819,9 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
         bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
         break;
     default:
+        dev_err(hr_dev->dev,
+            "Unsupport mtt type %d, write mtt chunk failed\n",
+            mtt->mtt_type);
         return -EINVAL;
     }
@@ -927,9 +931,11 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
                    hr_dev->caps.num_mtpts,
                    hr_dev->caps.num_mtpts - 1,
                    hr_dev->caps.reserved_mrws, 0);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev,
+            "mtpt bitmap init failed, ret = %d\n", ret);
         return ret;
+    }

     ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
                   ilog2(hr_dev->caps.num_mtt_segs));
     if (ret)
@@ -995,8 +1001,12 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
     /* Allocate memory region key */
     ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
                 ~0ULL, acc, 0, mr);
-    if (ret)
+    if (ret) {
+        dev_err(to_hr_dev(pd->device)->dev,
+            "alloc mr failed(%d), pd =0x%lx\n",
+            ret, to_hr_pd(pd)->pdn);
         goto err_free;
+    }

 #ifdef CONFIG_INFINIBAND_HNS_TEST
     test_set_mr_access(mr);
@@ -1120,6 +1130,7 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
                 (k << umem->page_shift);

             if (!hr_dev->caps.pbl_hop_num) {
+                /* for hip06, page addr is aligned to 4K */
                 mr->pbl_buf[i++] = page_addr >> 12;
             } else if (hr_dev->caps.pbl_hop_num == 1) {
                 mr->pbl_buf[i++] = page_addr;
@@ -1130,7 +1141,7 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
                 mr->pbl_bt_l2[i][j] = page_addr;

                 j++;
-                if (j >= (pbl_bt_sz / 8)) {
+                if (j >= (pbl_bt_sz / BA_BYTE_LEN)) {
                     i++;
                     j = 0;
                 }
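The rollover in the multi-hop branch above fills base-address tables sequentially: flat page index k lands in table i = k / per_bt and slot j = k % per_bt, where per_bt is the number of entries one BT holds. A small userspace model of that indexing (sizes are illustrative, not hardware values):

#include <assert.h>
#include <stdio.h>

#define BA_BYTE_LEN 8

int main(void)
{
    unsigned int pbl_bt_sz = 4096;
    unsigned int per_bt = pbl_bt_sz / BA_BYTE_LEN;   /* 512 entries per BT */
    unsigned int i = 0, j = 0;

    for (unsigned int k = 0; k < 3 * per_bt; k++) {
        /* the rollover counters always agree with the closed form */
        assert(i == k / per_bt && j == k % per_bt);
        j++;                     /* advance slot, as the driver loop does */
        if (j >= per_bt) {       /* table full: move on to the next BT */
            i++;
            j = 0;
        }
    }
    printf("rollover indexing matches i = k / per_bt, j = k %% per_bt\n");
    return 0;
}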
@@ -1164,6 +1175,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                    access_flags, 0);
     if (IS_ERR(mr->umem)) {
         ret = PTR_ERR(mr->umem);
+        dev_err(dev, " ib_umem_get failed, ret = %d\n", ret);
         goto err_free;
     }
@@ -1180,7 +1192,8 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
     } else {
         u64 pbl_size = 1;

-        bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
+        bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) /
+              BA_BYTE_LEN;
         for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
             pbl_size *= bt_size;
         if (n > pbl_size) {
@@ -1243,8 +1256,8 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
         if (hr_dev->caps.pbl_hop_num)
             hns_roce_mhop_free(hr_dev, mr);
         else
-            dma_free_coherent(dev, npages * 8, mr->pbl_buf,
-                      mr->pbl_dma_addr);
+            dma_free_coherent(dev, npages * BA_BYTE_LEN,
+                      mr->pbl_buf, mr->pbl_dma_addr);
     }
     ib_umem_release(mr->umem);
@@ -1262,7 +1275,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
         if (ret)
             goto release_umem;
     } else {
-        mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+        mr->pbl_buf = dma_alloc_coherent(dev, npages * BA_BYTE_LEN,
                          &(mr->pbl_dma_addr),
                          GFP_KERNEL);
         if (!mr->pbl_buf) {
@@ -1286,7 +1299,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
         if (hr_dev->caps.pbl_hop_num)
             hns_roce_mhop_free(hr_dev, mr);
         else
-            dma_free_coherent(dev, npages * 8,
+            dma_free_coherent(dev, npages * BA_BYTE_LEN,
                       mr->pbl_buf,
                       mr->pbl_dma_addr);
     }
......
@@ -109,7 +109,8 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
         if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
             hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
-            dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+            dev_err(dev, "[alloc_pd]ib_copy_to_udata failed, pd - 0x%lx!\n",
+                pd->pdn);
             kfree(pd);
             return ERR_PTR(-EFAULT);
         }
@@ -119,7 +120,8 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
     if (context) {
         if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
             hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
-            dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+            dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!, pd -0x%lx\n",
+                pd->pdn);
             kfree(pd);
             return ERR_PTR(-EFAULT);
         }
@@ -207,7 +209,7 @@ int hns_roce_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
     struct resource *res;
-    int ret = 0;
+    int ret;

     /* Using bitmap to manager UAR index */
     ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
@@ -228,8 +230,8 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
         }
         uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
     } else {
-        uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
-               >> PAGE_SHIFT);
+        uar->pfn = ((pci_resource_start(hr_dev->pci_dev,
+                    HNS_ROCE_PCI_BAR_NR)) >> PAGE_SHIFT);
     }

     return 0;
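The UAR change is purely naming: BAR 2 is now referred to as HNS_ROCE_PCI_BAR_NR, and the arithmetic is untouched. The page frame number is the region's physical start shifted right by PAGE_SHIFT, with the UAR index added only in the platform-resource branch. A tiny model with made-up addresses:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assuming 4 KiB pages */

int main(void)
{
    uint64_t bar_start = 0x80000000ULL;  /* hypothetical BAR 2 base */
    uint64_t uar_index = 3;              /* hypothetical UAR slot */

    /* platform-resource branch: pfn = (start >> PAGE_SHIFT) + index */
    uint64_t pfn_res = (bar_start >> PAGE_SHIFT) + uar_index;
    /* PCI BAR branch: pfn = start >> PAGE_SHIFT */
    uint64_t pfn_bar = bar_start >> PAGE_SHIFT;

    assert(pfn_res == 0x80003 && pfn_bar == 0x80000);
    printf("pfn_res=0x%" PRIx64 " pfn_bar=0x%" PRIx64 "\n", pfn_res, pfn_bar);
    return 0;
}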
......
@@ -42,6 +42,52 @@
 #define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)

+static void flush_work_handle(struct work_struct *work)
+{
+    struct hns_roce_flush_work *flush_work = container_of(work,
+                    struct hns_roce_flush_work, work);
+    struct hns_roce_qp *hr_qp = flush_work->hr_qp;
+    struct device *dev = flush_work->hr_dev->dev;
+    struct ib_qp_attr attr;
+    int attr_mask;
+    int ret;
+
+    attr_mask = IB_QP_STATE;
+    attr.qp_state = IB_QPS_ERR;
+
+    ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
+    if (ret)
+        dev_err(dev, "Modify qp to err for flush cqe fail(%d)\n", ret);
+
+    kfree(flush_work);
+}
+
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+             struct hns_roce_cq *cq, enum queue_type type)
+{
+    struct hns_roce_flush_work *flush_work;
+
+    flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
+    if (!flush_work)
+        return;
+
+    flush_work->hr_dev = hr_dev;
+    flush_work->hr_qp = qp;
+    INIT_WORK(&flush_work->work, flush_work_handle);
+
+    switch (type) {
+    case HNS_ROCE_SQ:
+        queue_work(qp->sq.workq, &flush_work->work);
+        break;
+    case HNS_ROCE_RQ:
+        queue_work(qp->rq.workq, &flush_work->work);
+        break;
+    case HNS_ROCE_CQ:
+        queue_work(cq->workq, &flush_work->work);
+        break;
+    }
+}
+EXPORT_SYMBOL_GPL(init_flush_work);
+
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
     struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
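The new init_flush_work()/flush_work_handle() pair defers the modify-QP-to-error step out of the event path: the work item is allocated with GFP_ATOMIC because the caller may run in atomic (interrupt) context, while the handler later runs in process context, where a blocking modify-QP command can sleep safely. A minimal standalone sketch of the same pattern; the my_* names are illustrative, not driver symbols:

/* Deferred-flush pattern: allocate a work item in atomic context, let a
 * single-threaded workqueue run it in process context, free it there. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_flush_work {
    struct work_struct work;
    void *qp;                           /* target object, stands in for hr_qp */
};

static void my_flush_handler(struct work_struct *work)
{
    struct my_flush_work *fw = container_of(work, struct my_flush_work, work);

    /* process context here: safe to issue a blocking mailbox command */
    kfree(fw);
}

static void my_schedule_flush(struct workqueue_struct *wq, void *qp)
{
    struct my_flush_work *fw = kzalloc(sizeof(*fw), GFP_ATOMIC);

    if (!fw)
        return;                         /* best effort, as in init_flush_work() */
    fw->qp = qp;
    INIT_WORK(&fw->work, my_flush_handler);
    queue_work(wq, &fw->work);          /* runs in order on a single-threaded wq */
}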
@@ -61,6 +107,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
         return;
     }

+    if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+        event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+        event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)
+        init_flush_work(hr_dev, qp, NULL, HNS_ROCE_SQ);
+
     qp->event(qp, (enum hns_roce_event)event_type);

     if (atomic_dec_and_test(&qp->refcount))
@@ -103,7 +154,8 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
         event.event = IB_EVENT_QP_ACCESS_ERR;
         break;
     default:
-        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
+        dev_dbg(ibqp->device->dev.parent,
+            "roce_ib:Unexpected eventtype %d on QP%06lx\n",
             type, hr_qp->qpn);
         return;
     }
@@ -156,7 +208,8 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                 hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
     spin_unlock_irq(&qp_table->lock);
     if (ret) {
-        dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
+        dev_err(hr_dev->dev, "QPC radix insert failed, qpn 0x%lx\n",
+            hr_qp->qpn);
         goto err_put_irrl;
     }
@@ -185,14 +238,14 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
     /* Alloc memory for QPC */
     ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
     if (ret) {
-        dev_err(dev, "QPC table get failed\n");
+        dev_err(dev, "QPC table get failed, qpn 0x%lx\n", hr_qp->qpn);
         goto err_out;
     }

     /* Alloc memory for IRRL */
     ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
     if (ret) {
-        dev_err(dev, "IRRL table get failed\n");
+        dev_err(dev, "IRRL table get failed, qpn 0x%lx\n", hr_qp->qpn);
         goto err_put_qp;
     }
@@ -201,7 +254,8 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
         ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
                      hr_qp->qpn);
         if (ret) {
-            dev_err(dev, "TRRL table get failed\n");
+            dev_err(dev, "TRRL table get failed, qpn 0x%lx\n",
+                hr_qp->qpn);
             goto err_put_irrl;
         }
     }
@@ -211,7 +265,8 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
         ret = hns_roce_table_get(hr_dev, &qp_table->scc_ctx_table,
                      hr_qp->qpn);
         if (ret) {
-            dev_err(dev, "SCC CTX table get failed\n");
+            dev_err(dev, "SCC CTX table get failed, qpn 0x%lx\n",
+                hr_qp->qpn);
             goto err_put_trrl;
         }
     }
@@ -221,7 +276,8 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                 hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
     spin_unlock_irq(&qp_table->lock);
     if (ret) {
-        dev_err(dev, "QPC radix_tree_insert failed\n");
+        dev_err(dev, "QPC radix_tree_insert failed, qpn - 0x%lx\n",
+            hr_qp->qpn);
         goto err_put_scc_ctx;
     }
@@ -304,8 +360,8 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
     /* Check the validity of QP support capacity */
     if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
         cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
-        dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
-            cap->max_recv_wr, cap->max_recv_sge);
+        dev_err(dev, "RQ(0x%lx) WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
+            hr_qp->qpn, cap->max_recv_wr, cap->max_recv_sge);
         return -EINVAL;
     }
@@ -329,13 +385,14 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
         hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

         if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
-            dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
+            dev_err(dev, "while setting rq(0x%lx) size, rq.wqe_cnt too large\n",
+                hr_qp->qpn);
             return -EINVAL;
         }

         max_cnt = max(1U, cap->max_recv_sge);
         hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
-        if (hr_dev->caps.max_rq_sg <= 2)
+        if (hr_dev->caps.max_rq_sg <= HNS_ROCE_MAX_SGE_NUM)
             hr_qp->rq.wqe_shift =
                 ilog2(hr_dev->caps.max_rq_desc_sz);
         else
@@ -361,12 +418,14 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
     if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
         ucmd->log_sq_stride > max_sq_stride ||
         ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
-        dev_err(hr_dev->dev, "check SQ size error!\n");
+        dev_err(hr_dev->dev,
+            "check SQ size error!Log sq stride 0x%x\n",
+            ucmd->log_sq_stride);
         return -EINVAL;
     }

     if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
-        dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
+        dev_err(hr_dev->dev, "SQ sge error!Max send sge %d\n",
             cap->max_send_sge);
         return -EINVAL;
     }
@@ -386,7 +445,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
     ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
     if (ret) {
-        dev_err(hr_dev->dev, "Sanity check sq size fail\n");
+        dev_err(hr_dev->dev, "Sanity check sq(0x%lx) size fail\n",
+            hr_qp->qpn);
         return ret;
     }
@@ -394,33 +454,34 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
     hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

     max_cnt = max(1U, cap->max_send_sge);
-    if (hr_dev->caps.max_sq_sg <= 2)
+    if (hr_dev->caps.max_sq_sg <= HNS_ROCE_MAX_SGE_NUM)
         hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
     else
         hr_qp->sq.max_gs = max_cnt;

-    if (hr_qp->sq.max_gs > 2)
+    if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
         hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
-                            (hr_qp->sq.max_gs - 2));
+                            (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));

     if (hr_qp->ibqp.qp_type == IB_QPT_UD)
         hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                             hr_qp->sq.max_gs);

-    if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+    if ((hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) &&
+        (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A)) {
         if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
             dev_err(hr_dev->dev,
-                "The extended sge cnt error! sge_cnt=%d\n",
-                hr_qp->sge.sge_cnt);
+                "SQ(0x%lx) extended sge cnt error! sge_cnt=%d\n",
+                hr_qp->qpn, hr_qp->sge.sge_cnt);
             return -EINVAL;
         }
     }

-    hr_qp->sge.sge_shift = 4;
+    hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
     ex_sge_num = hr_qp->sge.sge_cnt;

     /* Get buf size, SQ and RQ are aligned to page_szie */
-    if (hr_dev->caps.max_sq_sg <= 2) {
+    if (hr_dev->caps.max_sq_sg <= HNS_ROCE_MAX_SGE_NUM) {
         hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                           hr_qp->rq.wqe_shift), PAGE_SIZE) +
                    HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
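The renamed constants make the extended-SGE sizing readable: HNS_ROCE_SGE_IN_WQE (2) SGEs ride inline in each WQE, so only the excess per WQE spills into the extended SGE area, and the total count is rounded up to a power of two. With HNS_ROCE_SGE_SHIFT (4) an SGE is 16 bytes. A userspace sketch of the computation, with assumed queue parameters:

#include <stdio.h>

#define HNS_ROCE_SGE_IN_WQE 2   /* inline SGEs per WQE (value from the diff) */
#define HNS_ROCE_SGE_SHIFT  4   /* one SGE is 16 bytes (value from the diff) */

/* round up to the next power of two; stand-in for the kernel helper */
static unsigned int roundup_pow_of_two_u(unsigned int n)
{
    unsigned int r = 1;

    while (r < n)
        r <<= 1;
    return r;
}

int main(void)
{
    unsigned int wqe_cnt = 128;  /* assumed SQ depth, for illustration */
    unsigned int max_gs = 5;     /* assumed SGEs per WQE */
    unsigned int sge_cnt = 0;

    if (max_gs > HNS_ROCE_SGE_IN_WQE)
        sge_cnt = roundup_pow_of_two_u(wqe_cnt *
                           (max_gs - HNS_ROCE_SGE_IN_WQE));

    /* 128 * 3 = 384 extended SGEs, rounded up to 512 */
    printf("ext sge cnt = %u, ext sge area = %u bytes\n",
           sge_cnt, sge_cnt << HNS_ROCE_SGE_SHIFT);
    return 0;
}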
@@ -466,20 +527,22 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
 {
     struct device *dev = hr_dev->dev;

-    if (hr_qp->sq.max_gs > 2) {
+    if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
         hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
-                            (hr_qp->sq.max_gs - 2));
-        hr_qp->sge.sge_shift = 4;
+                            (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
+        hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
     }

     /* ud sqwqe's sge use extend sge */
-    if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+    if (hr_dev->caps.max_sq_sg > HNS_ROCE_SGE_IN_WQE &&
+        hr_qp->ibqp.qp_type == IB_QPT_GSI) {
         hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                             hr_qp->sq.max_gs);
-        hr_qp->sge.sge_shift = 4;
+        hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
     }

-    if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+    if ((hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) &&
+        hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
         if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
             dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
                 hr_qp->sge.sge_cnt);
@@ -522,7 +585,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
     /* Get data_seg numbers */
     max_cnt = max(1U, cap->max_send_sge);
-    if (hr_dev->caps.max_sq_sg <= 2)
+    if (hr_dev->caps.max_sq_sg <= HNS_ROCE_MAX_SGE_NUM)
         hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
     else
         hr_qp->sq.max_gs = max_cnt;
@@ -539,7 +602,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
     size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
                  page_size);

-    if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+    if (hr_dev->caps.max_sq_sg > HNS_ROCE_MAX_SGE_NUM &&
+        hr_qp->sge.sge_cnt) {
         hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
                      (u32)hr_qp->sge.sge_cnt);
         hr_qp->sge.offset = size;
@@ -580,6 +644,35 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
     return 1;
 }

+static void destroy_qp_workqueue(struct hns_roce_qp *hr_qp)
+{
+    destroy_workqueue(hr_qp->rq.workq);
+    destroy_workqueue(hr_qp->sq.workq);
+}
+
+static int create_qp_workqueue(struct hns_roce_dev *hr_dev,
+                   struct hns_roce_qp *hr_qp)
+{
+    struct device *dev = hr_dev->dev;
+
+    hr_qp->sq.workq =
+        create_singlethread_workqueue("hns_roce_sq_workqueue");
+    if (!hr_qp->sq.workq) {
+        dev_err(dev, "Failed to create sq workqueue!\n");
+        return -ENOMEM;
+    }
+
+    hr_qp->rq.workq =
+        create_singlethread_workqueue("hns_roce_rq_workqueue");
+    if (!hr_qp->rq.workq) {
+        dev_err(dev, "Failed to create rq workqueue!\n");
+        destroy_workqueue(hr_qp->sq.workq);
+        return -ENOMEM;
+    }
+
+    return 0;
+}
+
 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                      struct ib_pd *ib_pd,
                      struct ib_qp_init_attr *init_attr,
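Two properties of the workqueue API do the heavy lifting in create_qp_workqueue(): create_singlethread_workqueue() returns NULL on failure, so the second failure must destroy the first queue; and destroy_workqueue() drains any work still queued before tearing the queue down, so the destroy path does not race a pending flush item. A sketch of the same create/unwind shape, with illustrative names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_queues {
    struct workqueue_struct *sq_wq;
    struct workqueue_struct *rq_wq;
};

static int my_create_queues(struct my_queues *q)
{
    q->sq_wq = create_singlethread_workqueue("my_sq_wq");
    if (!q->sq_wq)
        return -ENOMEM;

    q->rq_wq = create_singlethread_workqueue("my_rq_wq");
    if (!q->rq_wq) {
        destroy_workqueue(q->sq_wq);    /* unwind the first queue */
        return -ENOMEM;
    }
    return 0;
}

static void my_destroy_queues(struct my_queues *q)
{
    /* drains pending work before freeing each queue */
    destroy_workqueue(q->rq_wq);
    destroy_workqueue(q->sq_wq);
}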
@@ -590,9 +683,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
     struct hns_roce_ib_create_qp ucmd;
     struct hns_roce_ib_create_qp_resp resp = {};
     unsigned long qpn = 0;
-    int ret = 0;
     u32 page_shift;
     u32 npages;
+    int ret;
     int i;

     mutex_init(&hr_qp->mutex);
@@ -600,6 +693,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
     spin_lock_init(&hr_qp->rq.lock);

     hr_qp->state = IB_QPS_RESET;
+    hr_qp->next_state = IB_QPS_RESET;

     hr_qp->ibqp.qp_type = init_attr->qp_type;
@@ -731,13 +825,15 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
     } else {
         if (init_attr->create_flags &
             IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-            dev_err(dev, "init_attr->create_flags error!\n");
+            dev_err(dev, "init_attr->create_flags error(%d)!\n",
+                init_attr->create_flags);
             ret = -EINVAL;
             goto err_rq_sge_list;
         }

         if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
-            dev_err(dev, "init_attr->create_flags error!\n");
+            dev_err(dev, "init_attr->create_flags error(%d)!\n",
+                init_attr->create_flags);
             ret = -EINVAL;
             goto err_rq_sge_list;
         }
@@ -793,9 +889,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
             goto err_mtt;
         }

-        hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
+        hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
                        GFP_KERNEL);
-        hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
+        hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
                        GFP_KERNEL);
         if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
             ret = -ENOMEM;
@@ -819,13 +915,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
         /* In v1 engine, GSI QP context in RoCE engine's register */
         ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
         if (ret) {
-            dev_err(dev, "hns_roce_qp_alloc failed!\n");
+            dev_err(dev, "Failed to alloc gsi qp!\n");
             goto err_qpn;
         }
     } else {
         ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
         if (ret) {
-            dev_err(dev, "hns_roce_qp_alloc failed!\n");
+            dev_err(dev, "Failed to alloc qp!\n");
             goto err_qpn;
         }
     }
@@ -945,11 +1041,18 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
         if (!hr_qp)
             return ERR_PTR(-ENOMEM);

+        ret = create_qp_workqueue(hr_dev, hr_qp);
+        if (ret) {
+            kfree(hr_qp);
+            return ERR_PTR(ret);
+        }
+
         ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                         hr_qp);
         if (ret) {
             dev_err(dev, "Create RC QP 0x%06lx failed(%d)\n",
                 hr_qp->qpn, ret);
+            destroy_qp_workqueue(hr_qp);
             kfree(hr_qp);
             return ERR_PTR(ret);
         }
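The RC error path above releases resources in reverse order of acquisition: the workqueues created before hns_roce_create_qp_common() are destroyed before the QP structure is freed. A compact userspace sketch of that unwind shape, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Reverse-order unwind: what was acquired last is released first.
 * The two mallocs stand in for create_qp_workqueue() and the QP setup. */
static int create_object(void)
{
    void *queues = malloc(64);          /* step 1: the workqueues */
    if (!queues)
        return -1;

    void *qp_state = malloc(256);       /* step 2: the QP itself */
    if (!qp_state) {
        free(queues);                   /* undo step 1 on step-2 failure */
        return -1;
    }

    free(qp_state);                     /* demo only: tear down in reverse */
    free(queues);
    return 0;
}

int main(void)
{
    printf("create_object() -> %d\n", create_object());
    return 0;
}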
@@ -974,7 +1077,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
         hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

         /* when hw version is v1, the sqpn is allocated */
-        if (hr_dev->caps.max_sq_sg <= 2)
+        if (hr_dev->caps.max_sq_sg <= HNS_ROCE_MAX_SGE_NUM)
             hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
                          hr_dev->iboe.phy_port[hr_qp->port];
         else
@@ -983,7 +1086,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
         ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                         hr_qp->ibqp.qp_num, hr_qp);
         if (ret) {
-            dev_err(dev, "Create GSI QP failed!\n");
+            dev_err(dev, "Create GSI QP failed(%d)!\n", ret);
             kfree(hr_sqp);
             return ERR_PTR(ret);
         }
@@ -1038,8 +1141,8 @@ static int check_mtu_validate(struct hns_roce_dev *hr_dev,
     if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
         attr->path_mtu > hr_dev->caps.max_mtu) ||
         attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
-        dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
-            attr->path_mtu);
+        dev_err(dev, "attr path_mtu(%d)invalid while modify qp(0x%lx)",
+            attr->path_mtu, hr_qp->qpn);
         return -EINVAL;
     }
@@ -1065,7 +1168,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
     if (attr_mask & IB_QP_PKEY_INDEX) {
         p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
         if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
-            dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
+            dev_err(dev,
+                "attr pkey_index invalid.attr->pkey_index=%d\n",
                 attr->pkey_index);
             return -EINVAL;
         }
@@ -1079,14 +1183,14 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
     if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
         attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
-        dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
+        dev_err(dev, "attr max_rd_atomic(%d) invalid.\n",
             attr->max_rd_atomic);
         return -EINVAL;
     }

     if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
         attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
-        dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
+        dev_err(dev, "attr max_dest_rd_atomic(%d) invalid.\n",
             attr->max_dest_rd_atomic);
         return -EINVAL;
     }
@@ -1112,6 +1216,9 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
     new_state = attr_mask & IB_QP_STATE ?
             attr->qp_state : cur_state;

+    hr_qp->next_state = new_state;
+    hr_qp->attr_mask = attr_mask;
+
     if (ibqp->pd->uobject &&
         (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
         if (hr_qp->sdb_en == 1) {
@@ -1244,7 +1351,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
     INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

     /* In hw v1, a port include two SQP, six ports total 12 */
-    if (hr_dev->caps.max_sq_sg <= 2)
+    if (hr_dev->caps.max_sq_sg <= HNS_ROCE_MAX_SGE_NUM)
         reserved_from_bot = SQP_NUM;
     else
         reserved_from_bot = hr_dev->caps.reserved_qps;
......
@@ -443,8 +443,9 @@ static int hns_roce_fill_res_mr_entry(struct sk_buff *msg,
 {
     struct ib_mr *ib_mr = container_of(res, struct ib_mr, res);
     struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
+    struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
     struct hns_roce_v2_mpt_entry context;
-    int key = hr_dev->hr_stat.key;
+    int key = hr_mr->key;
     struct nlattr *table_attr;
     int ret;
......
@@ -146,8 +146,11 @@ int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
     }

     ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev, "SRQ alloc.Failed to get table, srq - 0x%lx.\n",
+            srq->srqn);
         goto err_out;
+    }

     spin_lock_irq(&srq_table->lock);
     ret = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
@@ -167,8 +170,11 @@ int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
     ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
     hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev, "SW2HW_SRQ(0x%lx) failed(%d).\n",
+            srq->srqn, ret);
         goto err_radix;
+    }

     refcount_set(&srq->refcount, 1);
     init_completion(&srq->free);
@@ -235,8 +241,10 @@ static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
     } else
         ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem),
                     srq->umem->page_shift, &srq->mtt);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev, "mtt init error when create srq\n");
         goto err_user_buf;
+    }

     ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
     if (ret)
@@ -246,7 +254,7 @@ static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
     srq->idx_que.umem = ib_umem_get(pd->uobject->context, ucmd.que_addr,
                     srq->idx_que.buf_size, 0, 0);
     if (IS_ERR(srq->idx_que.umem)) {
-        dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
+        dev_err(hr_dev->dev, "umem get error for idx que\n");
         goto err_user_srq_mtt;
     }
@@ -265,7 +273,7 @@ static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
     }

     if (ret) {
-        dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
+        dev_err(hr_dev->dev, "mtt init error for idx que\n");
         goto err_user_idx_mtt;
     }
@@ -273,7 +281,7 @@ static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
                       srq->idx_que.umem);
     if (ret) {
         dev_err(hr_dev->dev,
-            "hns_roce_ib_umem_write_mtt error for idx que\n");
+            "write mtt error for idx que\n");
         goto err_user_idx_buf;
     }
@@ -303,13 +311,13 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
     int i;

     idx_que->entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;

-    bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
+    bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, BITS_PER_LONG_LONG);

-    idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
+    idx_que->bitmap = kcalloc(1, bitmap_num / BITS_PER_BYTE, GFP_KERNEL);
     if (!idx_que->bitmap)
         return -ENOMEM;

-    bitmap_num = bitmap_num / (8 * sizeof(u64));
+    bitmap_num = bitmap_num / BITS_PER_LONG_LONG;

     idx_que->buf_size = srq->max * idx_que->entry_sz;
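The idx-queue bitmap math reads cleanly with the named constants: HNS_ROCE_ALOGN_UP rounds srq->max up to a multiple of 64 bits, dividing by BITS_PER_BYTE gives the byte count handed to kcalloc(), and dividing by BITS_PER_LONG_LONG gives the number of u64 words. A userspace check with an assumed SRQ depth:

#include <stdio.h>

#define BITS_PER_BYTE      8
#define BITS_PER_LONG_LONG 64
/* round n up to a multiple of align (a power of two), mirroring the
 * driver's HNS_ROCE_ALOGN_UP */
#define ALIGN_UP(n, align) (((n) + (align) - 1) & ~((align) - 1))

int main(void)
{
    unsigned int srq_max = 100;   /* assumed SRQ depth, for illustration */
    unsigned int bits  = ALIGN_UP(srq_max, BITS_PER_LONG_LONG); /* 128 */
    unsigned int bytes = bits / BITS_PER_BYTE;                  /* 16  */
    unsigned int words = bits / BITS_PER_LONG_LONG;             /* 2   */

    printf("bitmap: %u bits = %u bytes = %u u64 words\n",
           bits, bytes, words);
    return 0;
}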
@@ -342,8 +350,10 @@ static int create_kernel_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
     ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
                 &srq->mtt);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev, "mtt init error when create srq\n");
         goto err_kernel_buf;
+    }

     ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
     if (ret)
@@ -360,13 +370,17 @@ static int create_kernel_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
     ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
                 srq->idx_que.idx_buf.page_shift,
                 &srq->idx_que.mtt);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev, "mtt init error for idx que\n");
         goto err_kernel_create_idx;
+    }

     /* Write buffer address into the mtt table */
     ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
                      &srq->idx_que.idx_buf);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev, "write mtt error for idx que\n");
         goto err_kernel_idx_buf;
+    }

     srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
     if (!srq->wrid) {
         ret = -ENOMEM;
@@ -438,7 +452,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
     srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
     srq->max_gs = srq_init_attr->attr.max_sge;

-    srq_desc_size = max(16, 16 * srq->max_gs);
+    srq_desc_size = max(HNS_ROCE_SGE_SIZE, HNS_ROCE_SGE_SIZE * srq->max_gs);

     srq->wqe_shift = ilog2(srq_desc_size);
@@ -470,8 +484,12 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
     ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0, &srq->mtt,
                  0, srq);
-    if (ret)
+    if (ret) {
+        dev_err(hr_dev->dev,
+            "failed to alloc srq, cqn - 0x%x, pdn - 0x%lx\n",
+            cqn, to_hr_pd(pd)->pdn);
         goto err_wrid;
+    }

     srq->event = hns_roce_ib_srq_event;
     srq->ibsrq.ext.xrc.srq_num = srq->srqn;
......