Commit 9c098d1d authored by Lin Wang, committed by Xie XiuQi

RDMA/hns: Normalize print information

driver inclusion
category: cleanup
bugzilla: NA
CVE: NA

This patch modifies all print information into a unified format.

Feature or Bugfix: Bugfix
Signed-off-by: Lin Wang <wanglin137@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Reviewed-by: chenglang <chenglang@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 6973e00c
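
For reference, the unified format the patch converges on is: start each message with a capital letter, append the error code as "(%d)" directly after "failed"/"error", print resource indices in hex with an explicit "0x" prefix, and name the failing operation and resource. A minimal sketch of the pattern in kernel C (the helper and its failure value are hypothetical, not taken from this patch; only the dev_err() format mirrors the real changes):

#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical stand-in for a real HEM table lookup. */
static int example_table_get(unsigned long qpn)
{
	return -ENOMEM;
}

static int example_qpc_get(struct device *dev, unsigned long qpn)
{
	int ret = example_table_get(qpn);

	if (ret) {
		/* Capitalized text, "(%d)" error code, hex ID with 0x prefix. */
		dev_err(dev, "QPC table get failed(%d), qpn 0x%lx\n",
			ret, qpn);
		return ret;
	}

	return 0;
}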
......@@ -95,7 +95,9 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
rdma_ah_get_port_num(ah_attr),
grh->sgid_index, &sgid, &gid_attr);
if (ret) {
dev_err(dev, "get sgid failed! ret = %d\n", ret);
dev_err(dev, "Get index %u of sgid on port %u failed(%d)!\n",
grh->sgid_index, rdma_ah_get_port_num(ah_attr),
ret);
kfree(ah);
return ERR_PTR(ret);
}
......
......@@ -246,7 +246,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
end = start + buf_cnt;
if (end > buf->npages) {
dev_err(hr_dev->dev,
"invalid kmem region,offset %d,buf_cnt %d,total %d!\n",
"Invalid kmem region,offset 0x%x plus buf_cnt 0x%x larger than total 0x%x!\n",
start, buf_cnt, buf->npages);
return -EINVAL;
}
......@@ -276,7 +276,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int total;
if (page_shift < PAGE_SHIFT || page_shift > umem->page_shift) {
dev_err(hr_dev->dev, "invalid page shift %d, umem shift %d!\n",
dev_err(hr_dev->dev, "Invalid page shift %d, umem shift %d!\n",
page_shift, umem->page_shift);
return -EINVAL;
}
......@@ -287,7 +287,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
end = start + buf_cnt;
if (end > total) {
dev_err(hr_dev->dev,
"invalid umem region,offset %d,buf_cnt %d,total %d!\n",
"Invalid umem region,offset 0x%x plus buf_cnt 0x%x larger than total 0x%x!\n",
start, buf_cnt, total);
return -EINVAL;
}
......@@ -303,7 +303,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
(n << umem->page_shift);
if (addr & ((1 << page_shift) - 1)) {
dev_err(hr_dev->dev,
"not align to page_shift %d!\n",
"Umem addr not align to page_shift %d!\n",
page_shift);
return -ENOBUFS;
}
......
......@@ -69,7 +69,8 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
in_modifier, op_modifier, op,
CMD_POLL_TOKEN, 0);
if (ret) {
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed(%d).\n",
ret);
return ret;
}
......@@ -138,14 +139,15 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
*/
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
dev_err(dev, "[cmd_wait]wait_for_completion_timeout timeout.\n");
ret = -EBUSY;
goto out;
}
ret = context->result;
if (ret) {
dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
dev_err(dev, "[cmd_wait]event mod cmd process error(%d)!\n",
ret);
goto out;
}
......
......@@ -60,7 +60,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
dev_err(hr_dev->dev,
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
"hns_roce_ib: Unexpected event type 0x%x on CQ 0x%lx\n",
event_type, hr_cq->cqn);
return;
}
......@@ -110,22 +110,22 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
}
if (vector >= hr_dev->caps.num_comp_vectors) {
dev_err(dev, "CQ alloc.Invalid vector.\n");
dev_err(dev, "Invalid vector(0x%x) for CQ alloc.\n", vector);
return -EINVAL;
}
hr_cq->vector = vector;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
if (ret == -1) {
dev_err(dev, "CQ alloc.Failed to alloc index.\n");
dev_err(dev, "Num of cq out of range.\n");
return -ENOMEM;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
dev_err(dev, "CQ(0x%lx) alloc.Failed to get context mem(%d).\n",
hr_cq->cqn, ret);
dev_err(dev, "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
ret, hr_cq->cqn);
goto err_out;
}
......@@ -154,8 +154,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
dev_err(dev, "CQ(0x%lx) alloc.Failed to cmd mailbox(%d).\n",
hr_cq->cqn, ret);
dev_err(dev, "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
ret, hr_cq->cqn);
goto err_radix;
}
......@@ -198,7 +198,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
dev_err(dev, "HW2SW_CQ failed(%d) for CQN 0x%0lx\n", ret,
hr_cq->cqn);
/* Waiting interrupt process procedure carried out */
......@@ -250,13 +250,15 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
&buf->hr_mtt);
}
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for create cq\n");
dev_err(hr_dev->dev, "hns_roce_mtt_init error(%d) for create cq.\n",
ret);
goto err_buf;
}
ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error for create cq\n");
dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error(%d) for create cq.\n",
ret);
goto err_mtt;
}
......@@ -290,13 +292,15 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
buf->hr_buf.page_shift, &buf->hr_mtt);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for kernel create cq\n");
dev_err(hr_dev->dev, "hns_roce_mtt_init error(%d) for kernel create cq.\n",
ret);
goto err_buf;
}
ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error for kernel create cq\n");
dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error(%d) for kernel create cq.\n",
ret);
goto err_mtt;
}
......@@ -332,7 +336,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "Failed to copy_from_udata.\n");
dev_err(dev, "Copy_from_udata failed.\n");
return -EFAULT;
}
......@@ -341,7 +345,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
&hr_cq->umem, ucmd.buf_addr,
cq_entries);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
dev_err(dev, "Get_cq_umem failed(%d).\n", ret);
return ret;
}
......@@ -350,7 +354,8 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
ret = hns_roce_db_map_user(to_hr_ucontext(context),
ucmd.db_addr, &hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");
dev_err(dev, "cq record doorbell map failed(%d)!\n",
ret);
goto err_mtt;
}
hr_cq->db_en = 1;
......@@ -379,7 +384,7 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
if (ret) {
dev_err(dev, "Failed to alloc db for cq.\n");
dev_err(dev, "Alloc db for cq failed(%d).\n", ret);
return ret;
}
......@@ -391,7 +396,7 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
/* Init mmt table and write buff address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
if (ret) {
dev_err(dev, "Failed to alloc cq buf.\n");
dev_err(dev, "Alloc cq buf failed(%d).\n", ret);
goto err_db;
}
......@@ -450,7 +455,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_CQ);
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
dev_err(dev, "Create CQ failed. entries is %d, max cqe is %d\n",
cq_entries, hr_dev->caps.max_cqes);
return ERR_PTR(-EINVAL);
}
......@@ -472,13 +477,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
ret = create_user_cq(hr_dev, hr_cq, context, udata, &resp, uar,
cq_entries);
if (ret) {
dev_err(dev, "Failed to create cq for user mode!\n");
dev_err(dev, "Create cq for user mode failed(%d)!\n",
ret);
goto err_cq;
}
} else {
ret = create_kernel_cq(hr_dev, hr_cq, uar, cq_entries);
if (ret) {
dev_err(dev, "Failed to create cq for kernel mode!\n");
dev_err(dev, "Create cq for kernel mode failed(%d)!\n",
ret);
goto err_cq;
}
}
......@@ -487,7 +494,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
hr_cq, vector);
if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
dev_err(dev, "Cq alloc failed(%d).\n", ret);
goto err_dbmap;
}
......@@ -600,7 +607,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
atomic_inc(&cq->refcount);
if (!cq) {
dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
dev_warn(dev, "Async event for bogus CQ 0x%08x\n", cqn);
return;
}
......
......@@ -523,7 +523,7 @@ static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev,
if (!ret)
memcpy(bt0_ba, mailbox->buf, sizeof(*bt0_ba));
else {
pr_err("QUERY CQ bt0 cmd process error\n");
pr_err("Query CQ bt0 cmd process error(%d).\n", ret);
goto out;
}
......@@ -533,7 +533,7 @@ static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev,
if (!ret)
memcpy(bt1_ba, mailbox->buf, sizeof(*bt1_ba));
else {
pr_err("QUERY CQ bt1 cmd process error\n");
pr_err("Query CQ bt1 cmd process error(%d).\n", ret);
goto out;
}
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
......@@ -626,7 +626,8 @@ int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
dev_err(hr_dev->dev, "MODIFY EQ Failed to cmd mailbox.\n");
dev_err(hr_dev->dev, "Modify EQ Failed(%d) for cmd mailbox.\n",
ret);
return ret;
}
......@@ -268,7 +268,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (le32_to_cpu(rc_sq_wqe->msg_len) >
hr_dev->caps.max_sq_inline) {
*bad_wr = wr;
dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
dev_err(hr_dev->dev, "Inline len(0x%x)illegal, max is 0x%x.\n",
rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
return -EINVAL;
}
......@@ -686,7 +686,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ind++;
} else {
dev_err(dev, "Illegal qp(0x%x) type:0x%x\n",
dev_err(dev, "Post send failed for illegal qp(0x%x) type:0x%x\n",
ibqp->qp_num, ibqp->qp_type);
v2_spin_unlock_irqrestore(qp_lock, &qp->sq.lock,
&flags);
......@@ -770,6 +770,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
if (hr_qp->state == IB_QPS_RESET) {
v2_spin_unlock_irqrestore(qp_lock, &hr_qp->rq.lock, &flags);
*bad_wr = wr;
dev_err(dev, "Post recv failed: QP state is RESET, qp num is 0x%lx.\n",
hr_qp->qpn);
return -EINVAL;
}
......@@ -785,7 +787,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d >= qp->rq.max_gs=%d\n",
dev_err(dev, "RQ: sge num(%d) is larger or equal than max sge num(%d)\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
......@@ -1075,14 +1077,14 @@ static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
/* Init CSQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
if (ret) {
dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
dev_err(hr_dev->dev, "Init CSQ error(%d).\n", ret);
return ret;
}
/* Init CRQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
if (ret) {
dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
dev_err(hr_dev->dev, "Init CRQ error(%d).\n", ret);
goto err_crq;
}
......@@ -1390,8 +1392,7 @@ static void hns_roce_query_func_num(struct hns_roce_dev *hr_dev)
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_VF_NUM, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
dev_err(hr_dev->dev, "Query vf count fail, ret = %d.\n",
ret);
dev_err(hr_dev->dev, "Query vf count failed(%d).\n", ret);
return;
}
......@@ -1418,8 +1419,7 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id)
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
fclr_write_fail_flag = true;
dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
ret);
dev_err(hr_dev->dev, "Func clear write failed(%d).\n", ret);
goto out;
}
......@@ -1448,8 +1448,7 @@ static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id)
}
out:
dev_err(hr_dev->dev, "Func clear read vf_id %d fail.\n", vf_id);
hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
(void)hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
......@@ -1483,8 +1482,10 @@ static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
if (ret) {
dev_err(hr_dev->dev, "Query fw version failed(%d)!\n", ret);
return ret;
}
resp = (struct hns_roce_query_fw_info *)desc.data;
hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
......@@ -2170,21 +2171,21 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret = hns_roce_cmq_query_hw_info(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
dev_err(hr_dev->dev, "Query hardware version failed(%d).\n",
ret);
return ret;
}
ret = hns_roce_query_fw_ver(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
dev_err(hr_dev->dev, "Query firmware version failed(%d).\n",
ret);
return ret;
}
ret = hns_roce_config_global_param(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
dev_err(hr_dev->dev, "Configure global param failed(%d).\n",
ret);
return ret;
}
......@@ -2192,8 +2193,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
/* Get pf resource owned by every pf */
ret = hns_roce_query_pf_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
ret);
dev_err(hr_dev->dev, "Query pf resource failed(%d).\n", ret);
return ret;
}
......@@ -2203,15 +2203,14 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret = hns_roce_query_pf_timer_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
"Query pf timer resource fail, ret = %d.\n",
ret);
"Query pf timer resource failed(%d).\n", ret);
return ret;
}
}
ret = hns_roce_alloc_vf_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
dev_err(hr_dev->dev, "Allocate vf resource failed(%d).\n",
ret);
return ret;
}
......@@ -2220,7 +2219,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret = hns_roce_set_vf_switch_param(hr_dev, 0);
if (ret) {
dev_err(hr_dev->dev,
"Set function switch param fail, ret = %d.\n",
"Set function switch param failed(%d).\n",
ret);
return ret;
}
......@@ -2249,7 +2248,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
dev_err(hr_dev->dev, "Configure bt attribute failed(%d).\n",
ret);
return ret;
......@@ -2718,7 +2717,8 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
if (ret)
dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
dev_err(hr_dev->dev, "Configure sgid table failed(%d), gid index is %d, sgid type is %d!\n",
ret, gid_index, sgid_type);
return ret;
}
......@@ -3166,7 +3166,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
if (cq_period * HNS_ROCE_CLOCK_ADJUST > 0xFFFF) {
dev_info(hr_dev->dev, "config cq_period param out of range. config value is 0x%x, adjusted to 65.\n",
dev_info(hr_dev->dev, "Config cq_period param(0x%x) out of range for write_cqc, adjusted to 65.\n",
cq_period);
cq_period = HNS_ROCE_MAX_CQ_PERIOD;
}
......@@ -3333,7 +3333,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (unlikely(!hr_qp)) {
dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
dev_err(hr_dev->dev, "CQ 0x%06lx with entry for unknown QPN 0x%06x\n",
hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
return -EINVAL;
}
......@@ -5303,7 +5303,7 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
dev_err(hr_dev->dev, "QUERY QP cmd process error(%d).\n", ret);
goto out;
}
......@@ -5467,7 +5467,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->state, IB_QPS_RESET);
if (ret)
dev_err(dev,
"modify QP %06lx to Reset failed, ret = %d.\n",
"Modify QP 0x%06lx to Reset failed(%d).\n",
hr_qp->qpn, ret);
}
......@@ -5605,7 +5605,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
return 0;
}
dev_err(hr_dev->dev, "clear scc ctx failure!");
dev_err(hr_dev->dev, "Clear scc ctx failure!");
return -EINVAL;
}
......@@ -5635,7 +5635,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
0);
if (cq_period * HNS_ROCE_CLOCK_ADJUST > 0xFFFF) {
dev_info(hr_dev->dev, "config cq_period param out of range. config value is 0x%x, adjusted to 65.\n",
dev_info(hr_dev->dev, "Config cq_period param(0x%x) out of range for modify_cq, adjusted to 65.\n",
cq_period);
cq_period = HNS_ROCE_MAX_CQ_PERIOD;
}
......@@ -5651,8 +5651,8 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
dev_err(hr_dev->dev, "MODIFY CQ(0x%lx) cmd process error.\n",
hr_cq->cqn);
dev_err(hr_dev->dev, "Modify CQ(0x%lx) cmd process error(%d).\n",
hr_cq->cqn, ret);
return ret;
}
......@@ -6213,7 +6213,7 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
test_set_eq_param(eq->type_flag, &eq_period, &eq_max_cnt, &eq_arm_st);
#endif
if (eq_period * HNS_ROCE_CLOCK_ADJUST > 0xFFFF) {
dev_info(hr_dev->dev, "config eq_period param out of range. config value is 0x%x, adjusted to 65.\n",
dev_info(hr_dev->dev, "Config eq_period param(0x%x) out of range for config_eqc, adjusted to 65.\n",
eq_period);
eq_period = HNS_ROCE_MAX_CQ_PERIOD;
}
......@@ -6389,8 +6389,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
if (mhop_num == HNS_ROCE_HOP_NUM_0) {
if (eq->entries > buf_chk_sz / eq->eqe_size) {
dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
eq->entries);
dev_err(dev, "eq entries %d is larger than buf_pg_sz %d!",
eq->entries, buf_chk_sz / eq->eqe_size);
return -EINVAL;
}
eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
......@@ -6697,8 +6697,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
0, hr_dev->irq_names[j - comp_num],
&eq_table->eq[j - other_num]);
if (ret) {
dev_err(hr_dev->dev, "Request irq error, ret = %d\n",
ret);
dev_err(hr_dev->dev, "Request irq error(%d)\n", ret);
goto err_request_failed;
}
}
......@@ -6805,7 +6804,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
aeq_num, other_num);
if (ret) {
dev_err(dev, "Request irq failed.\n");
dev_err(dev, "Request irq failed(%d).\n", ret);
goto err_request_irq_fail;
}
......@@ -6992,9 +6991,12 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
int ret;
if (srq_attr_mask & IB_SRQ_LIMIT) {
if (srq_attr->srq_limit >= srq->max)
if (srq_attr->srq_limit >= srq->max) {
dev_err(hr_dev->dev,
"Modify SRQ failed: limit(%d) larger than max wr num(%d).\n",
srq_attr->srq_limit, srq->max);
return -EINVAL;
}
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
......
......@@ -1002,7 +1002,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
#endif
ret = ib_register_device(ib_dev, NULL);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
dev_err(dev, "ib_register_device failed(%d)!\n", ret);
return ret;
}
......@@ -1016,7 +1016,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
iboe->nb.notifier_call = NULL;
dev_err(dev, "register_netdevice_notifier failed!\n");
dev_err(dev, "register_netdevice_notifier failed(%d)!\n", ret);
goto error_failed_setup_mtu_mac_state;
}
......@@ -1038,7 +1038,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
hr_dev->caps.num_mtt_segs, 1);
if (ret) {
dev_err(dev, "Failed to init MTT context memory, aborting.\n");
dev_err(dev, "Init MTT context memory failed(%d).\n", ret);
return ret;
}
......@@ -1048,7 +1048,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
hr_dev->caps.num_cqe_segs, 1);
if (ret) {
dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
dev_err(dev, "Init MTT CQE context memory failed(%d).\n",
ret);
goto err_unmap_cqe;
}
}
......@@ -1057,7 +1058,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
hr_dev->caps.num_mtpts, 1);
if (ret) {
dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
dev_err(dev, "Init MTPT context memory failed(%d).\n", ret);
goto err_unmap_mtt;
}
......@@ -1065,7 +1066,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev, "Failed to init QP context memory, aborting.\n");
dev_err(dev, "Init QP context memory failed(%d).\n", ret);
goto err_unmap_dmpt;
}
......@@ -1075,7 +1076,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.max_qp_init_rdma,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
dev_err(dev, "Init irrl_table memory failed(%d).\n", ret);
goto err_unmap_qp;
}
......@@ -1087,8 +1088,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.max_qp_dest_rdma,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev,
"Failed to init trrl_table memory, aborting.\n");
dev_err(dev, "Init trrl_table memory failed(%d).\n",
ret);
goto err_unmap_irrl;
}
}
......@@ -1097,7 +1098,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
hr_dev->caps.num_cqs, 1);
if (ret) {
dev_err(dev, "Failed to init CQ context memory, aborting.\n");
dev_err(dev, "Init CQ context memory failed(%d).\n", ret);
goto err_unmap_trrl;
}
......@@ -1108,8 +1109,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.scc_ctx_entry_sz,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev,
"Failed to init SCC context memory, aborting.\n");
dev_err(dev, "Init SCC context memory failed(%d).\n",
ret);
goto err_unmap_cq;
}
}
......@@ -1121,8 +1122,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.qpc_timer_entry_sz,
hr_dev->caps.num_qpc_timer, 1);
if (ret) {
dev_err(dev,
"Failed to init QPC timer memory, aborting.\n");
dev_err(dev, "Init QPC timer memory failed(%d).\n",
ret);
goto err_unmap_ctx;
}
}
......@@ -1134,8 +1135,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.cqc_timer_entry_sz,
hr_dev->caps.num_cqc_timer, 1);
if (ret) {
dev_err(dev,
"Failed to init CQC timer memory, aborting.\n");
dev_err(dev, "Init CQC timer memory failed(%d).\n",
ret);
goto err_unmap_qpc_timer;
}
}
......@@ -1146,8 +1147,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.srqc_entry_sz,
hr_dev->caps.num_srqs, 1);
if (ret) {
dev_err(dev,
"Failed to init SRQ context memory, aborting.\n");
dev_err(dev, "Init SRQ context memory failed(%d).\n",
ret);
goto err_unmap_cqc_timer;
}
}
......@@ -1159,8 +1160,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.mtt_entry_sz,
hr_dev->caps.num_srqwqe_segs, 1);
if (ret) {
dev_err(dev,
"Failed to init MTT srqwqe memory, aborting.\n");
dev_err(dev, "Init MTT srqwqe memory failed(%d).\n",
ret);
goto err_unmap_srq;
}
}
......@@ -1172,8 +1173,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.idx_entry_sz,
hr_dev->caps.num_idx_segs, 1);
if (ret) {
dev_err(dev,
"Failed to init MTT idx memory, aborting.\n");
dev_err(dev, "Init MTT idx memory failed(%d).\n", ret);
goto err_unmap_srqwqe;
}
}
......@@ -1270,8 +1270,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ret = hns_roce_init_xrcd_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to init xrcd table(%d).\n",
ret);
dev_err(dev, "Failed to init xrcd table(%d).\n", ret);
goto err_pd_table_free;
}
}
......@@ -1436,7 +1435,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
ret = hns_roce_cmd_use_events(hr_dev);
if (ret) {
dev_warn(dev,
"Cmd event mode failed, set back to poll!\n");
"Cmd event mode failed(%d), set back to poll!\n",
ret);
hns_roce_cmd_use_polling(hr_dev);
}
}
......@@ -1456,7 +1456,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
ret = hr_dev->hw->hw_init(hr_dev);
if (ret) {
dev_err(dev, "Hw_init failed!\n");
dev_err(dev, "Hw_init failed(%d)!\n", ret);
goto error_failed_engine_init;
}
......
......@@ -362,8 +362,7 @@ static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct device *dev = hr_dev->dev;
if (npages > pbl_bt_sz / BA_BYTE_LEN) {
dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages);
dev_err(dev, "Npages %d is larger than buf_pg_sz!", npages);
return -EINVAL;
}
mr->pbl_buf = dma_alloc_coherent(dev, npages * BA_BYTE_LEN,
......@@ -753,7 +752,7 @@ int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
/* Prepare HEM entry memory */
ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
if (ret) {
dev_err(dev, "get mtpt table(0x%lx) failed, ret = %d",
dev_err(dev, "Get mtpt table(0x%lx) failed(%d).",
mtpt_idx, ret);
return ret;
}
......@@ -769,14 +768,15 @@ int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
else
ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
dev_err(dev, "Write mtpt fail(%d)!\n", ret);
goto err_page;
}
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
dev_err(dev, "SW2HW_MPT(0x%lx) failed (%d)\n", mtpt_idx, ret);
dev_err(dev, "SW2HW_MPT(0x%lx) failed(%d) for mr_enable.\n",
mtpt_idx, ret);
goto err_page;
}
......@@ -1007,8 +1007,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
~0ULL, acc, 0, mr);
if (ret) {
dev_err(to_hr_dev(pd->device)->dev,
"alloc mr failed(%d), pd =0x%lx\n",
ret, to_hr_pd(pd)->pdn);
"alloc mr failed(%d), pd is 0x%lx , access is 0x%x.\n",
ret, to_hr_pd(pd)->pdn, acc);
goto err_free;
}
......@@ -1065,7 +1065,7 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
order = hr_dev->caps.idx_ba_pg_sz;
break;
default:
dev_err(dev, "Unsupport mtt type %d, write mtt failed\n",
dev_err(dev, "Unsupport mtt type %d, umem write mtt failed\n",
mtt->mtt_type);
return -EINVAL;
}
......@@ -1377,7 +1377,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
dev_err(dev, "SW2HW_MPT failed(%d) for rereg_usr_mr\n", ret);
ib_umem_release(mr->umem);
goto free_cmd_mbox;
}
......@@ -1538,14 +1538,14 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
if (ret) {
dev_err(dev, "MW write mtpt fail!\n");
dev_err(dev, "MW write mtpt failed(%d)!\n", ret);
goto err_page;
}
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
dev_err(dev, "MW sw2hw_mpt failed (%d).\n", ret);
goto err_page;
}
......
......@@ -99,7 +99,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) {
kfree(pd);
dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed(%d)!\n", ret);
return ERR_PTR(ret);
}
......@@ -171,7 +171,7 @@ struct ib_xrcd *hns_roce_ib_alloc_xrcd(struct ib_device *ib_dev,
if (ret) {
kfree(xrcd);
dev_err(hr_dev->dev,
"[alloc_xrcd]hns_roce_xrcd_alloc failed!\n");
"[alloc_xrcd]hns_roce_xrcd_alloc failed(%d)!\n", ret);
return ERR_PTR(ret);
}
......
......@@ -70,9 +70,10 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
struct hns_roce_flush_work *flush_work;
flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
if (!flush_work)
if (ZERO_OR_NULL_PTR(flush_work)) {
dev_err(hr_dev->dev, "Init flush work queue fail!\n");
return;
}
flush_work->hr_dev = hr_dev;
flush_work->hr_qp = hr_qp;
INIT_WORK(&flush_work->work, flush_work_handle);
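
Side note on the hunk above: kzalloc() can return ZERO_SIZE_PTR rather than NULL for a zero-length request, and a plain "!ptr" test does not catch that, which is presumably why the check becomes ZERO_OR_NULL_PTR(). A minimal sketch of the pattern (the struct and function are hypothetical, not from this patch):

#include <linux/slab.h>

struct example_work {
	int pending;
};

static struct example_work *example_alloc(size_t n)
{
	/* kzalloc(0, ...) returns ZERO_SIZE_PTR, which is non-NULL. */
	struct example_work *w = kzalloc(n * sizeof(*w), GFP_ATOMIC);

	if (ZERO_OR_NULL_PTR(w))	/* catches both NULL and ZERO_SIZE_PTR */
		return NULL;

	return w;
}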
......@@ -96,7 +97,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
spin_unlock(&qp_table->lock);
if (!qp) {
dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
dev_warn(dev, "Async event for bogus QP 0x%08x\n", qpn);
return;
}
......@@ -203,8 +204,8 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
dev_err(hr_dev->dev, "QPC radix insert failed, qpn 0x%lx\n",
hr_qp->qpn);
dev_err(hr_dev->dev, "GSI QPC radix insert failed(%d), qpn is 0x%lx\n",
ret, hr_qp->qpn);
goto err_put_irrl;
}
......@@ -233,14 +234,16 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
dev_err(dev, "QPC table get failed, qpn 0x%lx\n", hr_qp->qpn);
dev_err(dev, "QPC table get failed(%d), qpn 0x%lx\n", ret,
hr_qp->qpn);
goto err_out;
}
/* Alloc memory for IRRL */
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
if (ret) {
dev_err(dev, "IRRL table get failed, qpn 0x%lx\n", hr_qp->qpn);
dev_err(dev, "IRRL table get failed(%d), qpn 0x%lx\n", ret,
hr_qp->qpn);
goto err_put_qp;
}
......@@ -249,8 +252,8 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
if (ret) {
dev_err(dev, "TRRL table get failed, qpn 0x%lx\n",
hr_qp->qpn);
dev_err(dev, "TRRL table get failed(%d), qpn 0x%lx\n",
ret, hr_qp->qpn);
goto err_put_irrl;
}
}
......@@ -260,8 +263,8 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
ret = hns_roce_table_get(hr_dev, &qp_table->scc_ctx_table,
hr_qp->qpn);
if (ret) {
dev_err(dev, "SCC CTX table get failed, qpn 0x%lx\n",
hr_qp->qpn);
dev_err(dev, "SCC CTX table get failed(%d), qpn 0x%lx\n",
ret, hr_qp->qpn);
goto err_put_trrl;
}
}
......@@ -271,8 +274,8 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
dev_err(dev, "QPC radix_tree_insert failed, qpn - 0x%lx\n",
hr_qp->qpn);
dev_err(dev, "QPC radix_tree_insert failed(%d), qpn - 0x%lx\n",
ret, hr_qp->qpn);
goto err_put_scc_ctx;
}
......@@ -364,7 +367,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
cap->max_recv_sge = 0;
} else {
if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
dev_err(dev, "User space no need config max_recv_wr max_recv_sge\n");
return -EINVAL;
}
......@@ -411,14 +414,14 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
dev_err(hr_dev->dev,
"check SQ size error!Log sq stride 0x%x\n",
"Check SQ size error! Log sq stride 0x%x\n",
ucmd->log_sq_stride);
return -EINVAL;
}
if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
dev_err(hr_dev->dev, "SQ sge error!Max send sge %d\n",
cap->max_send_sge);
dev_err(hr_dev->dev, "SQ sge error! Max send sge is %d, Max sq sge is %d\n",
cap->max_send_sge, hr_dev->caps.max_sq_sg);
return -EINVAL;
}
......@@ -463,8 +466,9 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
(hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A)) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(hr_dev->dev,
"SQ(0x%lx) extended sge cnt error! sge_cnt=%d\n",
hr_qp->qpn, hr_qp->sge.sge_cnt);
"SQ(0x%lx) extended sge cnt error! sge cnt is %d, max extend sg is %d.\n",
hr_qp->qpn, hr_qp->sge.sge_cnt,
hr_dev->caps.max_extend_sg);
return -EINVAL;
}
}
......@@ -621,8 +625,8 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
if ((hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) &&
hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
dev_err(dev, "The extended sge cnt error! sge_cnt is %d, max extend sg is %d.\n",
hr_qp->sge.sge_cnt, hr_dev->caps.max_extend_sg);
return -EINVAL;
}
}
......@@ -656,7 +660,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
dev_err(dev, "sq.wqe_cnt(0x%x) too large for setting kernel sq size.\n",
(u32)hr_qp->sq.wqe_cnt);
return -EINVAL;
}
......@@ -669,7 +674,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
ret = set_extend_sge_param(hr_dev, hr_qp);
if (ret) {
dev_err(dev, "set extend sge parameters fail\n");
dev_err(dev, "set extend sge parameters failed(%d)\n", ret);
return ret;
}
......@@ -829,7 +834,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
hns_roce_qp_has_rq(init_attr), hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_rq_size failed\n");
dev_err(dev, "hns_roce_set_rq_size failed(%d).\n", ret);
goto err_out;
}
......@@ -845,7 +850,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
if (ib_pd->uobject) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n");
dev_err(dev, "ib_copy_from_udata error for create qp.\n");
ret = -EFAULT;
goto err_alloc_recv_inline_buffer;
}
......@@ -853,7 +858,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
dev_err(dev, "hns_roce_set_user_sq_size error(%d) for create qp.\n",
ret);
goto err_alloc_recv_inline_buffer;
}
......@@ -861,7 +867,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ucmd.buf_addr, hr_qp->buff_size, 0,
0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
dev_err(dev, "ib_umem_get error for create qp.\n");
ret = PTR_ERR(hr_qp->umem);
goto err_alloc_recv_inline_buffer;
}
......@@ -871,7 +877,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
hr_qp->region_cnt);
if (ret) {
dev_err(dev, "alloc buf_list error for create qp\n");
dev_err(dev, "alloc buf_list error(%d) for create qp.\n",
ret);
goto err_alloc_list;
}
......@@ -897,7 +904,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
to_hr_ucontext(ib_pd->uobject->context),
ucmd.sdb_addr, &hr_qp->sdb);
if (ret) {
dev_err(dev, "sq record doorbell map failed!\n");
dev_err(dev, "SQ record doorbell map failed(%d)!\n",
ret);
goto err_get_bufs;
}
......@@ -913,7 +921,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
to_hr_ucontext(ib_pd->uobject->context),
ucmd.db_addr, &hr_qp->rdb);
if (ret) {
dev_err(dev, "rq record doorbell map failed!\n");
dev_err(dev, "RQ record doorbell map failed(%d)!\n",
ret);
goto err_sq_dbmap;
}
......@@ -924,14 +933,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
} else {
if (init_attr->create_flags &
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
dev_err(dev, "init_attr->create_flags error(%d)!\n",
dev_err(dev, "init_attr->create_flags error(%d) for BLOCK_MULTICAST_LOOPBACK!\n",
init_attr->create_flags);
ret = -EINVAL;
goto err_alloc_recv_inline_buffer;
}
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
dev_err(dev, "init_attr->create_flags error(%d)!\n",
dev_err(dev, "init_attr->create_flags error(%d) for IPOIB_UD_LSO!\n",
init_attr->create_flags);
ret = -EINVAL;
goto err_alloc_recv_inline_buffer;
......@@ -941,7 +950,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
dev_err(dev, "hns_roce_set_kernel_sq_size error(%d)!\n",
ret);
goto err_alloc_recv_inline_buffer;
}
......@@ -955,7 +965,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_qp_has_rq(init_attr)) {
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
if (ret) {
dev_err(dev, "rq record doorbell alloc failed!\n");
dev_err(dev, "RQ record doorbell alloc failed(%d)!\n",
ret);
goto err_alloc_recv_inline_buffer;
}
*hr_qp->rdb.db_record = 0;
......@@ -976,7 +987,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
hr_qp->region_cnt);
if (ret) {
dev_err(dev, "alloc buf_list error for create qp!\n");
dev_err(dev, "Alloc buf_list error(%d) for create qp!\n",
ret);
goto err_alloc_list;
}
......@@ -1038,7 +1050,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
/* In v1 engine, GSI QP context in RoCE engine's register */
ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
if (ret) {
dev_err(dev, "Failed to alloc gsi qp!\n");
dev_err(dev, "Alloc GSI QP failed(%d)!\n", ret);
goto err_qpn;
}
} else {
......@@ -1063,7 +1075,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret) {
dev_err(hr_dev->dev, "qp flow control init failure!");
dev_err(hr_dev->dev, "QP flow control init failure(%d)!",
ret);
goto err_qp;
}
}
......@@ -1289,8 +1302,9 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
dev_err(dev,
"attr pkey_index invalid.attr->pkey_index=%d\n",
attr->pkey_index);
"Attr pkey_index(%d) invalid.Max index is %d.\n",
attr->pkey_index,
hr_dev->caps.pkey_table_len[p]);
return -EINVAL;
}
}
......@@ -1303,15 +1317,17 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
dev_err(dev, "attr max_rd_atomic(%d) invalid.\n",
attr->max_rd_atomic);
dev_err(dev, "Attr max_rd_atomic(%d) invalid, max is %d.\n",
attr->max_rd_atomic, hr_dev->caps.max_qp_init_rdma);
return -EINVAL;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
dev_err(dev, "attr max_dest_rd_atomic(%d) invalid.\n",
attr->max_dest_rd_atomic);
dev_err(dev,
"Attr max_dest_rd_atomic(%d) invalid, max is %d.\n",
attr->max_dest_rd_atomic,
hr_dev->caps.max_qp_dest_rdma);
return -EINVAL;
}
......@@ -1325,7 +1341,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
int ret = 0;
rdfx_func_cnt(hr_dev, RDFX_FUNC_MODIFY_QP);
......@@ -1348,13 +1364,16 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
dev_warn(dev, "flush cqe is not supported in userspace!\n");
ret = -EINVAL;
goto out;
}
}
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
dev_err(dev, "ib_modify_qp_is_ok failed. type: %d, cur_state: %d, new_state: %d, mask: 0x%x.\n",
ibqp->qp_type, cur_state, new_state, attr_mask);
ret = -EINVAL;
goto out;
}
......@@ -1365,10 +1384,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
if (hr_dev->caps.min_wqes) {
ret = -EPERM;
dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
dev_err(dev, "cur_state=%d new_state=%d.\n", cur_state,
new_state);
} else {
ret = 0;
}
goto out;
......@@ -1376,6 +1393,9 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
new_state);
if (ret)
dev_err(dev, "Modify QP(%08x) failed(%d).\n", ibqp->qp_num,
ret);
out:
mutex_unlock(&hr_qp->mutex);
......
......@@ -49,7 +49,8 @@ void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
if (srq) {
refcount_inc(&srq->refcount);
} else {
dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
dev_warn(hr_dev->dev, "Async event for bogus SRQ 0x%08x\n",
srqn);
return;
}
......@@ -79,7 +80,7 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
break;
default:
dev_err(hr_dev->dev,
"hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
"hns_roce:Unexpected event type 0x%x on SRQ 0x%06lx\n",
event_type, srq->srqn);
return;
}
......@@ -147,8 +148,8 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
if (ret) {
dev_err(hr_dev->dev, "SRQ alloc.Failed to get table, srq - 0x%lx.\n",
srq->srqn);
dev_err(hr_dev->dev, "Get table failed(%d) for SRQ(0x%lx) alloc.\n",
ret, srq->srqn);
goto err_out;
}
......@@ -201,7 +202,7 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
if (ret)
dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN 0x%06lx.\n",
ret, srq->srqn);
spin_lock_irq(&srq_table->lock);
......@@ -243,7 +244,8 @@ static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem),
srq->umem->page_shift, &srq->mtt);
if (ret) {
dev_err(hr_dev->dev, "mtt init error when create srq\n");
dev_err(hr_dev->dev, "Mtt init error(%d) when create srq.\n",
ret);
goto err_user_buf;
}
......@@ -274,7 +276,7 @@ static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
}
if (ret) {
dev_err(hr_dev->dev, "mtt init error for idx que\n");
dev_err(hr_dev->dev, "User mtt init error for idx que\n");
goto err_user_idx_mtt;
}
......@@ -282,7 +284,7 @@ static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
srq->idx_que.umem);
if (ret) {
dev_err(hr_dev->dev,
"write mtt error for idx que\n");
"Write mtt error(%d) for idx que\n", ret);
goto err_user_idx_buf;
}
......@@ -352,7 +354,8 @@ static int create_kernel_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
if (ret) {
dev_err(hr_dev->dev, "mtt init error when create srq\n");
dev_err(hr_dev->dev, "Mtt init error(%d) when create srq.\n",
ret);
goto err_kernel_buf;
}
......@@ -372,14 +375,15 @@ static int create_kernel_srq(struct ib_pd *pd, struct hns_roce_srq *srq,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret) {
dev_err(hr_dev->dev, "mtt init error for idx que\n");
dev_err(hr_dev->dev, "Kernel mtt init error(%d) for idx que.\n",
ret);
goto err_kernel_create_idx;
}
/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret) {
dev_err(hr_dev->dev, "write mtt error for idx que\n");
dev_err(hr_dev->dev, "Write mtt error(%d) for idx que.\n", ret);
goto err_kernel_idx_buf;
}
srq->wrid = kcalloc(srq->max, sizeof(u64), GFP_KERNEL);
......@@ -487,8 +491,8 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
0, srq);
if (ret) {
dev_err(hr_dev->dev,
"failed to alloc srq, cqn - 0x%x, pdn - 0x%lx\n",
cqn, to_hr_pd(pd)->pdn);
"Alloc srq failed(%d), cqn is 0x%x, pdn is 0x%lx.\n",
ret, cqn, to_hr_pd(pd)->pdn);
goto err_wrid;
}
......
......@@ -78,7 +78,7 @@ static ssize_t cqc_show(struct device *dev,
ret = hr_dev->dfx->query_cqc_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "cqc query failed");
dev_err(dev, "CQC query failed(%d).", ret);
return -EBUSY;
}
......@@ -95,7 +95,7 @@ static ssize_t cmd_show(struct device *dev,
ret = hr_dev->dfx->query_cmd_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "cmd query failed");
dev_err(dev, "Cmd query failed(%d).", ret);
return -EBUSY;
}
......@@ -112,7 +112,7 @@ static ssize_t pkt_show(struct device *dev,
ret = hr_dev->dfx->query_pkt_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "pkt query failed");
dev_err(dev, "Pkt query failed(%d).", ret);
return -EBUSY;
}
......@@ -146,7 +146,7 @@ static ssize_t ceqc_show(struct device *dev,
ret = hr_dev->dfx->query_ceqc_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "ceqc query failed");
dev_err(dev, "CEQC query failed");
return -EBUSY;
}
......@@ -214,7 +214,7 @@ static ssize_t qpc_show(struct device *dev, struct device_attribute *attr,
ret = hr_dev->dfx->query_qpc_stat(hr_dev,
buf, &count);
if (ret) {
dev_err(dev, "qpc query failed");
dev_err(dev, "QPC query failed");
return -EBUSY;
}
......@@ -247,7 +247,7 @@ static ssize_t srqc_show(struct device *dev, struct device_attribute *attr,
ret = hr_dev->dfx->query_srqc_stat(hr_dev, buf, &count);
if (ret) {
dev_err(dev, "srqc query failed");
dev_err(dev, "SRQC query failed");
return -EBUSY;
}
......@@ -318,7 +318,8 @@ static ssize_t coalesce_maxcnt_store(struct device *dev,
}
if (int_maxcnt > HNS_ROCE_CEQ_MAX_BURST_NUM) {
dev_err(dev, "int_maxcnt must be less than 2^16!\n");
dev_err(dev, "int_maxcnt(%d) must be less than 2^16!\n",
int_maxcnt);
return -EINVAL;
}
......@@ -328,7 +329,8 @@ static ssize_t coalesce_maxcnt_store(struct device *dev,
ret = hr_dev->dfx->modify_eq(hr_dev, eq, eq->eq_max_cnt, 0,
HNS_ROCE_EQ_MAXCNT_MASK);
if (ret) {
dev_err(dev, "eqc modify failed, eq_num=%d\n", eq->eqn);
dev_err(dev, "EQC(%d) modify failed(%d).\n", eq->eqn,
ret);
return -EBUSY;
}
}
......@@ -367,7 +369,8 @@ static ssize_t coalesce_period_store(struct device *dev,
}
if (int_period > HNS_ROCE_CEQ_MAX_INTERVAL) {
dev_err(dev, "int_period must be less than 2^16!\n");
dev_err(dev, "int_period(%d) must be less than 2^16!\n",
int_period);
return -EINVAL;
}
......@@ -377,7 +380,8 @@ static ssize_t coalesce_period_store(struct device *dev,
ret = hr_dev->dfx->modify_eq(hr_dev, eq, 0, eq->eq_period,
HNS_ROCE_EQ_PERIOD_MASK);
if (ret) {
dev_err(dev, "eqc modify failed, eq_num=%d\n", eq->eqn);
dev_err(dev, "EQC(%d) modify failed(%d).\n", eq->eqn,
ret);
return -EBUSY;
}
}
......