Commit 1848757c authored by Doug Ledford

Merge branches 'hns' and 'misc' into k.o/for-next

Signed-off-by: Doug Ledford <dledford@redhat.com>
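
Most of the 'misc' side of this merge replaces open-coded (void __user *)(unsigned long) casts of the 64-bit response field with u64_to_user_ptr(), and turns the INIT_UDATA() macros into type-checked inline functions. As a rough sketch of what u64_to_user_ptr() does (the in-tree version is a macro in include/linux/kernel.h that additionally typecheck()s its argument as u64):

	/* Sketch only: the real u64_to_user_ptr() is a macro in
	 * include/linux/kernel.h with an added typecheck(u64, x). */
	static inline void __user *u64_to_user_ptr(u64 x)
	{
		return (void __user *)(uintptr_t)x;
	}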
 menuconfig INFINIBAND
 	tristate "InfiniBand support"
-	depends on PCI || BROKEN
 	depends on HAS_IOMEM
 	depends on NET
 	depends on INET
......
@@ -47,21 +47,28 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
-	do {								\
-		(udata)->inbuf  = (const void __user *) (ibuf);		\
-		(udata)->outbuf = (void __user *) (obuf);		\
-		(udata)->inlen  = (ilen);				\
-		(udata)->outlen = (olen);				\
-	} while (0)
+static inline void
+ib_uverbs_init_udata(struct ib_udata *udata,
+		     const void __user *ibuf,
+		     void __user *obuf,
+		     size_t ilen, size_t olen)
+{
+	udata->inbuf  = ibuf;
+	udata->outbuf = obuf;
+	udata->inlen  = ilen;
+	udata->outlen = olen;
+}
 
-#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)			\
-	do {									\
-		(udata)->inbuf  = (ilen) ? (const void __user *) (ibuf) : NULL; \
-		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;	\
-		(udata)->inlen  = (ilen);					\
-		(udata)->outlen = (olen);					\
-	} while (0)
+static inline void
+ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
+				 const void __user *ibuf,
+				 void __user *obuf,
+				 size_t ilen, size_t olen)
+{
+	ib_uverbs_init_udata(udata,
+			     ilen ? ibuf : NULL, olen ? obuf : NULL,
+			     ilen, olen);
+}
 
 /*
  * Our lifetime rules for these structs are the following:
......
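
For orientation, this is what a converted call site looks like after the change; a minimal sketch only, with cmd, resp, buf, in_len and out_len standing in for the usual write()-handler locals that appear in the hunks below:

	/* Hypothetical handler fragment. The inline function keeps the
	 * __user pointer and size_t typing that the old INIT_UDATA()
	 * macro casted away. */
	struct ib_udata udata;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));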
@@ -91,8 +91,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 		goto err;
 	}
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -141,8 +141,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 		goto err_fd;
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_file;
 	}
@@ -238,8 +237,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 	memset(&resp, 0, sizeof resp);
 	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -295,8 +293,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
 	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
 							cmd.port_num);
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -320,8 +317,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -344,8 +341,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	memset(&resp, 0, sizeof resp);
 	resp.pd_handle = uobj->id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -490,8 +486,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -556,8 +552,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 		atomic_inc(&xrcd->usecnt);
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -655,8 +650,8 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -705,8 +700,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	resp.rkey      = mr->rkey;
 	resp.mr_handle = uobj->id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -748,8 +742,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -800,8 +794,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 	resp.lkey      = mr->lkey;
 	resp.rkey      = mr->rkey;
 
-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp)))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		ret = -EFAULT;
 	else
 		ret = in_len;
@@ -867,8 +860,8 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 		goto err_free;
 	}
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -889,8 +882,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 	resp.rkey      = mw->rkey;
 	resp.mw_handle = uobj->id;
 
-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp))) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -956,8 +948,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
 			       uobj_file.uobj);
 	ib_uverbs_init_event_queue(&ev_file->ev_queue);
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		uobj_alloc_abort(uobj);
 		return -EFAULT;
 	}
@@ -1087,10 +1078,11 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;
 
-	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
+	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+			     sizeof(cmd), sizeof(resp));
 
-	INIT_UDATA(&uhw, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -1173,8 +1165,8 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -1188,8 +1180,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 	resp.cqe = cq->cqe;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp.cqe))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
 		ret = -EFAULT;
 
 out:
@@ -1249,7 +1240,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 		return -EINVAL;
 
 	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
-	header_ptr = (void __user *)(unsigned long) cmd.response;
+	header_ptr = u64_to_user_ptr(cmd.response);
 	data_ptr = header_ptr + sizeof resp;
 
 	memset(&resp, 0, sizeof resp);
@@ -1343,8 +1334,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	resp.async_events_reported = obj->async_events_reported;
 
 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -1650,10 +1640,10 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;
 
-	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
-		   resp_size);
-	INIT_UDATA(&uhw, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + resp_size,
+	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+			     sizeof(cmd), resp_size);
+	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + resp_size,
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - resp_size);
@@ -1750,8 +1740,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -1795,8 +1785,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	resp.qpn       = qp->qp_num;
 	resp.qp_handle = obj->uevent.uobject.id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_destroy;
 	}
@@ -1911,8 +1900,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	resp.max_inline_data        = init_attr->cap.max_inline_data;
 	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out:
@@ -2042,7 +2030,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 			   ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
 		return -EOPNOTSUPP;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
 		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len);
@@ -2126,8 +2114,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	resp.events_reported = obj->uevent.events_reported;
 
 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -2311,8 +2298,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		break;
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out_put:
@@ -2460,8 +2446,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
 		}
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out:
@@ -2510,8 +2495,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
 			break;
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out:
@@ -2548,8 +2532,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
 		return -EINVAL;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -2600,8 +2584,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	resp.ah_handle = uobj->id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -3627,8 +3610,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	xcmd.max_sge	 = cmd.max_sge;
 	xcmd.srq_limit	 = cmd.srq_limit;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -3654,8 +3637,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
@@ -3680,7 +3663,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
+	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
 		   out_len);
 
 	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
@@ -3731,8 +3714,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
 	resp.max_sge   = attr.max_sge;
 	resp.srq_limit = attr.srq_limit;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -3773,8 +3755,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	}
 	resp.events_reported = obj->events_reported;
 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp)))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		return -EFAULT;
 
 	return in_len;
......
@@ -376,7 +376,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
 			    min_id) ||
 		    WARN(attr_obj_with_special_access &&
 			 !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
-			 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy aceess but isn't mandatory\n",
+			 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
 			 min_id) ||
 		    WARN(IS_ATTR_OBJECT(attr) &&
 			 attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
......
@@ -763,7 +763,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 		}
 
 		if (!access_ok(VERIFY_WRITE,
-			       (void __user *) (unsigned long) ex_hdr.response,
+			       u64_to_user_ptr(ex_hdr.response),
 			       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
 			ret = -EFAULT;
 			goto out;
@@ -775,19 +775,17 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 			}
 		}
 
-		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
-				       hdr.in_words * 8, hdr.out_words * 8);
+		ib_uverbs_init_udata_buf_or_null(&ucore, buf,
+					u64_to_user_ptr(ex_hdr.response),
+					hdr.in_words * 8, hdr.out_words * 8);
 
-		INIT_UDATA_BUF_OR_NULL(&uhw,
-				       buf + ucore.inlen,
-				       (unsigned long) ex_hdr.response + ucore.outlen,
-				       ex_hdr.provider_in_words * 8,
-				       ex_hdr.provider_out_words * 8);
+		ib_uverbs_init_udata_buf_or_null(&uhw,
+					buf + ucore.inlen,
+					u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
+					ex_hdr.provider_in_words * 8,
+					ex_hdr.provider_out_words * 8);
 
-		ret = uverbs_ex_cmd_table[command](file,
-						   ib_dev,
-						   &ucore,
-						   &uhw);
+		ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
 		if (!ret)
 			ret = written_count;
 	} else {
......
@@ -246,7 +246,8 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
 		outbuf_len = uhw_out->ptr_attr.len;
 	}
 
-	INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
+	ib_uverbs_init_udata_buf_or_null(udata, inbuf, outbuf, inbuf_len,
+					 outbuf_len);
 }
 
 static int uverbs_create_cq_handler(struct ib_device *ib_dev,
......
 config INFINIBAND_CXGB3
 	tristate "Chelsio RDMA Driver"
-	depends on CHELSIO_T3 && INET
+	depends on CHELSIO_T3
 	select GENERIC_ALLOCATOR
 	---help---
 	  This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
......
@@ -404,12 +404,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
 
 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
-	__u32 ptr;
+	__u32 ptr = wq->sq_rptr + count;
 	int flushed = 0;
-	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
-
-	ptr = wq->sq_rptr + count;
-	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
+	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
 
 	while (ptr != wq->sq_wptr) {
 		sqp->signaled = 0;
 		insert_sq_cqe(wq, cq, sqp);
......
 config INFINIBAND_CXGB4
 	tristate "Chelsio T4/T5 RDMA Driver"
-	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+	depends on CHELSIO_T4 && INET
 	select CHELSIO_LIB
 	select GENERIC_ALLOCATOR
 	---help---
......
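
Much of the connection-manager diff below simply drops __func__ from pr_debug() format strings. That information is not lost: with CONFIG_DYNAMIC_DEBUG, dynamic debug can prepend the function name to every enabled pr_debug() site itself, for example via `echo 'file cm.c +pf' > /sys/kernel/debug/dynamic_debug/control` (the `f` flag adds the function name, `p` enables the printk; the cm.c target assumes these hunks live in the iw_cxgb4 driver's cm.c, which the c4iw_* symbols suggest). Messages that indicate real problems are promoted from pr_debug() to pr_err(), pr_warn(), or pr_info() instead, and those keep __func__.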
@@ -180,7 +180,7 @@ static void ref_qp(struct c4iw_ep *ep)
 static void start_ep_timer(struct c4iw_ep *ep)
 {
-	pr_debug("%s ep %p\n", __func__, ep);
+	pr_debug("ep %p\n", ep);
 	if (timer_pending(&ep->timer)) {
 		pr_err("%s timer already started! ep %p\n",
 		       __func__, ep);
@@ -196,7 +196,7 @@ static void start_ep_timer(struct c4iw_ep *ep)
 static int stop_ep_timer(struct c4iw_ep *ep)
 {
-	pr_debug("%s ep %p stopping\n", __func__, ep);
+	pr_debug("ep %p stopping\n", ep);
 	del_timer_sync(&ep->timer);
 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
 		c4iw_put_ep(&ep->com);
@@ -212,7 +212,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
 	if (c4iw_fatal_error(rdev)) {
 		kfree_skb(skb);
-		pr_debug("%s - device in error state - dropping\n", __func__);
+		pr_err("%s - device in error state - dropping\n", __func__);
 		return -EIO;
 	}
 	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
@@ -229,7 +229,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
 	if (c4iw_fatal_error(rdev)) {
 		kfree_skb(skb);
-		pr_debug("%s - device in error state - dropping\n", __func__);
+		pr_err("%s - device in error state - dropping\n", __func__);
 		return -EIO;
 	}
 	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
@@ -263,10 +263,10 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
 	if (ep->emss < 128)
 		ep->emss = 128;
 	if (ep->emss & 7)
-		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
-			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
-	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
-		 ep->mss, ep->emss);
+		pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+			TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+	pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
+		 ep->emss);
 }
 
 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
@@ -287,7 +287,7 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 {
 	mutex_lock(&epc->mutex);
-	pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+	pr_debug("%s -> %s\n", states[epc->state], states[new]);
 	__state_set(epc, new);
 	mutex_unlock(&epc->mutex);
 	return;
@@ -322,7 +322,7 @@ static void *alloc_ep(int size, gfp_t gfp)
 		mutex_init(&epc->mutex);
 		c4iw_init_wr_wait(&epc->wr_wait);
 	}
-	pr_debug("%s alloc ep %p\n", __func__, epc);
+	pr_debug("alloc ep %p\n", epc);
 	return epc;
 }
@@ -384,7 +384,7 @@ void _c4iw_free_ep(struct kref *kref)
 	struct c4iw_ep *ep;
 
 	ep = container_of(kref, struct c4iw_ep, com.kref);
-	pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
+	pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
 	if (test_bit(QP_REFERENCED, &ep->com.flags))
 		deref_qp(ep);
 	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -570,7 +570,7 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
 	struct cpl_abort_req *req = cplhdr(skb);
 
-	pr_debug("%s rdev %p\n", __func__, rdev);
+	pr_debug("rdev %p\n", rdev);
 	req->cmd = CPL_ABORT_NO_RST;
 	skb_get(skb);
 	ret = c4iw_ofld_send(rdev, skb);
@@ -647,7 +647,7 @@ static int send_halfclose(struct c4iw_ep *ep)
 	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	if (WARN_ON(!skb))
 		return -ENOMEM;
@@ -662,7 +662,7 @@ static int send_abort(struct c4iw_ep *ep)
 	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
 	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	if (WARN_ON(!req_skb))
 		return -ENOMEM;
@@ -725,7 +725,7 @@ static int send_connect(struct c4iw_ep *ep)
 			roundup(sizev4, 16) :
 			roundup(sizev6, 16);
 
-	pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);
+	pr_debug("ep %p atid %u\n", ep, ep->atid);
 
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
@@ -824,13 +824,13 @@ static int send_connect(struct c4iw_ep *ep)
 			t5req->params =
 				  cpu_to_be64(FILTER_TUPLE_V(params));
 			t5req->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t5req->rsvd);
+			pr_debug("snd_isn %u\n", t5req->rsvd);
 			t5req->opt2 = cpu_to_be32(opt2);
 		} else {
 			t6req->params =
 				  cpu_to_be64(FILTER_TUPLE_V(params));
 			t6req->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t6req->rsvd);
+			pr_debug("snd_isn %u\n", t6req->rsvd);
 			t6req->opt2 = cpu_to_be32(opt2);
 		}
 	}
@@ -877,13 +877,13 @@ static int send_connect(struct c4iw_ep *ep)
 			t5req6->params =
 				    cpu_to_be64(FILTER_TUPLE_V(params));
 			t5req6->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t5req6->rsvd);
+			pr_debug("snd_isn %u\n", t5req6->rsvd);
 			t5req6->opt2 = cpu_to_be32(opt2);
 		} else {
 			t6req6->params =
 				    cpu_to_be64(FILTER_TUPLE_V(params));
 			t6req6->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t6req6->rsvd);
+			pr_debug("snd_isn %u\n", t6req6->rsvd);
 			t6req6->opt2 = cpu_to_be32(opt2);
 		}
@@ -907,8 +907,8 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params mpa_v2_params;
 
-	pr_debug("%s ep %p tid %u pd_len %d\n",
-		 __func__, ep, ep->hwtid, ep->plen);
+	pr_debug("ep %p tid %u pd_len %d\n",
+		 ep, ep->hwtid, ep->plen);
 
 	BUG_ON(skb_cloned(skb));
@@ -961,7 +961,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	if (mpa_rev_to_use == 2) {
 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
 					       sizeof (struct mpa_v2_conn_params));
-		pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
+		pr_debug("initiator ird %u ord %u\n", ep->ird,
 			 ep->ord);
 		mpa_v2_params.ird = htons((u16)ep->ird);
 		mpa_v2_params.ord = htons((u16)ep->ord);
@@ -1014,8 +1014,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	struct sk_buff *skb;
 	struct mpa_v2_conn_params mpa_v2_params;
 
-	pr_debug("%s ep %p tid %u pd_len %d\n",
-		 __func__, ep, ep->hwtid, ep->plen);
+	pr_debug("ep %p tid %u pd_len %d\n",
+		 ep, ep->hwtid, ep->plen);
 
 	mpalen = sizeof(*mpa) + plen;
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1094,8 +1094,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	struct sk_buff *skb;
 	struct mpa_v2_conn_params mpa_v2_params;
 
-	pr_debug("%s ep %p tid %u pd_len %d\n",
-		 __func__, ep, ep->hwtid, ep->plen);
+	pr_debug("ep %p tid %u pd_len %d\n",
+		 ep, ep->hwtid, ep->plen);
 
 	mpalen = sizeof(*mpa) + plen;
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1185,7 +1185,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = lookup_atid(t, atid);
 
-	pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+	pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
 		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
 
 	mutex_lock(&ep->com.mutex);
@@ -1229,7 +1229,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = status;
@@ -1246,7 +1246,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_DISCONNECT;
 	if (ep->com.cm_id) {
@@ -1261,7 +1261,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = -ECONNRESET;
@@ -1278,8 +1278,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u status %d\n",
-		 __func__, ep, ep->hwtid, status);
+	pr_debug("ep %p tid %u status %d\n",
+		 ep, ep->hwtid, status);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REPLY;
 	event.status = status;
@@ -1308,7 +1308,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 		}
 	}
 
-	pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
+	pr_debug("ep %p tid %u status %d\n", ep,
 		 ep->hwtid, status);
 	set_bit(CONN_RPL_UPCALL, &ep->com.history);
 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -1322,7 +1322,7 @@ static int connect_request_upcall(struct c4iw_ep *ep)
 	struct iw_cm_event event;
 	int ret;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
 	memcpy(&event.local_addr, &ep->com.local_addr,
@@ -1359,13 +1359,13 @@ static void established_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_ESTABLISHED;
 	event.ird = ep->ord;
 	event.ord = ep->ird;
 	if (ep->com.cm_id) {
-		pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+		pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 		set_bit(ESTAB_UPCALL, &ep->com.history);
 	}
@@ -1377,8 +1377,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
 	u32 credit_dack;
 
-	pr_debug("%s ep %p tid %u credits %u\n",
-		 __func__, ep, ep->hwtid, credits);
+	pr_debug("ep %p tid %u credits %u\n",
+		 ep, ep->hwtid, credits);
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
 		pr_err("update_rx_credits - cannot alloc skb!\n");
@@ -1429,7 +1429,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	int err;
 	int disconnect = 0;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1527,8 +1527,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 					MPA_V2_IRD_ORD_MASK;
 			resp_ord = ntohs(mpa_v2_params->ord) &
 					MPA_V2_IRD_ORD_MASK;
-			pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
-				 __func__,
+			pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
 				 resp_ird, resp_ord, ep->ird, ep->ord);
 
 			/*
@@ -1573,8 +1572,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (peer2peer)
 		ep->mpa_attr.p2p_type = p2p_type;
 
-	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
-		 __func__, ep->mpa_attr.crc_enabled,
+	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
+		 ep->mpa_attr.crc_enabled,
 		 ep->mpa_attr.recv_marker_enabled,
 		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
 		 ep->mpa_attr.p2p_type, p2p_type);
@@ -1670,7 +1669,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	struct mpa_v2_conn_params *mpa_v2_params;
 	u16 plen;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1679,7 +1678,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
 		goto err_stop_timer;
 
-	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
 
 	/*
 	 * Copy the new data into our accumulation buffer.
@@ -1695,7 +1694,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (ep->mpa_pkt_len < sizeof(*mpa))
 		return 0;
 
-	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/*
@@ -1758,8 +1757,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 					MPA_V2_IRD_ORD_MASK;
 			ep->ord = min_t(u32, ep->ord,
 					cur_max_read_depth(ep->com.dev));
-			pr_debug("%s initiator ird %u ord %u\n",
-				 __func__, ep->ird, ep->ord);
+			pr_debug("initiator ird %u ord %u\n",
+				 ep->ird, ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
 				if (peer2peer) {
 					if (ntohs(mpa_v2_params->ord) &
@@ -1776,8 +1775,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (peer2peer)
 		ep->mpa_attr.p2p_type = p2p_type;
 
-	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
-		 __func__,
+	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
 		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
 		 ep->mpa_attr.p2p_type);
@@ -1816,7 +1814,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = get_ep_from_tid(dev, tid);
 	if (!ep)
 		return 0;
-	pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+	pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
 	mutex_lock(&ep->com.mutex);
@@ -1870,7 +1868,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		pr_warn("Abort rpl to freed endpoint\n");
 		return 0;
 	}
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
@@ -1994,8 +1992,8 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
 {
 	ep->snd_win = snd_win;
 	ep->rcv_win = rcv_win;
-	pr_debug("%s snd_win %d rcv_win %d\n",
-		 __func__, ep->snd_win, ep->rcv_win);
+	pr_debug("snd_win %d rcv_win %d\n",
+		 ep->snd_win, ep->rcv_win);
 }
 
 #define ACT_OPEN_RETRY_COUNT 2
@@ -2100,7 +2098,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 	int iptype;
 	__u8 *ra;
 
-	pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+	pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
 	init_timer(&ep->timer);
 	c4iw_init_wr_wait(&ep->com.wr_wait);
@@ -2163,8 +2161,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 		goto fail4;
 	}
 
-	pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
-		 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+	pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+		 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
 		 ep->l2t->idx);
 
 	state_set(&ep->com, CONNECTING);
@@ -2215,12 +2213,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
 	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
 
-	pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+	pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
 		 status, status2errno(status));
 
 	if (cxgb_is_neg_adv(status)) {
-		pr_debug("%s Connection problems for atid %u status %u (%s)\n",
-			 __func__, atid, status, neg_adv_str(status));
+		pr_debug("Connection problems for atid %u status %u (%s)\n",
+			 atid, status, neg_adv_str(status));
 		ep->stats.connect_neg_adv++;
 		mutex_lock(&dev->rdev.stats.lock);
 		dev->rdev.stats.neg_adv++;
@@ -2316,10 +2314,10 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
 	if (!ep) {
-		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
 		goto out;
 	}
-	pr_debug("%s ep %p status %d error %d\n", __func__, ep,
+	pr_debug("ep %p status %d error %d\n", ep,
 		 rpl->status, status2errno(rpl->status));
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	c4iw_put_ep(&ep->com);
@@ -2334,10 +2332,10 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
 	if (!ep) {
-		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
 		goto out;
 	}
-	pr_debug("%s ep %p\n", __func__, ep);
+	pr_debug("ep %p\n", ep);
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	c4iw_put_ep(&ep->com);
 out:
@@ -2356,7 +2354,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	int win;
 	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	BUG_ON(skb_cloned(skb));
 
 	skb_get(skb);
@@ -2427,7 +2425,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		if (peer2peer)
 			isn += 4;
 		rpl5->iss = cpu_to_be32(isn);
-		pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
+		pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
 	}
 
 	rpl->opt0 = cpu_to_be64(opt0);
@@ -2440,7 +2438,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
 {
-	pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
+	pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(struct cpl_tid_release));
 	release_tid(&dev->rdev, hwtid, skb);
@@ -2466,13 +2464,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
 	if (!parent_ep) {
-		pr_debug("%s connect request on invalid stid %d\n",
-			 __func__, stid);
+		pr_err("%s connect request on invalid stid %d\n",
+		       __func__, stid);
 		goto reject;
 	}
 
 	if (state_read(&parent_ep->com) != LISTEN) {
-		pr_debug("%s - listening ep not in LISTEN\n", __func__);
+		pr_err("%s - listening ep not in LISTEN\n", __func__);
 		goto reject;
 	}
@@ -2481,16 +2479,16 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	/* Find output route */
 	if (iptype == 4)  {
-		pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
-			 , __func__, parent_ep, hwtid,
+		pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+			 , parent_ep, hwtid,
 			 local_ip, peer_ip, ntohs(local_port),
 			 ntohs(peer_port), peer_mss);
 		dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
 				      *(__be32 *)local_ip, *(__be32 *)peer_ip,
 				      local_port, peer_port, tos);
 	} else {
-		pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
-			 , __func__, parent_ep, hwtid,
+		pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+			 , parent_ep, hwtid,
 			 local_ip, peer_ip, ntohs(local_port),
 			 ntohs(peer_port), peer_mss);
 		dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
@@ -2576,7 +2574,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	child_ep->dst = dst;
 	child_ep->hwtid = hwtid;
 
-	pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+	pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
 		 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
 
 	init_timer(&child_ep->timer);
@@ -2613,11 +2611,11 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	int ret;
 
 	ep = get_ep_from_tid(dev, tid);
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
-	pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+	pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid,
 		 ntohs(req->tcp_opt));
 
 	set_emss(ep, ntohs(req->tcp_opt));
@@ -2650,7 +2648,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (!ep)
 		return 0;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	dst_confirm(ep->dst);
 
 	set_bit(PEER_CLOSE, &ep->com.history);
@@ -2741,16 +2739,16 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 
 	if (cxgb_is_neg_adv(req->status)) {
-		pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
-			 __func__, ep->hwtid, req->status,
-			 neg_adv_str(req->status));
+		pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
+			__func__, ep->hwtid, req->status,
+			neg_adv_str(req->status));
 		ep->stats.abort_neg_adv++;
 		mutex_lock(&dev->rdev.stats.lock);
 		dev->rdev.stats.neg_adv++;
 		mutex_unlock(&dev->rdev.stats.lock);
 		goto deref_ep;
 	}
-	pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+	pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
 		 ep->com.state);
 	set_bit(PEER_ABORT, &ep->com.history);
@@ -2783,8 +2781,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 			 * do some housekeeping so as to re-initiate the
 			 * connection
 			 */
-			pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
-				 __func__, mpa_rev);
+			pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
+				__func__, mpa_rev);
 			ep->retry_with_mpa_v1 = 1;
 		}
 		break;
@@ -2810,7 +2808,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	case ABORTING:
 		break;
 	case DEAD:
-		pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+		pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
 		mutex_unlock(&ep->com.mutex);
 		goto deref_ep;
 	default:
@@ -2875,7 +2873,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (!ep)
 		return 0;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	/* The cm_id may be null if we failed to connect */
 	mutex_lock(&ep->com.mutex);
@@ -2950,19 +2948,19 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = get_ep_from_tid(dev, tid);
 	if (!ep)
 		return 0;
-	pr_debug("%s ep %p tid %u credits %u\n",
-		 __func__, ep, ep->hwtid, credits);
+	pr_debug("ep %p tid %u credits %u\n",
+		 ep, ep->hwtid, credits);
 	if (credits == 0) {
-		pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
-			 __func__, ep, ep->hwtid, state_read(&ep->com));
+		pr_debug("0 credit ack ep %p tid %u state %u\n",
+			 ep, ep->hwtid, state_read(&ep->com));
 		goto out;
 	}
 
 	dst_confirm(ep->dst);
 	if (ep->mpa_skb) {
-		pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
-			 __func__, ep, ep->hwtid,
-			 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+		pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
+			 ep, ep->hwtid, state_read(&ep->com),
+			 ep->mpa_attr.initiator ? 1 : 0);
 		mutex_lock(&ep->com.mutex);
 		kfree_skb(ep->mpa_skb);
 		ep->mpa_skb = NULL;
@@ -2980,7 +2978,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
int abort; int abort;
struct c4iw_ep *ep = to_ep(cm_id); struct c4iw_ep *ep = to_ep(cm_id);
pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid); pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex); mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) { if (ep->com.state != MPA_REQ_RCVD) {
...@@ -3011,7 +3009,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3011,7 +3009,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
int abort = 0; int abort = 0;
pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid); pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex); mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) { if (ep->com.state != MPA_REQ_RCVD) {
...@@ -3064,7 +3062,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3064,7 +3062,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ird = 1; ep->ird = 1;
} }
pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
ep->com.cm_id = cm_id; ep->com.cm_id = cm_id;
ref_cm_id(&ep->com); ref_cm_id(&ep->com);
...@@ -3220,12 +3218,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3220,12 +3218,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.dev = dev; ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn); ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) { if (!ep->com.qp) {
pr_debug("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL; err = -EINVAL;
goto fail2; goto fail2;
} }
ref_qp(ep); ref_qp(ep);
pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
ep->com.qp, cm_id); ep->com.qp, cm_id);
/* /*
...@@ -3263,8 +3261,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3263,8 +3261,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} }
/* find a route */ /* find a route */
pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
__func__, &laddr->sin_addr, ntohs(laddr->sin_port), &laddr->sin_addr, ntohs(laddr->sin_port),
ra, ntohs(raddr->sin_port)); ra, ntohs(raddr->sin_port));
ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
laddr->sin_addr.s_addr, laddr->sin_addr.s_addr,
...@@ -3285,8 +3283,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3285,8 +3283,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} }
/* find a route */ /* find a route */
pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
__func__, laddr6->sin6_addr.s6_addr, laddr6->sin6_addr.s6_addr,
ntohs(laddr6->sin6_port), ntohs(laddr6->sin6_port),
raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
...@@ -3309,8 +3307,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3309,8 +3307,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail4; goto fail4;
} }
pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
__func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
ep->l2t->idx); ep->l2t->idx);
state_set(&ep->com, CONNECTING); state_set(&ep->com, CONNECTING);
...@@ -3424,7 +3422,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) ...@@ -3424,7 +3422,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail1; goto fail1;
} }
skb_queue_head_init(&ep->com.ep_skb_list); skb_queue_head_init(&ep->com.ep_skb_list);
pr_debug("%s ep %p\n", __func__, ep); pr_debug("ep %p\n", ep);
ep->com.cm_id = cm_id; ep->com.cm_id = cm_id;
ref_cm_id(&ep->com); ref_cm_id(&ep->com);
ep->com.dev = dev; ep->com.dev = dev;
...@@ -3478,7 +3476,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) ...@@ -3478,7 +3476,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
int err; int err;
struct c4iw_listen_ep *ep = to_listen_ep(cm_id); struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
pr_debug("%s ep %p\n", __func__, ep); pr_debug("ep %p\n", ep);
might_sleep(); might_sleep();
state_set(&ep->com, DEAD); state_set(&ep->com, DEAD);
...@@ -3519,7 +3517,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) ...@@ -3519,7 +3517,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
mutex_lock(&ep->com.mutex); mutex_lock(&ep->com.mutex);
pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep, pr_debug("ep %p state %s, abrupt %d\n", ep,
states[ep->com.state], abrupt); states[ep->com.state], abrupt);
/* /*
...@@ -3573,8 +3571,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) ...@@ -3573,8 +3571,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND: case MORIBUND:
case ABORTING: case ABORTING:
case DEAD: case DEAD:
pr_debug("%s ignoring disconnect ep %p state %u\n", pr_info("%s ignoring disconnect ep %p state %u\n",
__func__, ep, ep->com.state); __func__, ep, ep->com.state);
break; break;
default: default:
BUG(); BUG();
...@@ -3678,7 +3676,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, ...@@ -3678,7 +3676,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
BUG_ON(!rpl_skb); BUG_ON(!rpl_skb);
if (req->retval) { if (req->retval) {
pr_debug("%s passive open failure %d\n", __func__, req->retval); pr_err("%s passive open failure %d\n", __func__, req->retval);
mutex_lock(&dev->rdev.stats.lock); mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.pas_ofld_conn_fails++; dev->rdev.stats.pas_ofld_conn_fails++;
mutex_unlock(&dev->rdev.stats.lock); mutex_unlock(&dev->rdev.stats.lock);
...@@ -3895,8 +3893,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -3895,8 +3893,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!lep) { if (!lep) {
pr_debug("%s connect request on invalid stid %d\n", pr_warn("%s connect request on invalid stid %d\n",
__func__, stid); __func__, stid);
goto reject; goto reject;
} }
...@@ -3933,7 +3931,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -3933,7 +3931,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
skb_set_transport_header(skb, (void *)tcph - (void *)rss); skb_set_transport_header(skb, (void *)tcph - (void *)rss);
skb_get(skb); skb_get(skb);
pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
ntohs(tcph->source), iph->tos); ntohs(tcph->source), iph->tos);
...@@ -3941,15 +3939,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -3941,15 +3939,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
iph->daddr, iph->saddr, tcph->dest, iph->daddr, iph->saddr, tcph->dest,
tcph->source, iph->tos); tcph->source, iph->tos);
if (!dst) { if (!dst) {
pr_err("%s - failed to find dst entry!\n", pr_err("%s - failed to find dst entry!\n", __func__);
__func__);
goto reject; goto reject;
} }
neigh = dst_neigh_lookup_skb(dst, skb); neigh = dst_neigh_lookup_skb(dst, skb);
if (!neigh) { if (!neigh) {
pr_err("%s - failed to allocate neigh!\n", pr_err("%s - failed to allocate neigh!\n", __func__);
__func__);
goto free_dst; goto free_dst;
} }
...@@ -4032,8 +4028,7 @@ static void process_timeout(struct c4iw_ep *ep) ...@@ -4032,8 +4028,7 @@ static void process_timeout(struct c4iw_ep *ep)
int abort = 1; int abort = 1;
mutex_lock(&ep->com.mutex); mutex_lock(&ep->com.mutex);
pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
ep->com.state);
set_bit(TIMEDOUT, &ep->com.history); set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) { switch (ep->com.state) {
case MPA_REQ_SENT: case MPA_REQ_SENT:
...@@ -4176,13 +4171,13 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -4176,13 +4171,13 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_wr_wait *wr_waitp; struct c4iw_wr_wait *wr_waitp;
int ret; int ret;
pr_debug("%s type %u\n", __func__, rpl->type); pr_debug("type %u\n", rpl->type);
switch (rpl->type) { switch (rpl->type) {
case FW6_TYPE_WR_RPL: case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
if (wr_waitp) if (wr_waitp)
c4iw_wake_up(wr_waitp, ret ? -ret : 0); c4iw_wake_up(wr_waitp, ret ? -ret : 0);
kfree_skb(skb); kfree_skb(skb);
...@@ -4214,13 +4209,12 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -4214,13 +4209,12 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0; return 0;
} }
if (cxgb_is_neg_adv(req->status)) { if (cxgb_is_neg_adv(req->status)) {
pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n", pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
__func__, ep->hwtid, req->status, __func__, ep->hwtid, req->status,
neg_adv_str(req->status)); neg_adv_str(req->status));
goto out; goto out;
} }
pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
ep->com.state);
c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
out: out:
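A side note on the change running through the hunks above: the explicit "%s ..., __func__" prefix is dropped because the logging macro can supply the function name centrally — the kernel's dynamic debug facility can decorate every pr_debug() callsite with the function name at runtime. A minimal userspace sketch of the same idea (the pr_debug macro and demo function here are illustrative stand-ins, not the kernel implementation):

#include <stdio.h>

/* Hypothetical stand-in for the kernel macro: the function name is
 * added once, centrally, instead of at every call site. */
#define pr_debug(fmt, ...) \
    fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)

static void pass_establish_demo(void *ep, unsigned int tid)
{
    pr_debug("ep %p tid %u\n", ep, tid);    /* no explicit __func__ */
}

int main(void)
{
    pass_establish_demo((void *)0x1000, 42);
    return 0;
}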
...
@@ -144,7 +144,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    ret = c4iw_ofld_send(rdev, skb);
    if (ret)
        goto err4;
-   pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
+   pr_debug("wait_event wr_wait %p\n", &wr_wait);
    ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
    if (ret)
        goto err4;
@@ -178,7 +178,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
    struct t4_cqe cqe;
-   pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+   pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
             wq, cq, cq->sw_cidx, cq->sw_pidx);
    memset(&cqe, 0, sizeof(cqe));
    cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -197,7 +197,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
    int in_use = wq->rq.in_use - count;
    BUG_ON(in_use < 0);
-   pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+   pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
             wq, cq, wq->rq.in_use, count);
    while (in_use--) {
        insert_recv_cqe(wq, cq);
@@ -211,7 +211,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
{
    struct t4_cqe cqe;
-   pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+   pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
             wq, cq, cq->sw_cidx, cq->sw_pidx);
    memset(&cqe, 0, sizeof(cqe));
    cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -281,8 +281,8 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
            /*
             * Insert this completed cqe into the swcq.
             */
-           pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
-                    __func__, cidx, cq->sw_pidx);
+           pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+                    cidx, cq->sw_pidx);
            swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
            cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
            t4_swcq_produce(cq);
@@ -337,7 +337,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
    struct t4_swsqe *swsqe;
    int ret;
-   pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+   pr_debug("cqid 0x%x\n", chp->cq.cqid);
    ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
    /*
@@ -430,7 +430,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
    u32 ptr;
    *count = 0;
-   pr_debug("%s count zero %d\n", __func__, *count);
+   pr_debug("count zero %d\n", *count);
    ptr = cq->sw_cidx;
    while (ptr != cq->sw_pidx) {
        cqe = &cq->sw_queue[ptr];
@@ -440,7 +440,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
        if (++ptr == cq->size)
            ptr = 0;
    }
-   pr_debug("%s cq %p count %d\n", __func__, cq, *count);
+   pr_debug("cq %p count %d\n", cq, *count);
}
/*
@@ -471,8 +471,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
    if (ret)
        return ret;
-   pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-            __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+   pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+            CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));
@@ -603,8 +603,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
    if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
        struct t4_swsqe *swsqe;
-       pr_debug("%s out of order completion going in sw_sq at idx %u\n",
-                __func__, CQE_WRID_SQ_IDX(hw_cqe));
+       pr_debug("out of order completion going in sw_sq at idx %u\n",
+                CQE_WRID_SQ_IDX(hw_cqe));
        swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
        swsqe->cqe = *hw_cqe;
        swsqe->complete = 1;
@@ -638,13 +638,13 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
        BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
        wq->sq.cidx = (uint16_t)idx;
-       pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+       pr_debug("completing sq idx %u\n", wq->sq.cidx);
        *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
        if (c4iw_wr_log)
            c4iw_log_wr_stats(wq, hw_cqe);
        t4_sq_consume(wq);
    } else {
-       pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+       pr_debug("completing rq idx %u\n", wq->rq.cidx);
        *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
        BUG_ON(t4_rq_empty(wq));
        if (c4iw_wr_log)
@@ -661,12 +661,12 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
skip_cqe:
    if (SW_CQE(hw_cqe)) {
-       pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
-                __func__, cq, cq->cqid, cq->sw_cidx);
+       pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+                cq, cq->cqid, cq->sw_cidx);
        t4_swcq_consume(cq);
    } else {
-       pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
-                __func__, cq, cq->cqid, cq->cidx);
+       pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+                cq, cq->cqid, cq->cidx);
        t4_hwcq_consume(cq);
    }
    return ret;
@@ -712,8 +712,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
    wc->vendor_err = CQE_STATUS(&cqe);
    wc->wc_flags = 0;
-   pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
-            __func__, CQE_QPID(&cqe),
+   pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+            CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
             CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
@@ -857,7 +857,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
    struct c4iw_cq *chp;
    struct c4iw_ucontext *ucontext;
-   pr_debug("%s ib_cq %p\n", __func__, ib_cq);
+   pr_debug("ib_cq %p\n", ib_cq);
    chp = to_c4iw_cq(ib_cq);
    remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -889,7 +889,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
    size_t memsize, hwentries;
    struct c4iw_mm_entry *mm, *mm2;
-   pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+   pr_debug("ib_dev %p entries %d\n", ibdev, entries);
    if (attr->flags)
        return ERR_PTR(-EINVAL);
@@ -996,8 +996,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
        mm2->len = PAGE_SIZE;
        insert_mmap(ucontext, mm2);
    }
-   pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
-            __func__, chp->cq.cqid, chp, chp->cq.size,
+   pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+            chp->cq.cqid, chp, chp->cq.size,
             chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
    return &chp->ibcq;
err6:
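The t4_swcq_produce()/t4_swcq_consume() pairs in the hunks above advance producer/consumer indices (sw_pidx/sw_cidx) over the software CQE array with wraparound. A self-contained sketch of that ring discipline — the type, size, and names here are assumptions for illustration, not the driver's definitions:

#include <stdint.h>

#define SWCQ_SIZE 64                    /* illustrative; real size is per-CQ */

struct sw_cq {
    uint64_t queue[SWCQ_SIZE];          /* stand-in for the sw CQE array */
    uint16_t pidx;                      /* producer index (sw_pidx) */
    uint16_t cidx;                      /* consumer index (sw_cidx) */
    int in_use;
};

/* Advance the producer index with wraparound, as t4_swcq_produce() does. */
static void swcq_produce(struct sw_cq *cq, uint64_t cqe)
{
    cq->queue[cq->pidx] = cqe;
    if (++cq->pidx == SWCQ_SIZE)
        cq->pidx = 0;
    cq->in_use++;
}

/* Advance the consumer index with wraparound, as t4_swcq_consume() does. */
static int swcq_consume(struct sw_cq *cq, uint64_t *cqe)
{
    if (!cq->in_use)
        return -1;                      /* ring is empty */
    *cqe = cq->queue[cq->cidx];
    if (++cq->cidx == SWCQ_SIZE)
        cq->cidx = 0;
    cq->in_use--;
    return 0;
}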
...
@@ -811,8 +811,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
    rdev->qpmask = rdev->lldi.udb_density - 1;
    rdev->cqmask = rdev->lldi.ucq_density - 1;
-   pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
-            __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+   pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+            pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
             rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
             rdev->lldi.vr->pbl.start,
             rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
@@ -935,7 +935,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
-   pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
+   pr_debug("c4iw_dev %p\n", ctx->dev);
    c4iw_unregister_device(ctx->dev);
    c4iw_dealloc(ctx);
}
@@ -969,8 +969,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
    devp->rdev.lldi = *infop;
    /* init various hw-queue params based on lld info */
-   pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
-            __func__, devp->rdev.lldi.sge_ingpadboundary,
+   pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+            devp->rdev.lldi.sge_ingpadboundary,
             devp->rdev.lldi.sge_egrstatuspagesize);
    devp->rdev.hw_queue.t4_eq_status_entries =
@@ -1069,8 +1069,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
    }
    ctx->lldi = *infop;
-   pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-            __func__, pci_name(ctx->lldi.pdev),
+   pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+            pci_name(ctx->lldi.pdev),
             ctx->lldi.nchan, ctx->lldi.nrxq,
             ctx->lldi.ntxq, ctx->lldi.nports);
@@ -1203,7 +1203,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
    struct uld_ctx *ctx = handle;
-   pr_debug("%s new_state %u\n", __func__, new_state);
+   pr_debug("new_state %u\n", new_state);
    switch (new_state) {
    case CXGB4_STATE_UP:
        pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
...
@@ -234,7 +234,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
        if (atomic_dec_and_test(&chp->refcnt))
            wake_up(&chp->wait);
    } else {
-       pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
+       pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
        spin_unlock_irqrestore(&dev->lock, flag);
    }
    return 0;
...
@@ -230,8 +230,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
    ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
    if (!ret) {
-       pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+       pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
               func, pci_name(rdev->lldi.pdev), hwtid, qpid);
        rdev->flags |= T4_FATAL_ERROR;
        wr_waitp->ret = -EIO;
    }
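The escalation to pr_err() above marks the point where a missed firmware reply becomes a fatal device condition: bounded wait, then flag the device and fail the waiter with -EIO. A plain-C sketch of that shape using POSIX semaphores (the timeout constant and helper names are assumptions, not the driver's):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

#define T4_FATAL_ERROR 0x1
#define WR_TIMEOUT_SECS 30              /* illustrative; C4IW_WR_TO in the driver */

struct wr_wait {
    sem_t completion;                   /* posted by the reply handler */
    int ret;
};

/* Sketch of the c4iw_wait_for_reply() shape: a bounded wait that
 * converts a timeout into a sticky fatal-error flag plus -EIO. */
int wait_for_reply(struct wr_wait *w, unsigned int *dev_flags)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += WR_TIMEOUT_SECS;
    if (sem_timedwait(&w->completion, &ts) && errno == ETIMEDOUT) {
        fprintf(stderr, "device not responding, disabling\n");
        *dev_flags |= T4_FATAL_ERROR;
        w->ret = -EIO;
    }
    return w->ret;
}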
@@ -537,8 +537,7 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
        if (mm->key == key && mm->len == len) {
            list_del_init(&mm->entry);
            spin_unlock(&ucontext->mmap_lock);
-           pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-                    __func__, key,
+           pr_debug("key 0x%x addr 0x%llx len %d\n", key,
                     (unsigned long long)mm->addr, mm->len);
            return mm;
        }
@@ -551,8 +550,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
    spin_lock(&ucontext->mmap_lock);
-   pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-            __func__, mm->key, (unsigned long long)mm->addr, mm->len);
+   pr_debug("key 0x%x addr 0x%llx len %d\n",
+            mm->key, (unsigned long long)mm->addr, mm->len);
    list_add_tail(&mm->entry, &ucontext->mmaps);
    spin_unlock(&ucontext->mmap_lock);
}
@@ -671,16 +670,14 @@ enum c4iw_mmid_state {
#define MPA_V2_IRD_ORD_MASK 0x3FFF
#define c4iw_put_ep(ep) { \
-   pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
-            __func__, __LINE__, \
+   pr_debug("put_ep ep %p refcnt %d\n", \
             ep, kref_read(&((ep)->kref))); \
    WARN_ON(kref_read(&((ep)->kref)) < 1); \
    kref_put(&((ep)->kref), _c4iw_free_ep); \
}
#define c4iw_get_ep(ep) { \
-   pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
-            __func__, __LINE__, \
+   pr_debug("get_ep ep %p, refcnt %d\n", \
             ep, kref_read(&((ep)->kref))); \
    kref_get(&((ep)->kref)); \
}
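The c4iw_get_ep()/c4iw_put_ep() macros above wrap the kernel kref API: kref_get() takes a reference and kref_put() drops one, invoking the release function when the count reaches zero. A minimal single-threaded model of that contract (the kernel's refcount is atomic; this sketch simplifies it to an int, and the demo struct is hypothetical):

#include <stdlib.h>

struct kref { int refcount; };          /* kernel version is atomic */

static void kref_get(struct kref *k) { k->refcount++; }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
    if (--k->refcount == 0)
        release(k);                     /* last reference frees the object */
}

struct ep { struct kref kref; };        /* kref first, so a cast works */

static void free_ep(struct kref *k)
{
    free((struct ep *)k);               /* container_of() in the kernel */
}

int main(void)
{
    struct ep *ep = calloc(1, sizeof(*ep));

    kref_get(&ep->kref);                /* like c4iw_get_ep */
    kref_get(&ep->kref);
    kref_put(&ep->kref, free_ep);       /* like c4iw_put_ep */
    kref_put(&ep->kref, free_ep);       /* count hits 0: free_ep runs */
    return 0;
}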
...
@@ -124,7 +124,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
        cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
    addr &= 0x7FFFFFF;
-   pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
+   pr_debug("addr 0x%x len %u\n", addr, len);
    num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
    c4iw_init_wr_wait(&wr_wait);
    for (i = 0; i < num_wqe; i++) {
@@ -285,8 +285,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
        mutex_unlock(&rdev->stats.lock);
        *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
    }
-   pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-            __func__, stag_state, type, pdid, stag_idx);
+   pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+            stag_state, type, pdid, stag_idx);
    /* write TPT entry */
    if (reset_tpt_entry)
@@ -327,8 +327,8 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
{
    int err;
-   pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-            __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+   pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+            pbl_addr, rdev->lldi.vr->pbl.start,
             pbl_size);
    err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
@@ -372,7 +372,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
    mhp->attr.stag = stag;
    mmid = stag >> 8;
    mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-   pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+   pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
    return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
@@ -422,7 +422,7 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
    int ret;
    u32 stag = T4_STAG_UNSET;
-   pr_debug("%s ib_pd %p\n", __func__, pd);
+   pr_debug("ib_pd %p\n", pd);
    php = to_c4iw_pd(pd);
    rhp = php->rhp;
@@ -479,7 +479,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    struct c4iw_pd *php;
    struct c4iw_mr *mhp;
-   pr_debug("%s ib_pd %p\n", __func__, pd);
+   pr_debug("ib_pd %p\n", pd);
    if (length == ~0ULL)
        return ERR_PTR(-EINVAL);
@@ -616,7 +616,7 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
        ret = -ENOMEM;
        goto dealloc_win;
    }
-   pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+   pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
    return &(mhp->ibmw);
dealloc_win:
@@ -641,7 +641,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
    deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
    kfree_skb(mhp->dereg_skb);
    kfree(mhp);
-   pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+   pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
    return 0;
}
@@ -699,7 +699,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
        goto err3;
    }
-   pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+   pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
    return &(mhp->ibmr);
err3:
    dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
@@ -744,7 +744,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
    struct c4iw_mr *mhp;
    u32 mmid;
-   pr_debug("%s ib_mr %p\n", __func__, ib_mr);
+   pr_debug("ib_mr %p\n", ib_mr);
    mhp = to_c4iw_mr(ib_mr);
    rhp = mhp->rhp;
@@ -762,7 +762,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
    kfree((void *) (unsigned long) mhp->kva);
    if (mhp->umem)
        ib_umem_release(mhp->umem);
-   pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+   pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
    kfree(mhp);
    return 0;
}
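One detail worth noting from write_tpt_entry() above: the STag packs an 8-bit rolling key into its low byte and the TPT index into the upper bits, which is why finish_mem_reg() derives the mmid as stag >> 8. The arithmetic, illustrated with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Compose a stag the way write_tpt_entry() does: index in the upper
 * bits, an 8-bit rolling key in the bottom byte. */
static uint32_t make_stag(uint32_t stag_idx, uint8_t key)
{
    return (stag_idx << 8) | key;
}

int main(void)
{
    uint32_t stag = make_stag(0x1234, 0xab);    /* illustrative values */

    printf("stag 0x%x mmid 0x%x key 0x%x\n",
           stag, stag >> 8, stag & 0xff);       /* mmid == stag_idx */
    return 0;
}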
...
@@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
    ucontext = container_of(kref, struct c4iw_ucontext, kref);
    rhp = to_c4iw_dev(ucontext->ibucontext.device);
-   pr_debug("%s ucontext %p\n", __func__, ucontext);
+   pr_debug("ucontext %p\n", ucontext);
    list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
        kfree(mm);
    c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
    struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
-   pr_debug("%s context %p\n", __func__, context);
+   pr_debug("context %p\n", context);
    c4iw_put_ucontext(ucontext);
    return 0;
}
@@ -127,7 +127,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
    int ret = 0;
    struct c4iw_mm_entry *mm = NULL;
-   pr_debug("%s ibdev %p\n", __func__, ibdev);
+   pr_debug("ibdev %p\n", ibdev);
    context = kzalloc(sizeof(*context), GFP_KERNEL);
    if (!context) {
        ret = -ENOMEM;
@@ -185,7 +185,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
    struct c4iw_ucontext *ucontext;
    u64 addr;
-   pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+   pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
             key, len);
    if (vma->vm_start & (PAGE_SIZE-1))
@@ -251,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
    php = to_c4iw_pd(pd);
    rhp = php->rhp;
-   pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+   pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
    c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
    mutex_lock(&rhp->rdev.stats.lock);
    rhp->rdev.stats.pd.cur--;
@@ -268,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
    u32 pdid;
    struct c4iw_dev *rhp;
-   pr_debug("%s ibdev %p\n", __func__, ibdev);
+   pr_debug("ibdev %p\n", ibdev);
    rhp = (struct c4iw_dev *) ibdev;
    pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
    if (!pdid)
@@ -291,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
    if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
        rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
    mutex_unlock(&rhp->rdev.stats.lock);
-   pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+   pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
    return &php->ibpd;
}
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                           u16 *pkey)
{
-   pr_debug("%s ibdev %p\n", __func__, ibdev);
+   pr_debug("ibdev %p\n", ibdev);
    *pkey = 0;
    return 0;
}
@@ -308,8 +308,8 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
{
    struct c4iw_dev *dev;
-   pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
-            __func__, ibdev, port, index, gid);
+   pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+            ibdev, port, index, gid);
    dev = to_c4iw_dev(ibdev);
    BUG_ON(port == 0);
    memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -323,7 +323,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
    struct c4iw_dev *dev;
-   pr_debug("%s ibdev %p\n", __func__, ibdev);
+   pr_debug("ibdev %p\n", ibdev);
    if (uhw->inlen || uhw->outlen)
        return -EINVAL;
@@ -364,7 +364,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
    struct net_device *netdev;
    struct in_device *inetdev;
-   pr_debug("%s ibdev %p\n", __func__, ibdev);
+   pr_debug("ibdev %p\n", ibdev);
    dev = to_c4iw_dev(ibdev);
    netdev = dev->rdev.lldi.ports[port-1];
@@ -406,7 +406,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
{
    struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                             ibdev.dev);
-   pr_debug("%s dev 0x%p\n", __func__, dev);
+   pr_debug("dev 0x%p\n", dev);
    return sprintf(buf, "%d\n",
                   CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
@@ -419,7 +419,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
    struct ethtool_drvinfo info;
    struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
-   pr_debug("%s dev 0x%p\n", __func__, dev);
+   pr_debug("dev 0x%p\n", dev);
    lldev->ethtool_ops->get_drvinfo(lldev, &info);
    return sprintf(buf, "%s\n", info.driver);
}
@@ -429,7 +429,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
{
    struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                             ibdev.dev);
-   pr_debug("%s dev 0x%p\n", __func__, dev);
+   pr_debug("dev 0x%p\n", dev);
    return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
                   c4iw_dev->rdev.lldi.pdev->device);
}
@@ -521,7 +521,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
{
    struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                             ibdev);
-   pr_debug("%s dev 0x%p\n", __func__, dev);
+   pr_debug("dev 0x%p\n", dev);
    snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
             FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
@@ -535,7 +535,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
    int ret;
    int i;
-   pr_debug("%s c4iw_dev %p\n", __func__, dev);
+   pr_debug("c4iw_dev %p\n", dev);
    BUG_ON(!dev->rdev.lldi.ports[0]);
    strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
    memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
@@ -645,7 +645,7 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
{
    int i;
-   pr_debug("%s c4iw_dev %p\n", __func__, dev);
+   pr_debug("c4iw_dev %p\n", dev);
    for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
        device_remove_file(&dev->ibdev.dev,
                           c4iw_class_attributes[i]);
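The show_rev()/show_board() handlers above recover the driver's device structure from an embedded struct device via container_of(). A standalone sketch of that idiom built on offsetof (the demo struct names are hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct ibdev { int id; };
struct demo_dev {                       /* illustrative stand-in */
    int flags;
    struct ibdev ibdev;                 /* embedded member */
};

int main(void)
{
    struct demo_dev dev = { .flags = 7 };
    struct ibdev *inner = &dev.ibdev;

    /* Walk back from the embedded member to its container. */
    struct demo_dev *outer = container_of(inner, struct demo_dev, ibdev);
    printf("flags %d\n", outer->flags);
    return 0;
}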
...
@@ -254,8 +254,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        ret = -ENOMEM;
        goto free_sq;
    }
-   pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
-            __func__, wq->sq.queue,
+   pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+            wq->sq.queue,
             (unsigned long long)virt_to_phys(wq->sq.queue),
             wq->rq.queue,
             (unsigned long long)virt_to_phys(wq->rq.queue));
@@ -361,8 +361,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
    if (ret)
        goto free_dma;
-   pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
-            __func__, wq->sq.qid, wq->rq.qid, wq->db,
+   pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+            wq->sq.qid, wq->rq.qid, wq->db,
             wq->sq.bar2_va, wq->rq.bar2_va);
    return 0;
@@ -724,7 +724,7 @@ static void free_qp_work(struct work_struct *work)
    ucontext = qhp->ucontext;
    rhp = qhp->rhp;
-   pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+   pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
    destroy_qp(&rhp->rdev, &qhp->wq,
               ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -738,19 +738,19 @@ static void queue_qp_free(struct kref *kref)
    struct c4iw_qp *qhp;
    qhp = container_of(kref, struct c4iw_qp, kref);
-   pr_debug("%s qhp %p\n", __func__, qhp);
+   pr_debug("qhp %p\n", qhp);
    queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
-   pr_debug("%s ib_qp %p\n", __func__, qp);
+   pr_debug("ib_qp %p\n", qp);
    kref_get(&to_c4iw_qp(qp)->kref);
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
-   pr_debug("%s ib_qp %p\n", __func__, qp);
+   pr_debug("ib_qp %p\n", qp);
    kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}
@@ -958,8 +958,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
            c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
            break;
        default:
-           pr_debug("%s post of type=%d TBD!\n", __func__,
+           pr_warn("%s post of type=%d TBD!\n", __func__,
                    wr->opcode);
            err = -EINVAL;
        }
        if (err) {
@@ -980,8 +980,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
-       pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
-                __func__,
+       pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
                 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
                 swsqe->opcode, swsqe->read_len);
        wr = wr->next;
@@ -1057,8 +1056,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        wqe->recv.r2[1] = 0;
        wqe->recv.r2[2] = 0;
        wqe->recv.len16 = len16;
-       pr_debug("%s cookie 0x%llx pidx %u\n",
-                __func__,
+       pr_debug("cookie 0x%llx pidx %u\n",
                 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
        t4_rq_produce(&qhp->wq, len16);
        idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
@@ -1218,7 +1216,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
    struct sk_buff *skb;
    struct terminate_message *term;
-   pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+   pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);
    skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
@@ -1255,7 +1253,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
    int rq_flushed, sq_flushed;
    unsigned long flag;
-   pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+   pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
    /* locking hierarchy: cq lock first, then qp lock. */
    spin_lock_irqsave(&rchp->lock, flag);
@@ -1340,8 +1338,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
    int ret;
    struct sk_buff *skb;
-   pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-            ep->hwtid);
+   pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
    skb = skb_dequeue(&ep->com.ep_skb_list);
    if (WARN_ON(!skb))
@@ -1367,13 +1364,13 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
    ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
                              qhp->wq.sq.qid, __func__);
out:
-   pr_debug("%s ret %d\n", __func__, ret);
+   pr_debug("ret %d\n", ret);
    return ret;
}
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
-   pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
+   pr_debug("p2p_type = %d\n", p2p_type);
    memset(&init->u, 0, sizeof init->u);
    switch (p2p_type) {
    case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1402,7 +1399,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
    int ret;
    struct sk_buff *skb;
-   pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+   pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
             qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
    skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
@@ -1475,7 +1472,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
err1:
    free_ird(rhp, qhp->attr.max_ird);
out:
-   pr_debug("%s ret %d\n", __func__, ret);
+   pr_debug("ret %d\n", ret);
    return ret;
}
@@ -1492,8 +1489,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
    int free = 0;
    struct c4iw_ep *ep = NULL;
-   pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
-            __func__,
+   pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
             qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
             (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
@@ -1680,7 +1676,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
    }
    goto out;
err:
-   pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+   pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
             qhp->wq.sq.qid);
    /* disassociate the LLP connection */
@@ -1717,7 +1713,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
     */
    if (free)
        c4iw_put_ep(&ep->com);
-   pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
+   pr_debug("exit state %d\n", qhp->attr.state);
    return ret;
}
@@ -1747,7 +1743,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
    c4iw_qp_rem_ref(ib_qp);
-   pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+   pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
    return 0;
}
@@ -1766,7 +1762,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
    struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
    struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
-   pr_debug("%s ib_pd %p\n", __func__, pd);
+   pr_debug("ib_pd %p\n", pd);
    if (attrs->qp_type != IB_QPT_RC)
        return ERR_PTR(-EINVAL);
@@ -1937,8 +1933,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
    qhp->ibqp.qp_num = qhp->wq.sq.qid;
    init_timer(&(qhp->timer));
    INIT_LIST_HEAD(&qhp->db_fc_entry);
-   pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
-            __func__,
+   pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
             qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
             attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
             qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
@@ -1971,7 +1966,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    enum c4iw_qp_attr_mask mask = 0;
    struct c4iw_qp_attributes attrs;
-   pr_debug("%s ib_qp %p\n", __func__, ibqp);
+   pr_debug("ib_qp %p\n", ibqp);
    /* iwarp does not support the RTR state */
    if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -2017,7 +2012,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
-   pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+   pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
    return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
......
...@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table) ...@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{ {
pr_debug("%s entry 0x%x\n", __func__, entry); pr_debug("entry 0x%x\n", entry);
c4iw_id_free(id_table, entry); c4iw_id_free(id_table, entry);
} }
...@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) ...@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
} }
out: out:
mutex_unlock(&uctx->lock); mutex_unlock(&uctx->lock);
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max) if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur; rdev->stats.qid.max = rdev->stats.qid.cur;
...@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, ...@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL); entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry) if (!entry)
return; return;
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
entry->qid = qid; entry->qid = qid;
mutex_lock(&uctx->lock); mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->cqids); list_add_tail(&entry->entry, &uctx->cqids);
...@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) ...@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
} }
out: out:
mutex_unlock(&uctx->lock); mutex_unlock(&uctx->lock);
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max) if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur; rdev->stats.qid.max = rdev->stats.qid.cur;
...@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, ...@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL); entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry) if (!entry)
return; return;
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
entry->qid = qid; entry->qid = qid;
mutex_lock(&uctx->lock); mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids); list_add_tail(&entry->entry, &uctx->qpids);
...@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp) ...@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{ {
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size); pr_debug("addr 0x%x size %d\n", (u32)addr, size);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
if (addr) { if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
...@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) ...@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{ {
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size); pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock); mutex_unlock(&rdev->stats.lock);
...@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) ...@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
while (pbl_start < pbl_top) { while (pbl_start < pbl_top) {
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
pr_debug("%s failed to add PBL chunk (%x/%x)\n", pr_debug("failed to add PBL chunk (%x/%x)\n",
__func__, pbl_start, pbl_chunk); pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
pr_warn("Failed to add all PBL chunks (%x/%x)\n", pr_warn("Failed to add all PBL chunks (%x/%x)\n",
pbl_start, pbl_top - pbl_start); pbl_start, pbl_top - pbl_start);
...@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) ...@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
} }
pbl_chunk >>= 1; pbl_chunk >>= 1;
} else { } else {
pr_debug("%s added PBL chunk (%x/%x)\n", pr_debug("added PBL chunk (%x/%x)\n",
__func__, pbl_start, pbl_chunk); pbl_start, pbl_chunk);
pbl_start += pbl_chunk; pbl_start += pbl_chunk;
} }
} }
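The seeding loop above, repeated for the RQT and OCQP pools below, follows one strategy: offer the remaining range to the gen_pool as a single chunk, halve the chunk size whenever gen_pool_add() fails, and give up only once the chunk drops below a floor. A condensed sketch under assumed names:

#include <linux/genalloc.h>
#include <linux/kernel.h>

static int example_seed_pool(struct gen_pool *pool, unsigned long start,
			     unsigned long top, unsigned long chunk,
			     unsigned long min_chunk)
{
	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(pool, start, chunk, -1)) {
			if (chunk <= min_chunk)
				return -ENOMEM;	/* range only partly added */
			chunk >>= 1;		/* halve and retry */
		} else {
			start += chunk;		/* chunk committed, advance */
		}
	}
	return 0;
}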
...@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) ...@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{ {
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
if (!addr) if (!addr)
pr_warn_ratelimited("%s: Out of RQT memory\n", pr_warn_ratelimited("%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev)); pci_name(rdev->lldi.pdev));
...@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) ...@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{ {
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6); pr_debug("addr 0x%x size %d\n", addr, size << 6);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock); mutex_unlock(&rdev->stats.lock);
...@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev) ...@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
while (rqt_start < rqt_top) { while (rqt_start < rqt_top) {
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
pr_debug("%s failed to add RQT chunk (%x/%x)\n", pr_debug("failed to add RQT chunk (%x/%x)\n",
__func__, rqt_start, rqt_chunk); rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
pr_warn("Failed to add all RQT chunks (%x/%x)\n", pr_warn("Failed to add all RQT chunks (%x/%x)\n",
rqt_start, rqt_top - rqt_start); rqt_start, rqt_top - rqt_start);
...@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev) ...@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
} }
rqt_chunk >>= 1; rqt_chunk >>= 1;
} else { } else {
pr_debug("%s added RQT chunk (%x/%x)\n", pr_debug("added RQT chunk (%x/%x)\n",
__func__, rqt_start, rqt_chunk); rqt_start, rqt_chunk);
rqt_start += rqt_chunk; rqt_start += rqt_chunk;
} }
} }
...@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) ...@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{ {
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size); pr_debug("addr 0x%x size %d\n", (u32)addr, size);
if (addr) { if (addr) {
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
...@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) ...@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{ {
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size); pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
mutex_unlock(&rdev->stats.lock); mutex_unlock(&rdev->stats.lock);
...@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) ...@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
while (start < top) { while (start < top) {
chunk = min(top - start + 1, chunk); chunk = min(top - start + 1, chunk);
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
pr_debug("%s failed to add OCQP chunk (%x/%x)\n", pr_debug("failed to add OCQP chunk (%x/%x)\n",
__func__, start, chunk); start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) { if (chunk <= 1024 << MIN_OCQP_SHIFT) {
pr_warn("Failed to add all OCQP chunks (%x/%x)\n", pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
start, top - start); start, top - start);
...@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) ...@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
} }
chunk >>= 1; chunk >>= 1;
} else { } else {
pr_debug("%s added OCQP chunk (%x/%x)\n", pr_debug("added OCQP chunk (%x/%x)\n",
__func__, start, chunk); start, chunk);
start += chunk; start += chunk;
} }
} }
......
...@@ -466,14 +466,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe) ...@@ -466,14 +466,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
wmb(); wmb();
if (wq->sq.bar2_va) { if (wq->sq.bar2_va) {
if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) { if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
pr_debug("%s: WC wq->sq.pidx = %d\n", pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
__func__, wq->sq.pidx);
pio_copy((u64 __iomem *) pio_copy((u64 __iomem *)
(wq->sq.bar2_va + SGE_UDB_WCDOORBELL), (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
(u64 *)wqe); (u64 *)wqe);
} else { } else {
pr_debug("%s: DB wq->sq.pidx = %d\n", pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
__func__, wq->sq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid), writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
wq->sq.bar2_va + SGE_UDB_KDOORBELL); wq->sq.bar2_va + SGE_UDB_KDOORBELL);
} }
...@@ -493,14 +491,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, ...@@ -493,14 +491,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
wmb(); wmb();
if (wq->rq.bar2_va) { if (wq->rq.bar2_va) {
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) { if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
pr_debug("%s: WC wq->rq.pidx = %d\n", pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
__func__, wq->rq.pidx);
pio_copy((u64 __iomem *) pio_copy((u64 __iomem *)
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL), (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
(void *)wqe); (void *)wqe);
} else { } else {
pr_debug("%s: DB wq->rq.pidx = %d\n", pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
__func__, wq->rq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid), writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
wq->rq.bar2_va + SGE_UDB_KDOORBELL); wq->rq.bar2_va + SGE_UDB_KDOORBELL);
} }
...@@ -601,8 +597,8 @@ static inline void t4_swcq_produce(struct t4_cq *cq) ...@@ -601,8 +597,8 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
{ {
cq->sw_in_use++; cq->sw_in_use++;
if (cq->sw_in_use == cq->size) { if (cq->sw_in_use == cq->size) {
pr_debug("%s cxgb4 sw cq overflow cqid %u\n", pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
__func__, cq->cqid); __func__, cq->cqid);
cq->error = 1; cq->error = 1;
BUG_ON(1); BUG_ON(1);
} }
...@@ -673,8 +669,8 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) ...@@ -673,8 +669,8 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq) static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{ {
if (cq->sw_in_use == cq->size) { if (cq->sw_in_use == cq->size) {
pr_debug("%s cxgb4 sw cq overflow cqid %u\n", pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
__func__, cq->cqid); __func__, cq->cqid);
cq->error = 1; cq->error = 1;
BUG_ON(1); BUG_ON(1);
return NULL; return NULL;
......
config INFINIBAND_HNS config INFINIBAND_HNS
tristate "HNS RoCE Driver" tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON depends on NET_VENDOR_HISILICON
depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET depends on ARM64 || (COMPILE_TEST && 64BIT)
---help--- ---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hi1610 and further ICT SoCs. is used in Hisilicon Hip06 and further ICT SoCs exposed as
platform devices.
To compile this driver as a module, choose M here: the module To compile this driver as a module, choose M here: the module
will be called hns-roce. will be called hns-roce.
config INFINIBAND_HNS_HIP06
tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
---help---
RoCE driver support for the Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoCs. These RoCE engines are platform devices.
To compile this driver as a module, choose M here: the module
will be called hns-roce-hw-v1.
config INFINIBAND_HNS_HIP08
tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
---help---
RoCE driver support for the Hisilicon RoCE engine in the Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
To compile this driver as a module, choose M here: the module
will be called hns-roce-hw-v2.
...@@ -2,7 +2,13 @@ ...@@ -2,7 +2,13 @@
# Makefile for the Hisilicon RoCE drivers. # Makefile for the Hisilicon RoCE drivers.
# #
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o hns_roce_cq.o hns_roce_alloc.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
hns-roce-hw-v1-objs := hns_roce_hw_v1.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
hns-roce-hw-v2-objs := hns_roce_hw_v2.o
...@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, ...@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct ib_gid_attr gid_attr; struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah; struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff; u16 vlan_tag = 0xffff;
......
...@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj, ...@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
{ {
hns_roce_bitmap_free_range(bitmap, obj, 1, rr); hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
} }
EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt, int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj) int align, unsigned long *obj)
...@@ -160,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size, ...@@ -160,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf) struct hns_roce_buf *buf)
{ {
int i; int i;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG; u32 bits_per_long = BITS_PER_LONG;
if (buf->nbufs == 1) { if (buf->nbufs == 1) {
...@@ -171,12 +172,13 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size, ...@@ -171,12 +172,13 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
for (i = 0; i < buf->nbufs; ++i) for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf) if (buf->page_list[i].buf)
dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE, dma_free_coherent(dev, PAGE_SIZE,
buf->page_list[i].buf, buf->page_list[i].buf,
buf->page_list[i].map); buf->page_list[i].map);
kfree(buf->page_list); kfree(buf->page_list);
} }
} }
EXPORT_SYMBOL_GPL(hns_roce_buf_free);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf) struct hns_roce_buf *buf)
...@@ -184,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, ...@@ -184,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
int i = 0; int i = 0;
dma_addr_t t; dma_addr_t t;
struct page **pages; struct page **pages;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG; u32 bits_per_long = BITS_PER_LONG;
/* SQ/RQ buf less than one page, SQ + RQ = 8K */ /* SQ/RQ buf less than one page, SQ + RQ = 8K */
......
...@@ -38,69 +38,7 @@ ...@@ -38,69 +38,7 @@
#define CMD_POLL_TOKEN 0xffff #define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32 #define CMD_MAX_NUM 32
#define STATUS_MASK 0xff
#define CMD_TOKEN_MASK 0x1f #define CMD_TOKEN_MASK 0x1f
#define GO_BIT_TIMEOUT_MSECS 10000
enum {
HCR_TOKEN_OFFSET = 0x14,
HCR_STATUS_OFFSET = 0x18,
HCR_GO_BIT = 15,
};
static int cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
return (!!(status & (1 << HCR_GO_BIT)));
}
/* this function should be serialized with "hcr_mutex" */
static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
u64 in_param, u64 out_param,
u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct device *dev = &hr_dev->pdev->dev;
u32 __iomem *hcr = (u32 *)cmd->hcr;
int ret = -EAGAIN;
unsigned long end;
u32 val = 0;
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
while (cmd_pending(hr_dev)) {
if (time_after(jiffies, end)) {
dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
(int)end);
goto out;
}
cond_resched();
}
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
op);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
__raw_writeq(cpu_to_le64(in_param), hcr + 0);
__raw_writeq(cpu_to_le64(out_param), hcr + 2);
__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
/* Memory barrier */
wmb();
__raw_writel(cpu_to_le32(val), hcr + 5);
mmiowb();
ret = 0;
out:
return ret;
}
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param, static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u64 out_param, u32 in_modifier,
...@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
int event) int event)
{ {
struct hns_roce_cmdq *cmd = &hr_dev->cmd; struct hns_roce_cmdq *cmd = &hr_dev->cmd;
int ret = -EAGAIN; int ret;
mutex_lock(&cmd->hcr_mutex); mutex_lock(&cmd->hcr_mutex);
ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
in_modifier, op_modifier, op, token, op_modifier, op, token, event);
event);
mutex_unlock(&cmd->hcr_mutex); mutex_unlock(&cmd->hcr_mutex);
return ret; return ret;
...@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u8 op_modifier, u16 op, u8 op_modifier, u16 op,
unsigned long timeout) unsigned long timeout)
{ {
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
u8 __iomem *hcr = hr_dev->cmd.hcr;
unsigned long end = 0;
u32 status = 0;
int ret; int ret;
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
...@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
CMD_POLL_TOKEN, 0); CMD_POLL_TOKEN, 0);
if (ret) { if (ret) {
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n"); dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
goto out; return ret;
}
end = msecs_to_jiffies(timeout) + jiffies;
while (cmd_pending(hr_dev) && time_before(jiffies, end))
cond_resched();
if (cmd_pending(hr_dev)) {
dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
ret = -ETIMEDOUT;
goto out;
} }
status = le32_to_cpu((__force __be32) return hr_dev->hw->chk_mbox(hr_dev, timeout);
__raw_readl(hcr + HCR_STATUS_OFFSET));
if ((status & STATUS_MASK) != 0x1) {
dev_err(dev, "mailbox status 0x%x!\n", status);
ret = -EBUSY;
goto out;
}
out:
return ret;
} }
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
...@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
unsigned long timeout) unsigned long timeout)
{ {
struct hns_roce_cmdq *cmd = &hr_dev->cmd; struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cmd_context *context; struct hns_roce_cmd_context *context;
int ret = 0; struct device *dev = hr_dev->dev;
int ret;
spin_lock(&cmd->context_lock); spin_lock(&cmd->context_lock);
WARN_ON(cmd->free_head < 0); WARN_ON(cmd->free_head < 0);
...@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, ...@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
in_modifier, op_modifier, op, in_modifier, op_modifier, op,
timeout); timeout);
} }
EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{ {
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
mutex_init(&hr_dev->cmd.hcr_mutex); mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1); sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0; hr_dev->cmd.use_events = 0;
hr_dev->cmd.toggle = 1; hr_dev->cmd.toggle = 1;
hr_dev->cmd.max_cmds = CMD_MAX_NUM; hr_dev->cmd.max_cmds = CMD_MAX_NUM;
hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev, hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
HNS_ROCE_MAILBOX_SIZE, HNS_ROCE_MAILBOX_SIZE,
HNS_ROCE_MAILBOX_SIZE, 0); HNS_ROCE_MAILBOX_SIZE, 0);
...@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox ...@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox
return mailbox; return mailbox;
} }
EXPORT_SYMBOL_GPL(hns_roce_alloc_cmd_mailbox);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox) struct hns_roce_cmd_mailbox *mailbox)
...@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, ...@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma); dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox); kfree(mailbox);
} }
EXPORT_SYMBOL_GPL(hns_roce_free_cmd_mailbox);
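hns_roce_alloc_cmd_mailbox(), hns_roce_cmd_mbox() and hns_roce_free_cmd_mailbox() are exported above because the hw backends become separate modules in this series. A hedged usage sketch (the wrapper itself is hypothetical; the mailbox DMA address travels as in_param, as the driver's own SW2HW paths do):

static int example_post_context(struct hns_roce_dev *hr_dev,
				unsigned long obj, u16 op)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* ... fill mailbox->buf with the context to hand to firmware ... */

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, obj, 0, op,
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}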
...@@ -36,6 +36,56 @@ ...@@ -36,6 +36,56 @@
#define HNS_ROCE_MAILBOX_SIZE 4096 #define HNS_ROCE_MAILBOX_SIZE 4096
#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000 #define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
enum {
/* QPC BT commands */
HNS_ROCE_CMD_WRITE_QPC_BT0 = 0x0,
HNS_ROCE_CMD_WRITE_QPC_BT1 = 0x1,
HNS_ROCE_CMD_WRITE_QPC_BT2 = 0x2,
HNS_ROCE_CMD_READ_QPC_BT0 = 0x4,
HNS_ROCE_CMD_READ_QPC_BT1 = 0x5,
HNS_ROCE_CMD_READ_QPC_BT2 = 0x6,
HNS_ROCE_CMD_DESTROY_QPC_BT0 = 0x8,
HNS_ROCE_CMD_DESTROY_QPC_BT1 = 0x9,
HNS_ROCE_CMD_DESTROY_QPC_BT2 = 0xa,
/* QPC operation */
HNS_ROCE_CMD_MODIFY_QPC = 0x41,
HNS_ROCE_CMD_QUERY_QPC = 0x42,
/* CQC BT commands */
HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
HNS_ROCE_CMD_WRITE_CQC_BT2 = 0x12,
HNS_ROCE_CMD_READ_CQC_BT0 = 0x14,
HNS_ROCE_CMD_READ_CQC_BT1 = 0x15,
HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b,
HNS_ROCE_CMD_DESTROY_CQC_BT0 = 0x18,
HNS_ROCE_CMD_DESTROY_CQC_BT1 = 0x19,
HNS_ROCE_CMD_DESTROY_CQC_BT2 = 0x1a,
/* MPT BT commands */
HNS_ROCE_CMD_WRITE_MPT_BT0 = 0x20,
HNS_ROCE_CMD_WRITE_MPT_BT1 = 0x21,
HNS_ROCE_CMD_WRITE_MPT_BT2 = 0x22,
HNS_ROCE_CMD_READ_MPT_BT0 = 0x24,
HNS_ROCE_CMD_READ_MPT_BT1 = 0x25,
HNS_ROCE_CMD_READ_MPT_BT2 = 0x26,
HNS_ROCE_CMD_DESTROY_MPT_BT0 = 0x28,
HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
/* SRQC BT commands */
HNS_ROCE_CMD_WRITE_SRQC_BT0 = 0x30,
HNS_ROCE_CMD_WRITE_SRQC_BT1 = 0x31,
HNS_ROCE_CMD_WRITE_SRQC_BT2 = 0x32,
HNS_ROCE_CMD_READ_SRQC_BT0 = 0x34,
HNS_ROCE_CMD_READ_SRQC_BT1 = 0x35,
HNS_ROCE_CMD_READ_SRQC_BT2 = 0x36,
HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
};
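The new opcodes sit on a fixed stride: within each table's block the BT level occupies the low bits, and READ/DESTROY are WRITE + 0x4 / WRITE + 0x8 (the lone exception being HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b). A hypothetical helper relying on the WRITE/DESTROY part of that regularity:

static u16 example_bt_cmd(u16 base, int step_idx)
{
	/*
	 * e.g. HNS_ROCE_CMD_WRITE_QPC_BT0 + 2 == HNS_ROCE_CMD_WRITE_QPC_BT2,
	 * HNS_ROCE_CMD_DESTROY_MPT_BT0 + 1 == HNS_ROCE_CMD_DESTROY_MPT_BT1.
	 * The READ block is almost as regular, but READ_CQC_BT2 (0x1b)
	 * breaks the pattern, so a lookup table is safer there.
	 */
	return base + step_idx;
}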
enum { enum {
/* TPT commands */ /* TPT commands */
HNS_ROCE_CMD_SW2HW_MPT = 0xd, HNS_ROCE_CMD_SW2HW_MPT = 0xd,
......
...@@ -341,6 +341,7 @@ ...@@ -341,6 +341,7 @@
#define ROCEE_BT_CMD_L_REG 0x200 #define ROCEE_BT_CMD_L_REG 0x200
#define ROCEE_MB1_REG 0x210 #define ROCEE_MB1_REG 0x210
#define ROCEE_MB6_REG 0x224
#define ROCEE_DB_SQ_L_0_REG 0x230 #define ROCEE_DB_SQ_L_0_REG 0x230
#define ROCEE_DB_OTHERS_L_0_REG 0x238 #define ROCEE_DB_OTHERS_L_0_REG 0x238
#define ROCEE_QP1C_CFG0_0_REG 0x270 #define ROCEE_QP1C_CFG0_0_REG 0x270
...@@ -362,4 +363,26 @@ ...@@ -362,4 +363,26 @@
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34 #define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40 #define ROCEE_ECC_CERR_ALM0_REG 0xB40
/* V2 ROCEE REG */
#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
#define ROCEE_TX_CMQ_TAIL_REG 0x07010
#define ROCEE_TX_CMQ_HEAD_REG 0x07014
#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
#define ROCEE_RX_CMQ_DEPTH_REG 0x07020
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
#define ROCEE_VF_SMAC_CFG0_REG 0x12000
#define ROCEE_VF_SMAC_CFG1_REG 0x12004
#define ROCEE_VF_SGID_CFG0_REG 0x10000
#define ROCEE_VF_SGID_CFG1_REG 0x10004
#define ROCEE_VF_SGID_CFG2_REG 0x10008
#define ROCEE_VF_SGID_CFG3_REG 0x1000c
#define ROCEE_VF_SGID_CFG4_REG 0x10010
#endif /* _HNS_ROCE_COMMON_H */ #endif /* _HNS_ROCE_COMMON_H */
...@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq, ...@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID && if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR && event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) { event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
dev_err(&hr_dev->pdev->dev, dev_err(hr_dev->dev,
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n", "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
event_type, hr_cq->cqn); event_type, hr_cq->cqn);
return; return;
...@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, ...@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_uar *hr_uar, struct hns_roce_uar *hr_uar,
struct hns_roce_cq *hr_cq, int vector) struct hns_roce_cq *hr_cq, int vector)
{ {
struct hns_roce_cmd_mailbox *mailbox = NULL; struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_cq_table *cq_table = NULL; struct hns_roce_hem_table *mtt_table;
struct device *dev = &hr_dev->pdev->dev; struct hns_roce_cq_table *cq_table;
struct device *dev = hr_dev->dev;
dma_addr_t dma_handle; dma_addr_t dma_handle;
u64 *mtts = NULL; u64 *mtts;
int ret = 0; int ret;
cq_table = &hr_dev->cq_table; cq_table = &hr_dev->cq_table;
/* Get the physical address of cq buf */ /* Get the physical address of cq buf */
mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table, if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
mtt_table = &hr_dev->mr_table.mtt_cqe_table;
else
mtt_table = &hr_dev->mr_table.mtt_table;
mtts = hns_roce_table_find(hr_dev, mtt_table,
hr_mtt->first_seg, &dma_handle); hr_mtt->first_seg, &dma_handle);
if (!mtts) { if (!mtts) {
dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n"); dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
...@@ -182,21 +188,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev, ...@@ -182,21 +188,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{ {
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
int ret; int ret;
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn); ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
if (ret) if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret, dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn); hr_cq->cqn);
if (hr_dev->eq_table.eq) {
/* Wait for any interrupt handling in progress to finish */ /* Wait for any interrupt handling in progress to finish */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq); synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
/* wait until all interrupts are processed */ /* wait until all interrupts are processed */
if (atomic_dec_and_test(&hr_cq->refcount)) if (atomic_dec_and_test(&hr_cq->refcount))
complete(&hr_cq->free); complete(&hr_cq->free);
wait_for_completion(&hr_cq->free); wait_for_completion(&hr_cq->free);
}
spin_lock_irq(&cq_table->lock); spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn); radix_tree_delete(&cq_table->tree, hr_cq->cqn);
...@@ -205,6 +212,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) ...@@ -205,6 +212,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR); hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
} }
EXPORT_SYMBOL_GPL(hns_roce_free_cq);
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev, static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
struct ib_ucontext *context, struct ib_ucontext *context,
...@@ -218,6 +226,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev, ...@@ -218,6 +226,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
if (IS_ERR(*umem)) if (IS_ERR(*umem))
return PTR_ERR(*umem); return PTR_ERR(*umem);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
else
buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem), ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
(*umem)->page_shift, &buf->hr_mtt); (*umem)->page_shift, &buf->hr_mtt);
if (ret) if (ret)
...@@ -247,6 +259,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev, ...@@ -247,6 +259,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
if (ret) if (ret)
goto out; goto out;
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
else
buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages, ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
buf->hr_buf.page_shift, &buf->hr_mtt); buf->hr_buf.page_shift, &buf->hr_mtt);
if (ret) if (ret)
...@@ -281,13 +298,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -281,13 +298,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd; struct hns_roce_ib_create_cq ucmd;
struct hns_roce_cq *hr_cq = NULL; struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL; struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector; int vector = attr->comp_vector;
int cq_entries = attr->cqe; int cq_entries = attr->cqe;
int ret = 0; int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
...@@ -295,13 +312,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -295,13 +312,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL); hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
if (!hr_cq) if (!hr_cq)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
/* In v1 engine, parameter verification */ if (hr_dev->caps.min_cqes)
if (cq_entries < HNS_ROCE_MIN_CQE_NUM) cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
cq_entries = HNS_ROCE_MIN_CQE_NUM;
cq_entries = roundup_pow_of_two((unsigned int)cq_entries); cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1; hr_cq->ib_cq.cqe = cq_entries - 1;
...@@ -335,8 +351,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -335,8 +351,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
} }
uar = &hr_dev->priv_uar; uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG + hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
0x1000 * uar->index; DB_REG_OFFSET * uar->index;
} }
/* Allocate cq index, fill cq_context */ /* Allocate cq index, fill cq_context */
...@@ -353,7 +369,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -353,7 +369,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
* problems if tptr is set to zero here, so we initialize it in user * problems if tptr is set to zero here, so we initialize it in user
* space. * space.
*/ */
if (!context) if (!context && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0; *hr_cq->tptr_addr = 0;
/* Get created cq handler and carry out event */ /* Get created cq handler and carry out event */
...@@ -385,6 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -385,6 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
kfree(hr_cq); kfree(hr_cq);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
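The CQE-vs-WQE MTT selection just added to hns_roce_ib_get_cq_umem() and hns_roce_ib_alloc_cq_buf() is the same two-line branch in both places; a hypothetical helper capturing it:

static enum hns_roce_mtt_type example_cq_mtt_type(struct hns_roce_dev *hr_dev)
{
	/* hw v2 keeps CQE translation in its own multi-hop table */
	return hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE) ?
	       MTT_TYPE_CQE : MTT_TYPE_WQE;
}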
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{ {
...@@ -410,10 +427,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) ...@@ -410,10 +427,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{ {
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq; struct hns_roce_cq *cq;
cq = radix_tree_lookup(&hr_dev->cq_table.tree, cq = radix_tree_lookup(&hr_dev->cq_table.tree,
...@@ -429,7 +447,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) ...@@ -429,7 +447,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type) void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{ {
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq; struct hns_roce_cq *cq;
cq = radix_tree_lookup(&cq_table->tree, cq = radix_tree_lookup(&cq_table->tree,
......
...@@ -78,6 +78,8 @@ ...@@ -78,6 +78,8 @@
#define HNS_ROCE_MAX_GID_NUM 16 #define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16 #define HNS_ROCE_GID_SIZE 16
#define HNS_ROCE_HOP_NUM_0 0xff
#define BITMAP_NO_RR 0 #define BITMAP_NO_RR 0
#define BITMAP_RR 1 #define BITMAP_RR 1
...@@ -168,6 +170,11 @@ enum { ...@@ -168,6 +170,11 @@ enum {
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07, HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
}; };
enum hns_roce_mtt_type {
MTT_TYPE_WQE,
MTT_TYPE_CQE,
};
#define HNS_ROCE_CMD_SUCCESS 1 #define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_PORT_DOWN 0 #define HNS_ROCE_PORT_DOWN 0
...@@ -232,12 +239,17 @@ struct hns_roce_hem_table { ...@@ -232,12 +239,17 @@ struct hns_roce_hem_table {
int lowmem; int lowmem;
struct mutex mutex; struct mutex mutex;
struct hns_roce_hem **hem; struct hns_roce_hem **hem;
u64 **bt_l1;
dma_addr_t *bt_l1_dma_addr;
u64 **bt_l0;
dma_addr_t *bt_l0_dma_addr;
}; };
struct hns_roce_mtt { struct hns_roce_mtt {
unsigned long first_seg; unsigned long first_seg;
int order; int order;
int page_shift; int page_shift;
enum hns_roce_mtt_type mtt_type;
}; };
/* Only support 4K page size for mr register */ /* Only support 4K page size for mr register */
...@@ -255,6 +267,19 @@ struct hns_roce_mr { ...@@ -255,6 +267,19 @@ struct hns_roce_mr {
int type; /* MR's register type */ int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */ u64 *pbl_buf;/* MR's PBL space */
dma_addr_t pbl_dma_addr; /* MR's PBL space PA */ dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
u32 pbl_size;/* PA number in the PBL */
u64 pbl_ba;/* page table address */
u32 l0_chunk_last_num;/* L0 last number */
u32 l1_chunk_last_num;/* L1 last number */
u64 **pbl_bt_l2;/* PBL BT L2 */
u64 **pbl_bt_l1;/* PBL BT L1 */
u64 *pbl_bt_l0;/* PBL BT L0 */
dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */
dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */
dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */
u32 pbl_ba_pg_sz;/* BT chunk page size */
u32 pbl_buf_pg_sz;/* buf chunk page size */
u32 pbl_hop_num;/* multi-hop number */
}; };
struct hns_roce_mr_table { struct hns_roce_mr_table {
...@@ -262,6 +287,8 @@ struct hns_roce_mr_table { ...@@ -262,6 +287,8 @@ struct hns_roce_mr_table {
struct hns_roce_buddy mtt_buddy; struct hns_roce_buddy mtt_buddy;
struct hns_roce_hem_table mtt_table; struct hns_roce_hem_table mtt_table;
struct hns_roce_hem_table mtpt_table; struct hns_roce_hem_table mtpt_table;
struct hns_roce_buddy mtt_cqe_buddy;
struct hns_roce_hem_table mtt_cqe_table;
}; };
struct hns_roce_wq { struct hns_roce_wq {
...@@ -277,6 +304,12 @@ struct hns_roce_wq { ...@@ -277,6 +304,12 @@ struct hns_roce_wq {
void __iomem *db_reg_l; void __iomem *db_reg_l;
}; };
struct hns_roce_sge {
int sge_cnt; /* SGE num */
int offset;
int sge_shift;/* SGE size */
};
struct hns_roce_buf_list { struct hns_roce_buf_list {
void *buf; void *buf;
dma_addr_t map; dma_addr_t map;
...@@ -367,7 +400,6 @@ struct hns_roce_cmd_context { ...@@ -367,7 +400,6 @@ struct hns_roce_cmd_context {
struct hns_roce_cmdq { struct hns_roce_cmdq {
struct dma_pool *pool; struct dma_pool *pool;
u8 __iomem *hcr;
struct mutex hcr_mutex; struct mutex hcr_mutex;
struct semaphore poll_sem; struct semaphore poll_sem;
/* /*
...@@ -429,6 +461,9 @@ struct hns_roce_qp { ...@@ -429,6 +461,9 @@ struct hns_roce_qp {
atomic_t refcount; atomic_t refcount;
struct completion free; struct completion free;
struct hns_roce_sge sge;
u32 next_sge;
}; };
struct hns_roce_sqp { struct hns_roce_sqp {
...@@ -477,16 +512,20 @@ struct hns_roce_caps { ...@@ -477,16 +512,20 @@ struct hns_roce_caps {
u32 max_wqes; /* 16k */ u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */ u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */ u32 max_rq_desc_sz; /* 64 */
u32 max_srq_desc_sz;
int max_qp_init_rdma; int max_qp_init_rdma;
int max_qp_dest_rdma; int max_qp_dest_rdma;
int num_cqs; int num_cqs;
int max_cqes; int max_cqes;
int min_cqes;
u32 min_wqes;
int reserved_cqs; int reserved_cqs;
int num_aeq_vectors; /* 1 */ int num_aeq_vectors; /* 1 */
int num_comp_vectors; /* 32 ceq */ int num_comp_vectors; /* 32 ceq */
int num_other_vectors; int num_other_vectors;
int num_mtpts; int num_mtpts;
u32 num_mtt_segs; u32 num_mtt_segs;
u32 num_cqe_segs;
int reserved_mrws; int reserved_mrws;
int reserved_uars; int reserved_uars;
int num_pds; int num_pds;
...@@ -499,16 +538,47 @@ struct hns_roce_caps { ...@@ -499,16 +538,47 @@ struct hns_roce_caps {
int qpc_entry_sz; int qpc_entry_sz;
int irrl_entry_sz; int irrl_entry_sz;
int cqc_entry_sz; int cqc_entry_sz;
u32 pbl_ba_pg_sz;
u32 pbl_buf_pg_sz;
u32 pbl_hop_num;
int aeqe_depth; int aeqe_depth;
int ceqe_depth[HNS_ROCE_COMP_VEC_NUM]; int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
enum ib_mtu max_mtu; enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 srqc_bt_num;
u32 cqc_bt_num;
u32 mpt_bt_num;
u32 qpc_ba_pg_sz;
u32 qpc_buf_pg_sz;
u32 qpc_hop_num;
u32 srqc_ba_pg_sz;
u32 srqc_buf_pg_sz;
u32 srqc_hop_num;
u32 cqc_ba_pg_sz;
u32 cqc_buf_pg_sz;
u32 cqc_hop_num;
u32 mpt_ba_pg_sz;
u32 mpt_buf_pg_sz;
u32 mpt_hop_num;
u32 mtt_ba_pg_sz;
u32 mtt_buf_pg_sz;
u32 mtt_hop_num;
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
}; };
struct hns_roce_hw { struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable); int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
void (*hw_profile)(struct hns_roce_dev *hr_dev); int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
int (*hw_profile)(struct hns_roce_dev *hr_dev);
int (*hw_init)(struct hns_roce_dev *hr_dev); int (*hw_init)(struct hns_roce_dev *hr_dev);
void (*hw_exit)(struct hns_roce_dev *hr_dev); void (*hw_exit)(struct hns_roce_dev *hr_dev);
int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event);
int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index, void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
union ib_gid *gid); union ib_gid *gid);
void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
...@@ -519,8 +589,11 @@ struct hns_roce_hw { ...@@ -519,8 +589,11 @@ struct hns_roce_hw {
void (*write_cqc)(struct hns_roce_dev *hr_dev, void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector); dma_addr_t dma_handle, int nent, u32 vector);
int (*set_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev, int (*clear_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj); struct hns_roce_hem_table *table, int obj,
int step_idx);
int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
...@@ -535,12 +608,13 @@ struct hns_roce_hw { ...@@ -535,12 +608,13 @@ struct hns_roce_hw {
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr); int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq); int (*destroy_cq)(struct ib_cq *ibcq);
void *priv;
}; };
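struct hns_roce_hw is now the full seam between the common core and a generation-specific backend: command-queue setup, mailbox posting and polling, and per-level HEM programming all dispatch through it. A skeletal ops table as a backend module might provide it; every example_* symbol below is assumed, with signatures copied from the struct above:

/* assumed backend hooks; bodies omitted for brevity */
static int example_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
			     u64 out_param, u32 in_modifier, u8 op_modifier,
			     u16 op, u16 token, int event);
static int example_chk_mbox(struct hns_roce_dev *hr_dev,
			    unsigned long timeout);
static int example_set_hem(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, int obj,
			   int step_idx);
static int example_clear_hem(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table, int obj,
			     int step_idx);

static const struct hns_roce_hw example_hw_ops = {
	.post_mbox = example_post_mbox,	/* called under cmd->hcr_mutex */
	.chk_mbox  = example_chk_mbox,	/* poll-mode completion check */
	.set_hem   = example_set_hem,	/* program one BT level (step_idx) */
	.clear_hem = example_clear_hem,
};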
struct hns_roce_dev { struct hns_roce_dev {
struct ib_device ib_dev; struct ib_device ib_dev;
struct platform_device *pdev; struct platform_device *pdev;
struct pci_dev *pci_dev;
struct device *dev;
struct hns_roce_uar priv_uar; struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock; spinlock_t sm_lock;
...@@ -569,9 +643,12 @@ struct hns_roce_dev { ...@@ -569,9 +643,12 @@ struct hns_roce_dev {
int cmd_mod; int cmd_mod;
int loop_idc; int loop_idc;
u32 sdb_offset;
u32 odb_offset;
dma_addr_t tptr_dma_addr; /*only for hw v1*/ dma_addr_t tptr_dma_addr; /*only for hw v1*/
u32 tptr_size; /*only for hw v1*/ u32 tptr_size; /*only for hw v1*/
struct hns_roce_hw *hw; const struct hns_roce_hw *hw;
void *priv;
}; };
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
...@@ -723,6 +800,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -723,6 +800,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n); void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n); void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq); struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state); enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
...@@ -749,7 +827,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn); ...@@ -749,7 +827,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type); void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index); int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
extern struct hns_roce_hw hns_roce_hw_v1; void hns_roce_exit(struct hns_roce_dev *hr_dev);
#endif /* _HNS_ROCE_DEVICE_H */ #endif /* _HNS_ROCE_DEVICE_H */
...@@ -42,8 +42,162 @@ ...@@ -42,8 +42,162 @@
#define DMA_ADDR_T_SHIFT 12 #define DMA_ADDR_T_SHIFT 12
#define BT_BA_SHIFT 32 #define BT_BA_SHIFT 32
struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages, bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
gfp_t gfp_mask) {
if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
(hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
(hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
(hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
(hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
(hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))
return true;
return false;
}
EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
u32 bt_chunk_num)
{
int i;
for (i = 0; i < bt_chunk_num; i++)
if (hem[start_idx + i])
return false;
return true;
}
static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
{
int i;
for (i = 0; i < bt_chunk_num; i++)
if (bt[start_idx + i])
return false;
return true;
}
static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
{
if (check_whether_bt_num_3(table_type, hop_num))
return 3;
else if (check_whether_bt_num_2(table_type, hop_num))
return 2;
else if (check_whether_bt_num_1(table_type, hop_num))
return 1;
else
return 0;
}
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj,
struct hns_roce_hem_mhop *mhop)
{
struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
u32 table_idx;
u32 bt_num;
u32 chunk_size;
switch (table->type) {
case HEM_TYPE_QPC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
mhop->hop_num = hr_dev->caps.qpc_hop_num;
break;
case HEM_TYPE_MTPT:
mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
mhop->hop_num = hr_dev->caps.mpt_hop_num;
break;
case HEM_TYPE_CQC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
mhop->hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SRQC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
mhop->hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_MTT:
mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = mhop->bt_chunk_size / 8;
mhop->hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_CQE:
mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
+ PAGE_SHIFT);
mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ PAGE_SHIFT);
mhop->ba_l0_num = mhop->bt_chunk_size / 8;
mhop->hop_num = hr_dev->caps.cqe_hop_num;
break;
default:
dev_err(dev, "Table %d not support multi-hop addressing!\n",
table->type);
return -EINVAL;
}
if (!obj)
return 0;
/*
* QPC/MTPT/CQC/SRQC tables allocate HEM for the buffer pages.
* MTT/CQE tables allocate HEM for the BT pages.
*/
bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
chunk_ba_num = mhop->bt_chunk_size / 8;
chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
mhop->bt_chunk_size;
table_idx = (*obj & (table->num_obj - 1)) /
(chunk_size / table->obj_size);
switch (bt_num) {
case 3:
mhop->l2_idx = table_idx & (chunk_ba_num - 1);
mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
mhop->l0_idx = table_idx / chunk_ba_num / chunk_ba_num;
break;
case 2:
mhop->l1_idx = table_idx & (chunk_ba_num - 1);
mhop->l0_idx = table_idx / chunk_ba_num;
break;
case 1:
mhop->l0_idx = table_idx;
break;
default:
dev_err(dev, "Table %d not support hop_num = %d!\n",
table->type, mhop->hop_num);
return -EINVAL;
}
if (mhop->l0_idx >= mhop->ba_l0_num)
mhop->l0_idx %= mhop->ba_l0_num;
return 0;
}
EXPORT_SYMBOL_GPL(hns_roce_calc_hem_mhop);
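A worked example of the index math above: with 4 KB BT chunks, chunk_ba_num = 4096 / 8 = 512 base addresses per chunk, so for a three-level table table_idx is simply split into base-512 digits with l0 the most significant. A self-contained sketch (chunk_ba_num is assumed to be a power of two, which the page-size-derived chunk sizes guarantee):

static void example_mhop_split(u32 table_idx, u32 chunk_ba_num,
			       u32 *l0, u32 *l1, u32 *l2)
{
	*l2 = table_idx & (chunk_ba_num - 1);
	*l1 = (table_idx / chunk_ba_num) & (chunk_ba_num - 1);
	*l0 = table_idx / chunk_ba_num / chunk_ba_num;
	/*
	 * e.g. table_idx = 1000, chunk_ba_num = 512:
	 * l0 = 0, l1 = 1, l2 = 488 -- entry 488 of the L1 chunk
	 * addressed by slot 1 of L0 chunk 0.
	 */
}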
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
int npages,
unsigned long hem_alloc_size,
gfp_t gfp_mask)
{ {
struct hns_roce_hem_chunk *chunk = NULL; struct hns_roce_hem_chunk *chunk = NULL;
struct hns_roce_hem *hem; struct hns_roce_hem *hem;
...@@ -61,7 +215,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages, ...@@ -61,7 +215,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
hem->refcount = 0; hem->refcount = 0;
INIT_LIST_HEAD(&hem->chunk_list); INIT_LIST_HEAD(&hem->chunk_list);
order = get_order(HNS_ROCE_HEM_ALLOC_SIZE); order = get_order(hem_alloc_size);
while (npages > 0) { while (npages > 0) {
if (!chunk) { if (!chunk) {
...@@ -84,7 +238,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages, ...@@ -84,7 +238,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
* memory, directly return fail. * memory, directly return fail.
*/ */
mem = &chunk->mem[chunk->npages]; mem = &chunk->mem[chunk->npages];
buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order, buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask); &sg_dma_address(mem), gfp_mask);
if (!buf) if (!buf)
goto fail; goto fail;
...@@ -115,7 +269,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem) ...@@ -115,7 +269,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) { list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&hr_dev->pdev->dev, dma_free_coherent(hr_dev->dev,
chunk->mem[i].length, chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])), lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i])); sg_dma_address(&chunk->mem[i]));
...@@ -128,8 +282,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem) ...@@ -128,8 +282,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj) struct hns_roce_hem_table *table, unsigned long obj)
{ {
struct device *dev = &hr_dev->pdev->dev;
spinlock_t *lock = &hr_dev->bt_cmd_lock; spinlock_t *lock = &hr_dev->bt_cmd_lock;
struct device *dev = hr_dev->dev;
unsigned long end = 0; unsigned long end = 0;
unsigned long flags; unsigned long flags;
struct hns_roce_hem_iter iter; struct hns_roce_hem_iter iter;
...@@ -209,13 +363,184 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, ...@@ -209,13 +363,184 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret; return ret;
} }
int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj)
{
struct device *dev = hr_dev->dev;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem_iter iter;
u32 buf_chunk_size;
u32 bt_chunk_size;
u32 chunk_ba_num;
u32 hop_num;
u32 size;
u32 bt_num;
u64 hem_idx;
u64 bt_l1_idx = 0;
u64 bt_l0_idx = 0;
u64 bt_ba;
unsigned long mhop_obj = obj;
int bt_l1_allocated = 0;
int bt_l0_allocated = 0;
int step_idx;
int ret;
ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
if (ret)
return ret;
buf_chunk_size = mhop.buf_chunk_size;
bt_chunk_size = mhop.bt_chunk_size;
hop_num = mhop.hop_num;
chunk_ba_num = bt_chunk_size / 8;
bt_num = hns_roce_get_bt_num(table->type, hop_num);
switch (bt_num) {
case 3:
hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
bt_l0_idx = mhop.l0_idx;
break;
case 2:
hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
bt_l0_idx = mhop.l0_idx;
break;
case 1:
hem_idx = mhop.l0_idx;
break;
default:
dev_err(dev, "Table %d not support hop_num = %d!\n",
table->type, hop_num);
return -EINVAL;
}
mutex_lock(&table->mutex);
if (table->hem[hem_idx]) {
++table->hem[hem_idx]->refcount;
goto out;
}
/* alloc L1 BA's chunk */
if ((check_whether_bt_num_3(table->type, hop_num) ||
check_whether_bt_num_2(table->type, hop_num)) &&
!table->bt_l0[bt_l0_idx]) {
table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
&(table->bt_l0_dma_addr[bt_l0_idx]),
GFP_KERNEL);
if (!table->bt_l0[bt_l0_idx]) {
ret = -ENOMEM;
goto out;
}
bt_l0_allocated = 1;
/* set base address to hardware */
if (table->type < HEM_TYPE_MTT) {
step_idx = 0;
if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
ret = -ENODEV;
dev_err(dev, "set HEM base address to HW failed!\n");
goto err_dma_alloc_l1;
}
}
}
/* alloc L2 BA's chunk */
if (check_whether_bt_num_3(table->type, hop_num) &&
!table->bt_l1[bt_l1_idx]) {
table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
&(table->bt_l1_dma_addr[bt_l1_idx]),
GFP_KERNEL);
if (!table->bt_l1[bt_l1_idx]) {
ret = -ENOMEM;
goto err_dma_alloc_l1;
}
bt_l1_allocated = 1;
*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
table->bt_l1_dma_addr[bt_l1_idx];
/* set base address to hardware */
step_idx = 1;
if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
ret = -ENODEV;
dev_err(dev, "set HEM base address to HW failed!\n");
goto err_alloc_hem_buf;
}
}
/*
* alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
size >> PAGE_SHIFT,
size,
(table->lowmem ? GFP_KERNEL :
GFP_HIGHUSER) | __GFP_NOWARN);
if (!table->hem[hem_idx]) {
ret = -ENOMEM;
goto err_alloc_hem_buf;
}
hns_roce_hem_first(table->hem[hem_idx], &iter);
bt_ba = hns_roce_hem_addr(&iter);
if (table->type < HEM_TYPE_MTT) {
if (hop_num == 2) {
*(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
step_idx = 2;
} else if (hop_num == 1) {
*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
step_idx = 1;
} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
step_idx = 0;
}
/* set HEM base address to hardware */
if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
ret = -ENODEV;
dev_err(dev, "set HEM base address to HW failed!\n");
goto err_alloc_hem_buf;
}
} else if (hop_num == 2) {
*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
}
++table->hem[hem_idx]->refcount;
goto out;
err_alloc_hem_buf:
if (bt_l1_allocated) {
dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
table->bt_l1_dma_addr[bt_l1_idx]);
table->bt_l1[bt_l1_idx] = NULL;
}
err_dma_alloc_l1:
if (bt_l0_allocated) {
dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
table->bt_l0_dma_addr[bt_l0_idx]);
table->bt_l0[bt_l0_idx] = NULL;
}
out:
mutex_unlock(&table->mutex);
return ret;
}
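For reference, the index flattening in the switch above can be checked in isolation: each base-address-table (BA) chunk stores bt_chunk_size / 8 entries of 64-bit DMA addresses, so a three-hop object index (l0, l1, l2) flattens as below. A minimal stand-alone sketch, not driver code; all values are hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bt_chunk_size = 4096;			/* hypothetical */
	uint32_t chunk_ba_num = bt_chunk_size / 8;	/* 64-bit BAs per chunk */
	uint32_t l0 = 1, l1 = 2, l2 = 3;		/* hypothetical mhop indexes */

	/* three BT levels: l0 picks an L1 table, l1 an L2 table, l2 a HEM chunk */
	uint64_t hem_idx = (uint64_t)l0 * chunk_ba_num * chunk_ba_num +
			   (uint64_t)l1 * chunk_ba_num + l2;
	uint64_t bt_l1_idx = (uint64_t)l0 * chunk_ba_num + l1;

	printf("hem_idx=%llu bt_l1_idx=%llu\n",
	       (unsigned long long)hem_idx, (unsigned long long)bt_l1_idx);
	return 0;
}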
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
struct device *dev = &hr_dev->pdev->dev;
struct device *dev = hr_dev->dev;
int ret = 0;
unsigned long i;
if (hns_roce_check_whether_mhop(hr_dev, table->type))
return hns_roce_table_mhop_get(hr_dev, table, obj);
i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
table->obj_size);
@@ -228,6 +553,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
table->hem[i] = hns_roce_alloc_hem(hr_dev,
HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
HNS_ROCE_HEM_ALLOC_SIZE,
(table->lowmem ? GFP_KERNEL :
GFP_HIGHUSER) | __GFP_NOWARN);
if (!table->hem[i]) {
@@ -237,6 +563,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
/* Set HEM base address(128K/page, pa) to Hardware */
if (hns_roce_set_hem(hr_dev, table, obj)) {
hns_roce_free_hem(hr_dev, table->hem[i]);
table->hem[i] = NULL;
ret = -ENODEV;
dev_err(dev, "set HEM base address to HW failed.\n");
goto out;
@@ -248,12 +576,131 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
return ret;
}
void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj,
int check_refcount)
{
struct device *dev = hr_dev->dev;
struct hns_roce_hem_mhop mhop;
unsigned long mhop_obj = obj;
u32 bt_chunk_size;
u32 chunk_ba_num;
u32 hop_num;
u32 start_idx;
u32 bt_num;
u64 hem_idx;
u64 bt_l1_idx = 0;
int ret;
ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
if (ret)
return;
bt_chunk_size = mhop.bt_chunk_size;
hop_num = mhop.hop_num;
chunk_ba_num = bt_chunk_size / 8;
bt_num = hns_roce_get_bt_num(table->type, hop_num);
switch (bt_num) {
case 3:
hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
break;
case 2:
hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
break;
case 1:
hem_idx = mhop.l0_idx;
break;
default:
dev_err(dev, "Table %d not support hop_num = %d!\n",
table->type, hop_num);
return;
}
mutex_lock(&table->mutex);
if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
mutex_unlock(&table->mutex);
return;
}
if (table->type < HEM_TYPE_MTT && hop_num == 1) {
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
dev_warn(dev, "Clear HEM base address failed.\n");
} else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
dev_warn(dev, "Clear HEM base address failed.\n");
} else if (table->type < HEM_TYPE_MTT &&
hop_num == HNS_ROCE_HOP_NUM_0) {
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
dev_warn(dev, "Clear HEM base address failed.\n");
}
/*
* free buffer space chunk for QPC/MTPT/CQC/SRQC.
* free bt space chunk for MTT/CQE.
*/
hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
table->hem[hem_idx] = NULL;
if (check_whether_bt_num_2(table->type, hop_num)) {
start_idx = mhop.l0_idx * chunk_ba_num;
if (hns_roce_check_hem_null(table->hem, start_idx,
chunk_ba_num)) {
if (table->type < HEM_TYPE_MTT &&
hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
dev_warn(dev, "Clear HEM base address failed.\n");
dma_free_coherent(dev, bt_chunk_size,
table->bt_l0[mhop.l0_idx],
table->bt_l0_dma_addr[mhop.l0_idx]);
table->bt_l0[mhop.l0_idx] = NULL;
}
} else if (check_whether_bt_num_3(table->type, hop_num)) {
start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
mhop.l1_idx * chunk_ba_num;
if (hns_roce_check_hem_null(table->hem, start_idx,
chunk_ba_num)) {
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
dev_warn(dev, "Clear HEM base address failed.\n");
dma_free_coherent(dev, bt_chunk_size,
table->bt_l1[bt_l1_idx],
table->bt_l1_dma_addr[bt_l1_idx]);
table->bt_l1[bt_l1_idx] = NULL;
start_idx = mhop.l0_idx * chunk_ba_num;
if (hns_roce_check_bt_null(table->bt_l1, start_idx,
chunk_ba_num)) {
if (hr_dev->hw->clear_hem(hr_dev, table, obj,
0))
dev_warn(dev, "Clear HEM base address failed.\n");
dma_free_coherent(dev, bt_chunk_size,
table->bt_l0[mhop.l0_idx],
table->bt_l0_dma_addr[mhop.l0_idx]);
table->bt_l0[mhop.l0_idx] = NULL;
}
}
}
mutex_unlock(&table->mutex);
}
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
struct device *dev = &hr_dev->pdev->dev;
struct device *dev = hr_dev->dev;
unsigned long i;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_table_mhop_put(hr_dev, table, obj, 1);
return;
}
i = (obj & (table->num_obj - 1)) /
(HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
@@ -261,7 +708,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
if (--table->hem[i]->refcount == 0) {
/* Clear HEM base address */
if (hr_dev->hw->clear_hem(hr_dev, table, obj))
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
dev_warn(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -271,23 +718,46 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
mutex_unlock(&table->mutex);
}
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle)
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj, dma_addr_t *dma_handle)
{
struct hns_roce_hem_chunk *chunk;
unsigned long idx;
struct hns_roce_hem_mhop mhop;
int i;
int offset, dma_offset;
struct hns_roce_hem *hem;
struct page *page = NULL;
unsigned long mhop_obj = obj;
unsigned long idx;
int offset, dma_offset;
int i, j;
u32 hem_idx = 0;
if (!table->lowmem)
return NULL;
mutex_lock(&table->mutex);
idx = (obj & (table->num_obj - 1)) * table->obj_size;
hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
idx = (obj & (table->num_obj - 1)) * table->obj_size;
hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
} else {
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
/* mtt mhop */
i = mhop.l0_idx;
j = mhop.l1_idx;
if (mhop.hop_num == 2)
hem_idx = i * (mhop.bt_chunk_size / 8) + j;
else if (mhop.hop_num == 1 ||
mhop.hop_num == HNS_ROCE_HOP_NUM_0)
hem_idx = i;
hem = table->hem[hem_idx];
dma_offset = offset = (obj & (table->num_obj - 1)) *
table->obj_size % mhop.bt_chunk_size;
if (mhop.hop_num == 2)
dma_offset = offset = 0;
}
if (!hem)
goto out;
@@ -314,14 +784,21 @@ void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
}
EXPORT_SYMBOL_GPL(hns_roce_table_find);
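The non-mhop branch above maps an object number to a chunk slot and a byte offset inside that 128K chunk. A stand-alone sketch of that arithmetic, with hypothetical sizes:

#include <stdio.h>

#define HNS_ROCE_TABLE_CHUNK_SIZE (128 * 1024)	/* 128K, as in the driver */

int main(void)
{
	unsigned long num_obj = 4096;	/* hypothetical, power of two */
	unsigned long obj_size = 128;	/* hypothetical bytes per object */
	unsigned long obj = 2000;

	unsigned long idx = (obj & (num_obj - 1)) * obj_size;
	unsigned long chunk = idx / HNS_ROCE_TABLE_CHUNK_SIZE;	/* table->hem[] slot */
	unsigned long offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;	/* offset inside chunk */

	printf("chunk=%lu offset=%lu\n", chunk, offset);	/* chunk=1 offset=124928 */
	return 0;
}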
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
struct hns_roce_hem_mhop mhop;
unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
unsigned long i = 0;
unsigned long i;
int ret = 0;
int ret;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
inc = mhop.bt_chunk_size / table->obj_size;
}
/* Allocate MTT entry memory according to chunk(128K) */
for (i = start; i <= end; i += inc) {
@@ -344,10 +821,17 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
struct hns_roce_hem_mhop mhop;
unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
unsigned long i;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
inc = mhop.bt_chunk_size / table->obj_size;
}
for (i = start; i <= end;
i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
i += inc)
hns_roce_table_put(hr_dev, table, i);
}
@@ -356,15 +840,119 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long obj_size, unsigned long nobj,
int use_lowmem)
{
struct device *dev = hr_dev->dev;
unsigned long obj_per_chunk;
unsigned long num_hem;
obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
if (!hns_roce_check_whether_mhop(hr_dev, type)) {
obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
if (!table->hem)
return -ENOMEM;
} else {
unsigned long buf_chunk_size;
unsigned long bt_chunk_size;
unsigned long bt_chunk_num;
unsigned long num_bt_l0 = 0;
u32 hop_num;
switch (type) {
case HEM_TYPE_QPC:
buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_bt_num;
hop_num = hr_dev->caps.qpc_hop_num;
break;
case HEM_TYPE_MTPT:
buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.mpt_bt_num;
hop_num = hr_dev->caps.mpt_hop_num;
break;
case HEM_TYPE_CQC:
buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_bt_num;
hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SRQC:
buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.srqc_bt_num;
hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_MTT:
buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_CQE:
buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.cqe_hop_num;
break;
default:
dev_err(dev,
"Table %d not support to init hem table here!\n",
type);
return -EINVAL;
}
obj_per_chunk = buf_chunk_size / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
bt_chunk_num = bt_chunk_size / 8;
if (table->type >= HEM_TYPE_MTT)
num_bt_l0 = bt_chunk_num;
table->hem = kcalloc(num_hem, sizeof(*table->hem),
GFP_KERNEL);
if (!table->hem)
goto err_kcalloc_hem_buf;
if (check_whether_bt_num_3(table->type, hop_num)) {
unsigned long num_bt_l1;
num_bt_l1 = (num_hem + bt_chunk_num - 1) /
bt_chunk_num;
table->bt_l1 = kcalloc(num_bt_l1,
sizeof(*table->bt_l1),
GFP_KERNEL);
if (!table->bt_l1)
goto err_kcalloc_bt_l1;
table->bt_l1_dma_addr = kcalloc(num_bt_l1,
sizeof(*table->bt_l1_dma_addr),
GFP_KERNEL);
if (!table->bt_l1_dma_addr)
goto err_kcalloc_l1_dma;
}
table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
if (!table->hem)
return -ENOMEM;
if (check_whether_bt_num_2(table->type, hop_num) ||
check_whether_bt_num_3(table->type, hop_num)) {
table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
GFP_KERNEL);
if (!table->bt_l0)
goto err_kcalloc_bt_l0;
table->bt_l0_dma_addr = kcalloc(num_bt_l0,
sizeof(*table->bt_l0_dma_addr),
GFP_KERNEL);
if (!table->bt_l0_dma_addr)
goto err_kcalloc_l0_dma;
}
}
table->type = type;
table->num_hem = num_hem;
@@ -374,18 +962,72 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
mutex_init(&table->mutex);
return 0;
err_kcalloc_l0_dma:
kfree(table->bt_l0);
table->bt_l0 = NULL;
err_kcalloc_bt_l0:
kfree(table->bt_l1_dma_addr);
table->bt_l1_dma_addr = NULL;
err_kcalloc_l1_dma:
kfree(table->bt_l1);
table->bt_l1 = NULL;
err_kcalloc_bt_l1:
kfree(table->hem);
table->hem = NULL;
err_kcalloc_hem_buf:
return -ENOMEM;
}
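The per-type sizes selected in the switch above all follow one pattern: the caps field holds an extra page-order, so a chunk is 1 << (pg_sz + PAGE_SHIFT) bytes, and a BA chunk holds size/8 64-bit entries. A stand-alone sketch; PAGE_SHIFT of 12 and the caps values are assumptions:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages */

int main(void)
{
	unsigned int qpc_buf_pg_sz = 0;	/* hypothetical caps value: 1 page */
	unsigned int qpc_ba_pg_sz = 1;	/* hypothetical caps value: 2 pages */

	unsigned long buf_chunk_size = 1UL << (qpc_buf_pg_sz + PAGE_SHIFT);
	unsigned long bt_chunk_size = 1UL << (qpc_ba_pg_sz + PAGE_SHIFT);
	unsigned long bt_chunk_num = bt_chunk_size / 8;	/* 64-bit BAs */

	/* 4096-byte buffer chunks, 8192-byte BA chunks of 1024 entries */
	printf("buf=%lu bt=%lu entries=%lu\n",
	       buf_chunk_size, bt_chunk_size, bt_chunk_num);
	return 0;
}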
void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table)
{
struct hns_roce_hem_mhop mhop;
u32 buf_chunk_size;
int i;
u64 obj;
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
mhop.bt_chunk_size;
for (i = 0; i < table->num_hem; ++i) {
obj = i * buf_chunk_size / table->obj_size;
if (table->hem[i])
hns_roce_table_mhop_put(hr_dev, table, obj, 0);
}
kfree(table->hem);
table->hem = NULL;
kfree(table->bt_l1);
table->bt_l1 = NULL;
kfree(table->bt_l1_dma_addr);
table->bt_l1_dma_addr = NULL;
kfree(table->bt_l0);
table->bt_l0 = NULL;
kfree(table->bt_l0_dma_addr);
table->bt_l0_dma_addr = NULL;
}
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table)
{
struct device *dev = &hr_dev->pdev->dev;
struct device *dev = hr_dev->dev;
unsigned long i;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_cleanup_mhop_hem_table(hr_dev, table);
return;
}
for (i = 0; i < table->num_hem; ++i)
if (table->hem[i]) {
if (hr_dev->hw->clear_hem(hr_dev, table,
i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size, 0))
dev_err(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -401,4 +1043,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->mr_table.mtt_cqe_table);
}
@@ -47,6 +47,7 @@ enum {
/* UNMAP HEM */
HEM_TYPE_MTT,
HEM_TYPE_CQE,
HEM_TYPE_IRRL,
};
@@ -54,6 +55,18 @@ enum {
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
(sizeof(struct scatterlist)))
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
#define check_whether_bt_num_2(type, hop_num) \
((type < HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == 2))
#define check_whether_bt_num_1(type, hop_num) \
((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
(type >= HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
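These macros encode how many base-address-table levels a (type, hop_num) pair needs: context tables (QPC/MTPT/CQC/SRQC, all below HEM_TYPE_MTT) count the HEM chunk itself as one level, while MTT/CQE do not. A stand-alone demonstration with stand-in constants; the enum values and the HNS_ROCE_HOP_NUM_0 marker of 0xff are assumptions:

#include <stdio.h>

enum { HEM_TYPE_QPC, HEM_TYPE_MTPT, HEM_TYPE_CQC, HEM_TYPE_SRQC,
       HEM_TYPE_MTT, HEM_TYPE_CQE, HEM_TYPE_IRRL };
#define HNS_ROCE_HOP_NUM_0 0xff	/* assumed "no hop" marker */

#define check_whether_bt_num_3(type, hop_num) \
	(type < HEM_TYPE_MTT && hop_num == 2)
#define check_whether_bt_num_2(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == 1) || \
	(type >= HEM_TYPE_MTT && hop_num == 2))
#define check_whether_bt_num_1(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
	(type >= HEM_TYPE_MTT && hop_num == 1) || \
	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))

int main(void)
{
	/* QPC with two hops: L0 + L1 + HEM chunk = three levels */
	printf("%d\n", check_whether_bt_num_3(HEM_TYPE_QPC, 2));	/* 1 */
	/* MTT with two hops: L0 + HEM chunk = two levels */
	printf("%d\n", check_whether_bt_num_2(HEM_TYPE_MTT, 2));	/* 1 */
	/* MTT with one hop: the HEM chunk alone = one level */
	printf("%d\n", check_whether_bt_num_1(HEM_TYPE_MTT, 1));	/* 1 */
	return 0;
}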
enum {
HNS_ROCE_HEM_PAGE_SHIFT = 12,
HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
@@ -77,12 +90,23 @@ struct hns_roce_hem_iter {
int page_idx;
};
struct hns_roce_hem_mhop {
u32 hop_num;
u32 buf_chunk_size;
u32 bt_chunk_size;
u32 ba_l0_num;
u32 l0_idx;/* level 0 base address table index */
u32 l1_idx;/* level 1 base address table index */
u32 l2_idx;/* level 2 base address table index */
};
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
@@ -97,6 +121,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj,
struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
@@ -105,7 +133,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
iter->chunk = list_empty(&hem->chunk_list) ? NULL :
list_entry(hem->chunk_list.next,
struct hns_roce_hem_chunk, list);
iter->page_idx = 0;
}
static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
......
@@ -34,6 +34,7 @@
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
@@ -472,7 +473,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
dma_addr_t sdb_dma_addr;
u32 val;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
/* Configure extend SDB threshold */
@@ -511,7 +512,7 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
dma_addr_t odb_dma_addr;
u32 val;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
/* Configure extend ODB threshold */
@@ -547,7 +548,7 @@ static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
dma_addr_t odb_dma_addr;
int ret = 0;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
@@ -668,7 +669,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
u8 port = 0;
u8 sl;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
/* Reserved cq for loop qp */
@@ -816,7 +817,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
int ret;
int i;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
@@ -850,7 +851,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
u32 odb_evt_mod;
int ret = 0;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
memset(db, 0, sizeof(*db));
@@ -906,7 +907,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
unsigned long end =
msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
@@ -982,7 +983,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
hr_dev = to_hr_dev(mr_work->ib_dev);
dev = &hr_dev->pdev->dev;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
mr_free_cq = free_mr->mr_free_cq;
@@ -1038,7 +1039,7 @@ int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
int npages;
int ret = 0;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
if (mr->enabled) {
@@ -1103,7 +1104,7 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_db_table *db;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
if (db->sdb_ext_mod) {
@@ -1133,7 +1134,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
struct hns_roce_raq_table *raq;
struct device *dev = &hr_dev->pdev->dev;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
raq = &priv->raq_table;
raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
@@ -1210,7 +1211,7 @@ static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_raq_table *raq;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
raq = &priv->raq_table;
dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
@@ -1244,7 +1245,7 @@ static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
int ret;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
@@ -1286,7 +1287,7 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
@@ -1304,7 +1305,7 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
struct hns_roce_buf_list *tptr_buf;
struct hns_roce_v1_priv *priv;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
/*
@@ -1330,7 +1331,7 @@ static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_buf_list *tptr_buf;
struct hns_roce_v1_priv *priv;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
@@ -1344,7 +1345,7 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
int ret = 0;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
@@ -1368,7 +1369,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_free_mr *free_mr;
struct hns_roce_v1_priv *priv;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
flush_workqueue(free_mr->free_mr_wq);
@@ -1432,7 +1433,7 @@ static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_des_qp *des_qp;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
des_qp = &priv->des_qp;
des_qp->requeue_flag = 1;
@@ -1450,7 +1451,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_des_qp *des_qp;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
des_qp = &priv->des_qp;
des_qp->requeue_flag = 0;
@@ -1458,7 +1459,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
destroy_workqueue(des_qp->qp_wq);
}
void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
int i = 0;
struct hns_roce_caps *caps = &hr_dev->caps;
@@ -1474,7 +1475,9 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
@@ -1524,6 +1527,8 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
ROCEE_ACK_DELAY_REG));
caps->max_mtu = IB_MTU_2048;
return 0;
}
int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
@@ -1616,6 +1621,79 @@ void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
hns_roce_db_free(hr_dev);
}
static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
return (!!(status & (1 << HCR_GO_BIT)));
}
int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier,
u16 op, u16 token, int event)
{
u32 *hcr = (u32 *)(hr_dev->reg_base + ROCEE_MB1_REG);
unsigned long end;
u32 val = 0;
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
while (hns_roce_v1_cmd_pending(hr_dev)) {
if (time_after(jiffies, end)) {
dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
(int)jiffies, (int)end);
return -EAGAIN;
}
cond_resched();
}
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
op);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
__raw_writeq(cpu_to_le64(in_param), hcr + 0);
__raw_writeq(cpu_to_le64(out_param), hcr + 2);
__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
/* Memory barrier */
wmb();
__raw_writel(cpu_to_le32(val), hcr + 5);
mmiowb();
return 0;
}
static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
unsigned long timeout)
{
u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
unsigned long end = 0;
u32 status = 0;
end = msecs_to_jiffies(timeout) + jiffies;
while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
cond_resched();
if (hns_roce_v1_cmd_pending(hr_dev)) {
dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
return -ETIMEDOUT;
}
status = le32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_STATUS_OFFSET));
if ((status & STATUS_MASK) != 0x1) {
dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
return -EBUSY;
}
return 0;
}
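Taken together, the two helpers above implement a poll-mode handshake on the mailbox GO bit: wait for it to clear, program the command, set it, and poll until hardware clears it again before checking the status byte. A stand-alone sketch of that handshake; all names are illustrative stand-ins, not driver symbols:

#include <stdio.h>

#define HCR_GO_BIT 15

static unsigned int mb6_reg;	/* stands in for ROCEE_MB6_REG */

static int cmd_pending(void)
{
	return !!(mb6_reg & (1u << HCR_GO_BIT));
}

int main(void)
{
	/* driver: wait until no command is in flight */
	while (cmd_pending())
		;
	/* driver: program the command, then hand it to hardware */
	mb6_reg |= 1u << HCR_GO_BIT;
	/* hardware side (simulated): clears GO when the command completes */
	mb6_reg &= ~(1u << HCR_GO_BIT);
	/* driver: completion observed; the status byte would be checked next */
	printf("pending=%d\n", cmd_pending());	/* 0 */
	return 0;
}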
void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
union ib_gid *gid)
{
@@ -1941,7 +2019,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
dma_addr_t tptr_dma_addr;
int offset;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
cq_context = mb_buf;
@@ -2280,7 +2358,7 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj)
struct hns_roce_hem_table *table, int obj, int step_idx)
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
@@ -2289,7 +2367,7 @@ int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
void __iomem *bt_cmd;
u64 bt_ba = 0;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
switch (table->type) {
case HEM_TYPE_QPC:
@@ -2448,7 +2526,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return -ENOMEM;
/* Search QP buf's MTTs */
mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "qp buf pa find failed\n");
@@ -2595,7 +2673,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return -ENOMEM;
/* Search qp buf's mtts */
mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (mtts == NULL) {
dev_err(dev, "qp buf pa find failed\n");
@@ -2603,8 +2681,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
/* Search IRRL's mtts */
mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
&dma_handle_2);
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
hr_qp->qpn, &dma_handle_2);
if (mtts_2 == NULL) {
dev_err(dev, "qp irrl_table find failed\n");
goto out;
@@ -3143,7 +3221,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
if (ibqp->uobject) {
hr_qp->rq.db_reg_l = hr_dev->reg_base +
ROCEE_DB_OTHERS_L_0_REG +
hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
}
@@ -3664,7 +3742,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
hr_dev = to_hr_dev(qp_work_entry->ib_dev);
dev = &hr_dev->pdev->dev;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
hr_qp = qp_work_entry->qp;
qpn = hr_qp->qpn;
@@ -3781,7 +3859,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
qp_work->sche_cnt = qp_work_entry.sche_cnt;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
queue_work(priv->des_qp.qp_wq, &qp_work->work);
dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
}
@@ -3841,13 +3919,13 @@ int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
return ret;
}
struct hns_roce_v1_priv hr_v1_priv;
struct hns_roce_hw hns_roce_hw_v1 = {
static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile,
.hw_init = hns_roce_v1_init,
.hw_exit = hns_roce_v1_exit,
.post_mbox = hns_roce_v1_post_mbox,
.chk_mbox = hns_roce_v1_chk_mbox,
.set_gid = hns_roce_v1_set_gid,
.set_mac = hns_roce_v1_set_mac,
.set_mtu = hns_roce_v1_set_mtu,
@@ -3863,5 +3941,258 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
.priv = &hr_v1_priv,
};
static const struct of_device_id hns_roce_of_match[] = {
{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
{},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);
static const struct acpi_device_id hns_roce_acpi_match[] = {
{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
{},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
static int hns_roce_node_match(struct device *dev, void *fwnode)
{
return dev->fwnode == fwnode;
}
static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
struct device *dev;
/* get the 'device' corresponding to the matching 'fwnode' */
dev = bus_find_device(&platform_bus_type, NULL,
fwnode, hns_roce_node_match);
/* get the platform device */
return dev ? to_platform_device(dev) : NULL;
}
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
struct device *dev = &hr_dev->pdev->dev;
struct platform_device *pdev = NULL;
struct net_device *netdev = NULL;
struct device_node *net_node;
struct resource *res;
int port_cnt = 0;
u8 phy_port;
int ret;
int i;
/* check if we are compatible with the underlying SoC */
if (dev_of_node(dev)) {
const struct of_device_id *of_id;
of_id = of_match_node(hns_roce_of_match, dev->of_node);
if (!of_id) {
dev_err(dev, "device is not compatible!\n");
return -ENXIO;
}
hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
if (!hr_dev->hw) {
dev_err(dev, "couldn't get H/W specific DT data!\n");
return -ENXIO;
}
} else if (is_acpi_device_node(dev->fwnode)) {
const struct acpi_device_id *acpi_id;
acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
if (!acpi_id) {
dev_err(dev, "device is not compatible!\n");
return -ENXIO;
}
hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
if (!hr_dev->hw) {
dev_err(dev, "couldn't get H/W specific ACPI data!\n");
return -ENXIO;
}
} else {
dev_err(dev, "can't read compatibility data from DT or ACPI\n");
return -ENXIO;
}
/* get the mapped register base address */
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "memory resource not found!\n");
return -EINVAL;
}
hr_dev->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
/* read the node_guid of IB device from the DT or ACPI */
ret = device_property_read_u8_array(dev, "node-guid",
(u8 *)&hr_dev->ib_dev.node_guid,
GUID_LEN);
if (ret) {
dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
return ret;
}
/* get the RoCE associated ethernet ports or netdevices */
for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
if (dev_of_node(dev)) {
net_node = of_parse_phandle(dev->of_node, "eth-handle",
i);
if (!net_node)
continue;
pdev = of_find_device_by_node(net_node);
} else if (is_acpi_device_node(dev->fwnode)) {
struct acpi_reference_args args;
struct fwnode_handle *fwnode;
ret = acpi_node_get_property_reference(dev->fwnode,
"eth-handle",
i, &args);
if (ret)
continue;
fwnode = acpi_fwnode_handle(args.adev);
pdev = hns_roce_find_pdev(fwnode);
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
}
if (pdev) {
netdev = platform_get_drvdata(pdev);
phy_port = (u8)i;
if (netdev) {
hr_dev->iboe.netdevs[port_cnt] = netdev;
hr_dev->iboe.phy_port[port_cnt] = phy_port;
} else {
dev_err(dev, "no netdev found with pdev %s\n",
pdev->name);
return -ENODEV;
}
port_cnt++;
}
}
if (port_cnt == 0) {
dev_err(dev, "unable to get eth-handle for available ports!\n");
return -EINVAL;
}
hr_dev->caps.num_ports = port_cnt;
/* cmd issue mode: 0 is poll, 1 is event */
hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
/* read the interrupt names from the DT or ACPI */
ret = device_property_read_string_array(dev, "interrupt-names",
hr_dev->irq_names,
HNS_ROCE_MAX_IRQ_NUM);
if (ret < 0) {
dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
return ret;
}
/* fetch the interrupt numbers */
for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
if (hr_dev->irq[i] <= 0) {
dev_err(dev, "platform get of irq[=%d] failed!\n", i);
return -EINVAL;
}
}
return 0;
}
/**
* hns_roce_probe - RoCE driver entrance
* @pdev: pointer to platform device
* Return : int
*
*/
static int hns_roce_probe(struct platform_device *pdev)
{
int ret;
struct hns_roce_dev *hr_dev;
struct device *dev = &pdev->dev;
hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
if (!hr_dev)
return -ENOMEM;
hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
if (!hr_dev->priv) {
ret = -ENOMEM;
goto error_failed_kzalloc;
}
hr_dev->pdev = pdev;
hr_dev->dev = dev;
platform_set_drvdata(pdev, hr_dev);
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
dev_err(dev, "Not usable DMA addressing mode\n");
ret = -EIO;
goto error_failed_get_cfg;
}
ret = hns_roce_get_cfg(hr_dev);
if (ret) {
dev_err(dev, "Get Configuration failed!\n");
goto error_failed_get_cfg;
}
ret = hns_roce_init(hr_dev);
if (ret) {
dev_err(dev, "RoCE engine init failed!\n");
goto error_failed_get_cfg;
}
return 0;
error_failed_get_cfg:
kfree(hr_dev->priv);
error_failed_kzalloc:
ib_dealloc_device(&hr_dev->ib_dev);
return ret;
}
/**
* hns_roce_remove - remove RoCE device
* @pdev: pointer to platform device
*/
static int hns_roce_remove(struct platform_device *pdev)
{
struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
return 0;
}
static struct platform_driver hns_roce_driver = {
.probe = hns_roce_probe,
.remove = hns_roce_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = hns_roce_of_match,
.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
},
};
module_platform_driver(hns_roce_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
@@ -948,6 +948,11 @@ struct hns_roce_qp_context {
#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
#define STATUS_MASK 0xff
#define GO_BIT_TIMEOUT_MSECS 10000
#define HCR_STATUS_OFFSET 0x18
#define HCR_GO_BIT 15
struct hns_roce_rq_db {
u32 u32_4;
u32 u32_8;
......
/*
* Copyright (c) 2016-2017 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg)
{
dseg->lkey = cpu_to_le32(sg->lkey);
dseg->addr = cpu_to_le64(sg->addr);
dseg->len = cpu_to_le32(sg->length);
}
static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
unsigned int sge_ind = 0;
unsigned int wqe_sz = 0;
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
int ret = 0;
int nreq;
int i;
if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
*bad_wr = NULL;
return -EOPNOTSUPP;
}
if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
*bad_wr = wr;
return -EINVAL;
}
spin_lock_irqsave(&qp->sq.lock, flags);
ind = qp->sq_next_wqe;
sge_ind = qp->next_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
}
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, qp->sq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
wr->wr_id;
rc_sq_wqe = wqe;
memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
for (i = 0; i < wr->num_sge; i++)
rc_sq_wqe->msg_len += wr->sg_list[i].length;
rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
switch (wr->opcode) {
case IB_WR_RDMA_READ:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_READ);
rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_SEND:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND);
break;
case IB_WR_SEND_WITH_INV:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
break;
case IB_WR_SEND_WITH_IMM:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
break;
case IB_WR_LOCAL_INV:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_LOCAL_INV);
break;
case IB_WR_ATOMIC_CMP_AND_SWP:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
break;
case IB_WR_ATOMIC_FETCH_AND_ADD:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
break;
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
break;
default:
roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_MASK);
break;
}
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S, 1);
wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
dseg = wqe;
if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
if (rc_sq_wqe->msg_len >
hr_dev->caps.max_sq_inline) {
ret = -EINVAL;
*bad_wr = wr;
dev_err(dev, "inline len(1-%d)=%d, illegal",
rc_sq_wqe->msg_len,
hr_dev->caps.max_sq_inline);
goto out;
}
for (i = 0; i < wr->num_sge; i++) {
memcpy(wqe, ((void *)wr->sg_list[i].addr),
wr->sg_list[i].length);
wqe += wr->sg_list[i].length;
wqe_sz += wr->sg_list[i].length;
}
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
} else {
if (wr->num_sge <= 2) {
for (i = 0; i < wr->num_sge; i++)
set_data_seg_v2(dseg + i,
wr->sg_list + i);
} else {
roce_set_field(rc_sq_wqe->byte_20,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
sge_ind & (qp->sge.sge_cnt - 1));
for (i = 0; i < 2; i++)
set_data_seg_v2(dseg + i,
wr->sg_list + i);
dseg = get_send_extend_sge(qp,
sge_ind & (qp->sge.sge_cnt - 1));
for (i = 0; i < wr->num_sge - 2; i++) {
set_data_seg_v2(dseg + i,
wr->sg_list + 2 + i);
sge_ind++;
}
}
roce_set_field(rc_sq_wqe->byte_16,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
wr->num_sge);
wqe_sz += wr->num_sge *
sizeof(struct hns_roce_v2_wqe_data_seg);
}
ind++;
}
out:
if (likely(nreq)) {
qp->sq.head += nreq;
/* Memory barrier */
wmb();
sq_db.byte_4 = 0;
sq_db.parameter = 0;
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
V2_DB_PARAMETER_CONS_IDX_S,
qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l);
qp->sq_next_wqe = ind;
qp->next_sge = sge_ind;
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
return ret;
}
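/*
 * Post a chain of receive WRs. Each RQ WQE is an array of data
 * segments; when fewer than max_gs SGEs are used, a segment with the
 * invalid lkey terminates the list. The RQ doorbell is rung once for
 * the whole chain.
 */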
static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db rq_db;
unsigned long flags;
void *wqe = NULL;
int ret = 0;
int nreq;
int ind;
int i;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
if (hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_ERR) {
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
*bad_wr = wr;
return -EINVAL;
}
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
hr_qp->ibqp.recv_cq)) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
}
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
wqe = get_recv_wqe(hr_qp, ind);
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
for (i = 0; i < wr->num_sge; i++) {
if (!wr->sg_list[i].length)
continue;
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
}
if (i < hr_qp->rq.max_gs) {
/* dseg already points one past the last written segment */
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
}
hr_qp->rq.wrid[ind] = wr->wr_id;
ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
}
out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
/* Memory barrier */
wmb();
rq_db.byte_4 = 0;
rq_db.parameter = 0;
roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
V2_DB_BYTE_4_TAG_S, hr_qp->qpn);
roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l);
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
return ret;
}
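/*
 * Free descriptors left in a CMQ ring; one slot is kept unused so
 * that head == tail unambiguously means the ring is empty.
 */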
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
int ntu = ring->next_to_use;
int ntc = ring->next_to_clean;
int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
return ring->desc_num - used - 1;
}
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
ring->desc = kzalloc(size, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
ring->desc_dma_addr = 0;
kfree(ring->desc);
ring->desc = NULL;
return -ENOMEM;
}
return 0;
}
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
ring->desc_num * sizeof(struct hns_roce_cmq_desc),
DMA_BIDIRECTIONAL);
kfree(ring->desc);
}
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
ring->flag = ring_type;
ring->next_to_clean = 0;
ring->next_to_use = 0;
return hns_roce_alloc_cmq_desc(hr_dev, ring);
}
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
dma_addr_t dma = ring->desc_dma_addr;
if (ring_type == TYPE_CSQ) {
roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
upper_32_bits(dma));
roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
(ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
HNS_ROCE_CMQ_ENABLE);
roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
} else {
roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
upper_32_bits(dma));
roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
(ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
HNS_ROCE_CMQ_ENABLE);
roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
}
}
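/*
 * Bring up the command queues: the CSQ carries requests to the
 * hardware, the CRQ carries messages back from it. Ring sizes, locks
 * and the write-back timeout are fixed before the rings are
 * programmed into the device registers.
 */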
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
int ret;
/* Set the number of descriptors for each command queue */
priv->cmq.csq.desc_num = 1024;
priv->cmq.crq.desc_num = 1024;
/* Setup the lock for command queue */
spin_lock_init(&priv->cmq.csq.lock);
spin_lock_init(&priv->cmq.crq.lock);
/* Setup Tx write back timeout */
priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
/* Init CSQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
if (ret) {
dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
return ret;
}
/* Init CRQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
if (ret) {
dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
goto err_crq;
}
/* Init CSQ REG */
hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
/* Init CRQ REG */
hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
return 0;
err_crq:
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
return ret;
}
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}
void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
enum hns_roce_opcode_type opcode,
bool is_read)
{
memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
desc->opcode = cpu_to_le16(opcode);
desc->flag =
cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
if (is_read)
desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
else
desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}
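/*
 * The CSQ is done once the hardware head pointer has caught up
 * with the software tail (next_to_use).
 */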
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
return head == priv->cmq.csq.next_to_use;
}
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc;
u16 ntc = csq->next_to_clean;
u32 head;
int clean = 0;
desc = &csq->desc[ntc];
head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
while (head != ntc) {
memset(desc, 0, sizeof(*desc));
ntc++;
if (ntc == csq->desc_num)
ntc = 0;
desc = &csq->desc[ntc];
clean++;
}
csq->next_to_clean = ntc;
return clean;
}
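/*
 * Copy @num descriptors into the CSQ, ring the tail register and,
 * for synchronous (no-interrupt) commands, poll until the hardware
 * head catches up or the timeout expires; results written back by
 * the hardware are copied into the caller's descriptors.
 */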
int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
int handle = 0;
u16 desc_ret;
int ret = 0;
int ntc;
spin_lock_bh(&csq->lock);
if (num > hns_roce_cmq_space(csq)) {
spin_unlock_bh(&csq->lock);
return -EBUSY;
}
/*
 * Record where the descriptors start in the CMQ; the hardware
 * writes its results back into these slots.
 */
ntc = csq->next_to_use;
while (handle < num) {
desc_to_use = &csq->desc[csq->next_to_use];
*desc_to_use = desc[handle];
dev_dbg(hr_dev->dev, "set cmq desc:\n");
csq->next_to_use++;
if (csq->next_to_use == csq->desc_num)
csq->next_to_use = 0;
handle++;
}
/* Write to hardware */
roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
/*
 * If the command is synchronous, wait for the hardware to write
 * back; when multiple descriptors are sent, the first one is used
 * to check for completion.
 */
if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
do {
if (hns_roce_cmq_csq_done(hr_dev))
break;
usleep_range(1000, 2000);
timeout++;
} while (timeout < priv->cmq.tx_timeout);
}
if (hns_roce_cmq_csq_done(hr_dev)) {
complete = true;
handle = 0;
while (handle < num) {
/* get the result of hardware write back */
desc_to_use = &csq->desc[ntc];
desc[handle] = *desc_to_use;
dev_dbg(hr_dev->dev, "Get cmq desc:\n");
desc_ret = desc[handle].retval;
if (desc_ret == CMD_EXEC_SUCCESS)
ret = 0;
else
ret = -EIO;
priv->cmq.last_status = desc_ret;
ntc++;
handle++;
if (ntc == csq->desc_num)
ntc = 0;
}
}
if (!complete)
ret = -EAGAIN;
/* clean the command send queue */
handle = hns_roce_cmq_csq_clean(hr_dev);
if (handle != num)
dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
handle, num);
spin_unlock_bh(&csq->lock);
return ret;
}
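/* Read the RoCEE hardware version and vendor id over the CMQ */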
int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_version *resp;
struct hns_roce_cmq_desc desc;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
resp = (struct hns_roce_query_version *)desc.data;
hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
return 0;
}
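/*
 * 0x3e8 (1000) presumably programs the 1us time resolution (clock
 * cycles per microsecond), and 0x12b7 (4791) is the IANA-assigned
 * RoCEv2 UDP destination port.
 */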
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cfg_global_param *req;
struct hns_roce_cmq_desc desc;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false);
req = (struct hns_roce_cfg_global_param *)desc.data;
memset(req, 0, sizeof(*req));
roce_set_field(req->time_cfg_udp_port,
CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
roce_set_field(req->time_cfg_udp_port,
CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[2];
struct hns_roce_pf_res *res;
int ret;
int i;
for (i = 0; i < 2; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_QUERY_PF_RES, true);
if (i == 0)
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
}
ret = hns_roce_cmq_send(hr_dev, desc, 2);
if (ret)
return ret;
res = (struct hns_roce_pf_res *)desc[0].data;
hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
PF_RES_DATA_1_PF_QPC_BT_NUM_M,
PF_RES_DATA_1_PF_QPC_BT_NUM_S);
hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
PF_RES_DATA_3_PF_CQC_BT_NUM_M,
PF_RES_DATA_3_PF_CQC_BT_NUM_S);
hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
PF_RES_DATA_4_PF_MPT_BT_NUM_M,
PF_RES_DATA_4_PF_MPT_BT_NUM_S);
return 0;
}
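/*
 * Assign BT, SMAC, SGID and QID resources to the function. The
 * request spans two descriptors, chained by setting
 * HNS_ROCE_CMD_FLAG_NEXT on the first one.
 */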
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[2];
struct hns_roce_vf_res_a *req_a;
struct hns_roce_vf_res_b *req_b;
int i;
req_a = (struct hns_roce_vf_res_a *)desc[0].data;
req_b = (struct hns_roce_vf_res_b *)desc[1].data;
memset(req_a, 0, sizeof(*req_a));
memset(req_b, 0, sizeof(*req_b));
for (i = 0; i < 2; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_ALLOC_VF_RES, false);
if (i == 0)
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
if (i == 0) {
roce_set_field(req_a->vf_qpc_bt_idx_num,
VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
roce_set_field(req_a->vf_qpc_bt_idx_num,
VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
HNS_ROCE_VF_QPC_BT_NUM);
roce_set_field(req_a->vf_srqc_bt_idx_num,
VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
roce_set_field(req_a->vf_srqc_bt_idx_num,
VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
HNS_ROCE_VF_SRQC_BT_NUM);
roce_set_field(req_a->vf_cqc_bt_idx_num,
VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
roce_set_field(req_a->vf_cqc_bt_idx_num,
VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
HNS_ROCE_VF_CQC_BT_NUM);
roce_set_field(req_a->vf_mpt_bt_idx_num,
VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
roce_set_field(req_a->vf_mpt_bt_idx_num,
VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
HNS_ROCE_VF_MPT_BT_NUM);
roce_set_field(req_a->vf_eqc_bt_idx_num,
VF_RES_A_DATA_5_VF_EQC_IDX_M,
VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
roce_set_field(req_a->vf_eqc_bt_idx_num,
VF_RES_A_DATA_5_VF_EQC_NUM_M,
VF_RES_A_DATA_5_VF_EQC_NUM_S,
HNS_ROCE_VF_EQC_NUM);
} else {
roce_set_field(req_b->vf_smac_idx_num,
VF_RES_B_DATA_1_VF_SMAC_IDX_M,
VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
roce_set_field(req_b->vf_smac_idx_num,
VF_RES_B_DATA_1_VF_SMAC_NUM_M,
VF_RES_B_DATA_1_VF_SMAC_NUM_S,
HNS_ROCE_VF_SMAC_NUM);
roce_set_field(req_b->vf_sgid_idx_num,
VF_RES_B_DATA_2_VF_SGID_IDX_M,
VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
roce_set_field(req_b->vf_sgid_idx_num,
VF_RES_B_DATA_2_VF_SGID_NUM_M,
VF_RES_B_DATA_2_VF_SGID_NUM_S,
HNS_ROCE_VF_SGID_NUM);
roce_set_field(req_b->vf_qid_idx_sl_num,
VF_RES_B_DATA_3_VF_QID_IDX_M,
VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
roce_set_field(req_b->vf_qid_idx_sl_num,
VF_RES_B_DATA_3_VF_SL_NUM_M,
VF_RES_B_DATA_3_VF_SL_NUM_S,
HNS_ROCE_VF_SL_NUM);
}
}
return hns_roce_cmq_send(hr_dev, desc, 2);
}
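/*
 * Describe the base-address table layout for QPC/SRQC/CQC/MPT to the
 * hardware: BA-table page size, buffer page size and hop depth, with
 * HNS_ROCE_HOP_NUM_0 encoded as 0.
 */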
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
struct hns_roce_cfg_bt_attr *req;
struct hns_roce_cmq_desc desc;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
req = (struct hns_roce_cfg_bt_attr *)desc.data;
memset(req, 0, sizeof(*req));
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
hr_dev->caps.qpc_ba_pg_sz);
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
hr_dev->caps.qpc_buf_pg_sz);
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
hr_dev->caps.srqc_ba_pg_sz);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
hr_dev->caps.srqc_buf_pg_sz);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
hr_dev->caps.cqc_ba_pg_sz);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
hr_dev->caps.cqc_buf_pg_sz);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
hr_dev->caps.mpt_ba_pg_sz);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
hr_dev->caps.mpt_buf_pg_sz);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
ret = hns_roce_cmq_query_hw_info(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
ret);
return ret;
}
ret = hns_roce_config_global_param(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Configure global param failed, ret = %d.\n",
ret);
return ret;
}
/* Query the resources owned by the PF */
ret = hns_roce_query_pf_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
ret);
return ret;
}
ret = hns_roce_alloc_vf_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
ret);
return ret;
}
hr_dev->vendor_part_id = 0;
hr_dev->sys_image_guid = 0;
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
caps->num_aeq_vectors = 1;
caps->num_comp_vectors = 63;
caps->num_other_vectors = 0;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
caps->qpc_ba_pg_sz = 0;
caps->qpc_buf_pg_sz = 0;
caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->srqc_ba_pg_sz = 0;
caps->srqc_buf_pg_sz = 0;
caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_ba_pg_sz = 0;
caps->cqc_buf_pg_sz = 0;
caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->mpt_ba_pg_sz = 0;
caps->mpt_buf_pg_sz = 0;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->pbl_ba_pg_sz = 0;
caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->mtt_ba_pg_sz = 0;
caps->mtt_buf_pg_sz = 0;
caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
caps->cqe_ba_pg_sz = 0;
caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = 2;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
ret);
return ret;
}
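/*
 * Mailbox helpers: the run bit in the VF mailbox status register
 * marks a command still in flight; the low status bits carry the
 * completion code of the last command.
 */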
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}
static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
return status & HNS_ROCE_HW_MB_STATUS_MASK;
}
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier,
u16 op, u16 token, int event)
{
struct device *dev = hr_dev->dev;
u32 *hcr = (u32 *)(hr_dev->reg_base + ROCEE_VF_MB_CFG0_REG);
unsigned long end;
u32 val0 = 0;
u32 val1 = 0;
end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
while (hns_roce_v2_cmd_pending(hr_dev)) {
if (time_after(jiffies, end)) {
dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
(int)end);
return -EAGAIN;
}
cond_resched();
}
roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
HNS_ROCE_VF_MB4_CMD_SHIFT, op);
roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
__raw_writeq(cpu_to_le64(in_param), hcr + 0);
__raw_writeq(cpu_to_le64(out_param), hcr + 2);
/* Memory barrier */
wmb();
__raw_writel(cpu_to_le32(val0), hcr + 4);
__raw_writel(cpu_to_le32(val1), hcr + 5);
mmiowb();
return 0;
}
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
unsigned long timeout)
{
struct device *dev = hr_dev->dev;
unsigned long end = 0;
u32 status;
end = msecs_to_jiffies(timeout) + jiffies;
while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
cond_resched();
if (hns_roce_v2_cmd_pending(hr_dev)) {
dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
return -ETIMEDOUT;
}
status = hns_roce_v2_cmd_complete(hr_dev);
if (status != 0x1) {
dev_err(dev, "mailbox status 0x%x!\n", status);
return -EBUSY;
}
return 0;
}
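/*
 * Write one 16-byte GID into the per-index SGID registers 32 bits
 * at a time, then clear the SGID type field for that index.
 */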
static void hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
int gid_index, union ib_gid *gid)
{
u32 *p;
u32 val;
p = (u32 *)&gid->raw[0];
roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
0x20 * gid_index);
p = (u32 *)&gid->raw[4];
roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
0x20 * gid_index);
p = (u32 *)&gid->raw[8];
roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
0x20 * gid_index);
p = (u32 *)&gid->raw[0xc];
roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
0x20 * gid_index);
val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
ROCEE_VF_SGID_CFG4_SGID_TYPE_S, 0);
roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);
}
static void hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
u8 *addr)
{
u16 reg_smac_h;
u32 reg_smac_l;
u32 val;
reg_smac_l = *(u32 *)(&addr[0]);
roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
0x08 * phy_port);
val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);
reg_smac_h = *(u16 *)(&addr[4]);
roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);
}
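/*
 * Fill an MPT (memory protection table) entry for a registered MR:
 * state, PD, PBL geometry, access rights, length/IOVA and the PBL
 * base address. The first two page addresses are cached directly in
 * the MTPT entry; DMA MRs have no PBL and return early.
 */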
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
struct scatterlist *sg;
u64 *pages;
int entry;
int i;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
(mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
(mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
(mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
(mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
mr->type == MR_TYPE_MR ? 0 : 1);
mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
mpt_entry->lkey = cpu_to_le32(mr->key);
mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
if (mr->type == MR_TYPE_DMA)
return 0;
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
V2_MPT_BYTE_48_PBL_BA_H_S,
upper_32_bits(mr->pbl_ba >> 3));
mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
pages = (u64 *)__get_free_page(GFP_KERNEL);
if (!pages)
return -ENOMEM;
i = 0;
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
pages[i] = ((u64)sg_dma_address(sg)) >> 6;
/* Record the first two entries directly in the MTPT table */
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
break;
i++;
}
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S,
upper_32_bits(pages[0]));
mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
free_page((unsigned long)pages);
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
return 0;
}
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
/* Return the CQE only when its owner bit differs from the wrap bit (MSB) of cons_idx */
return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
}
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
{
return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
}
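/*
 * Ring the CQ consumer-index doorbell. The index is masked with
 * (cq_depth << 1) - 1 so the extra wrap bit stays consistent with
 * the owner-bit test in get_sw_cqe_v2().
 */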
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
struct hns_roce_v2_cq_db cq_db;
cq_db.byte_4 = 0;
cq_db.parameter = 0;
roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
V2_CQ_DB_PARAMETER_CONS_IDX_S,
cons_index & ((hr_cq->cq_depth << 1) - 1));
roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
struct hns_roce_srq *srq)
{
struct hns_roce_v2_cqe *cqe, *dest;
u32 prod_index;
int nfreed = 0;
u8 owner_bit;
for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
++prod_index) {
if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
break;
}
/*
* Now backwards through the CQ, removing CQ entries
* that match our QP by overwriting them with next entries.
*/
while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
V2_CQE_BYTE_16_LCL_QPN_S) &
HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
/* SRQ is not supported, so just count the entry as freed */
++nfreed;
} else if (nfreed) {
dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
hr_cq->ib_cq.cqe);
owner_bit = roce_get_bit(dest->byte_4,
V2_CQE_BYTE_4_OWNER_S);
memcpy(dest, cqe, sizeof(*cqe));
roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
owner_bit);
}
}
if (nfreed) {
hr_cq->cons_index += nfreed;
/*
* Make sure update of buffer contents is done before
* updating consumer index.
*/
wmb();
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
struct hns_roce_srq *srq)
{
spin_lock_irq(&hr_cq->lock);
__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
spin_unlock_irq(&hr_cq->lock);
}
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
u64 *mtts, dma_addr_t dma_handle, int nent,
u32 vector)
{
struct hns_roce_v2_cq_context *cq_context;
cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
V2_CQC_BYTE_4_CEQN_S, vector);
cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
cq_context->cqe_cur_blk_addr =
cpu_to_le32(cq_context->cqe_cur_blk_addr);
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
hr_dev->caps.cqe_ba_pg_sz);
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
hr_dev->caps.cqe_buf_pg_sz);
cq_context->cqe_ba = (u32)(dma_handle >> 3);
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
}
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
enum ib_cq_notify_flags flags)
{
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
u32 doorbell[2];
doorbell[0] = 0;
doorbell[1] = 0;
notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
/*
 * flags = 0: notification flag = 1, next completion
 * flags = 1: notification flag = 0, solicited only
 */
roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
hr_cq->cqn);
roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
HNS_ROCE_V2_CQ_DB_NTR);
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
V2_CQ_DB_PARAMETER_CONS_IDX_S,
hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
notification_flag);
hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
return 0;
}
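/*
 * Consume one CQE: look up the QP it belongs to (cached in @cur_qp
 * across calls), translate the hardware status and opcode into
 * ib_wc fields, and advance the matching SQ or RQ tail pointer.
 */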
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
struct hns_roce_dev *hr_dev;
struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq;
int is_send;
u16 wqe_ctr;
u32 opcode;
u32 status;
int qpn;
/* Find cqe according to consumer index */
cqe = next_cqe_sw_v2(hr_cq);
if (!cqe)
return -EAGAIN;
++hr_cq->cons_index;
/* Memory barrier */
rmb();
/* 0->SQ, 1->RQ */
is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
V2_CQE_BYTE_16_LCL_QPN_S);
if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
hr_dev = to_hr_dev(hr_cq->ib_cq.device);
hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (unlikely(!hr_qp)) {
dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
return -EINVAL;
}
*cur_qp = hr_qp;
}
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
V2_CQE_BYTE_4_STATUS_S);
switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
case HNS_ROCE_CQE_V2_SUCCESS:
wc->status = IB_WC_SUCCESS;
break;
case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
wc->status = IB_WC_LOC_LEN_ERR;
break;
case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
wc->status = IB_WC_LOC_QP_OP_ERR;
break;
case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
wc->status = IB_WC_LOC_PROT_ERR;
break;
case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
wc->status = IB_WC_WR_FLUSH_ERR;
break;
case HNS_ROCE_CQE_V2_MW_BIND_ERR:
wc->status = IB_WC_MW_BIND_ERR;
break;
case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
wc->status = IB_WC_BAD_RESP_ERR;
break;
case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
wc->status = IB_WC_LOC_ACCESS_ERR;
break;
case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
wc->status = IB_WC_REM_INV_REQ_ERR;
break;
case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
wc->status = IB_WC_REM_ACCESS_ERR;
break;
case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
wc->status = IB_WC_REM_OP_ERR;
break;
case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
wc->status = IB_WC_RETRY_EXC_ERR;
break;
case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
wc->status = IB_WC_RNR_RETRY_EXC_ERR;
break;
case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
wc->status = IB_WC_REM_ABORT_ERR;
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
/* On a CQE error status, return without parsing the opcode */
if (wc->status != IB_WC_SUCCESS)
return 0;
if (is_send) {
wc->wc_flags = 0;
/* This CQE corresponds to the SQ */
switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
case HNS_ROCE_SQ_OPCODE_SEND:
wc->opcode = IB_WC_SEND;
break;
case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_SEND;
break;
case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_SEND;
wc->wc_flags |= IB_WC_WITH_IMM;
break;
case HNS_ROCE_SQ_OPCODE_RDMA_READ:
wc->opcode = IB_WC_RDMA_READ;
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
break;
case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
wc->opcode = IB_WC_RDMA_WRITE;
wc->wc_flags |= IB_WC_WITH_IMM;
break;
case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
wc->opcode = IB_WC_LOCAL_INV;
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
break;
case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
wc->opcode = IB_WC_COMP_SWAP;
wc->byte_len = 8;
break;
case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
wc->opcode = IB_WC_FETCH_ADD;
wc->byte_len = 8;
break;
case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
wc->opcode = IB_WC_MASKED_COMP_SWAP;
wc->byte_len = 8;
break;
case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
wc->opcode = IB_WC_MASKED_FETCH_ADD;
wc->byte_len = 8;
break;
case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
wc->opcode = IB_WC_REG_MR;
break;
case HNS_ROCE_SQ_OPCODE_BIND_MW:
wc->opcode = IB_WC_REG_MR;
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
wq = &(*cur_qp)->sq;
if ((*cur_qp)->sq_signal_bits) {
/*
 * If sq_signal_bits is set, first move the tail pointer up to
 * the WQE that this CQE corresponds to.
 */
wqe_ctr = (u16)roce_get_field(cqe->byte_4,
V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S);
wq->tail += (wqe_ctr - (u16)wq->tail) &
(wq->wqe_cnt - 1);
}
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
} else {
/* This CQE corresponds to the RQ */
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
V2_CQE_BYTE_4_OPCODE_S);
switch (opcode & 0x1f) {
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey_immtdata);
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
V2_CQE_BYTE_32_SL_S);
wc->src_qp = (u8)roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_RMT_QPN_M,
V2_CQE_BYTE_32_RMT_QPN_S);
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
}
return 0;
}
static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
struct ib_wc *wc)
{
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
struct hns_roce_qp *cur_qp = NULL;
unsigned long flags;
int npolled;
spin_lock_irqsave(&hr_cq->lock, flags);
for (npolled = 0; npolled < num_entries; ++npolled) {
if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
break;
}
if (npolled) {
/* Memory barrier */
wmb();
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
}
spin_unlock_irqrestore(&hr_cq->lock, flags);
return npolled;
}
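/*
 * Program one step of a multi-hop HEM table through the mailbox.
 * On the last hop the memory chunk itself is written; on upper hops
 * only the base address of the next-level BT page is passed.
 */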
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx)
{
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_iter iter;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
unsigned long mhop_obj = obj;
int i, j, k;
int ret = 0;
u64 hem_idx = 0;
u64 l1_idx = 0;
u64 bt_ba = 0;
u32 chunk_ba_num;
u32 hop_num;
u16 op = 0xff;
if (!hns_roce_check_whether_mhop(hr_dev, table->type))
return 0;
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
i = mhop.l0_idx;
j = mhop.l1_idx;
k = mhop.l2_idx;
hop_num = mhop.hop_num;
chunk_ba_num = mhop.bt_chunk_size / 8;
if (hop_num == 2) {
hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
k;
l1_idx = i * chunk_ba_num + j;
} else if (hop_num == 1) {
hem_idx = i * chunk_ba_num + j;
} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
hem_idx = i;
}
switch (table->type) {
case HEM_TYPE_QPC:
op = HNS_ROCE_CMD_WRITE_QPC_BT0;
break;
case HEM_TYPE_MTPT:
op = HNS_ROCE_CMD_WRITE_MPT_BT0;
break;
case HEM_TYPE_CQC:
op = HNS_ROCE_CMD_WRITE_CQC_BT0;
break;
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
break;
default:
dev_warn(dev, "Table %d not to be written by mailbox!\n",
table->type);
return 0;
}
op += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
if (check_whether_last_step(hop_num, step_idx)) {
hem = table->hem[hem_idx];
for (hns_roce_hem_first(hem, &iter);
!hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
bt_ba = hns_roce_hem_addr(&iter);
/* configure the ba, tag, and op */
ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
obj, 0, op,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
} else {
if (step_idx == 0)
bt_ba = table->bt_l0_dma_addr[i];
else if (step_idx == 1 && hop_num == 2)
bt_ba = table->bt_l1_dma_addr[l1_idx];
/* configure the ba, tag, and op */
ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
}
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx)
{
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
int ret = 0;
u16 op = 0xff;
if (!hns_roce_check_whether_mhop(hr_dev, table->type))
return 0;
switch (table->type) {
case HEM_TYPE_QPC:
op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
break;
case HEM_TYPE_MTPT:
op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
break;
case HEM_TYPE_CQC:
op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
break;
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
break;
default:
dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
table->type);
return 0;
}
op += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
/* configure the tag and op */
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
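/*
 * Issue MODIFY_QPC through the mailbox. The mailbox buffer carries
 * the new context immediately followed by the context mask, hence
 * the sizeof(*context) * 2 copy.
 */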
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
struct hns_roce_v2_qp_context *context,
struct hns_roce_qp *hr_qp)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memcpy(mailbox->buf, context, sizeof(*context) * 2);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
HNS_ROCE_CMD_MODIFY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/*
 * In the v2 engine, software passes both a context and a context
 * mask to hardware when modifying a QP. For every field that is
 * being modified, all bits of that field in the context mask must
 * be cleared to 0; unmodified fields keep their mask bits at 1.
 */
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
ilog2((unsigned int)hr_qp->rq.wqe_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
/* When there is no VLAN, the VLAN index must be set to 0xFFF */
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
V2_QPC_BYTE_24_VLAN_IDX_S, 0);
/*
 * Clear some fields in the context. Since the whole context
 * defaults to zero, the fields themselves need not be written
 * again; only the corresponding fields of the context mask must
 * be cleared to 0.
 */
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
V2_QPC_BYTE_60_MAPID_S, 0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid,
V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
0);
roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
!!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
!!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
!!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, 0);
if (ibqp->srq) {
roce_set_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
to_hr_srq(ibqp->srq)->srqn);
roce_set_field(qpc_mask->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S, 0);
}
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
V2_QPC_BYTE_92_SRQ_INFO_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
roce_set_field(qpc_mask->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
qpc_mask->rq_rnr_timer = 0;
qpc_mask->rx_msg_len = 0;
qpc_mask->rx_rkey_pkt_info = 0;
qpc_mask->rx_va = 0;
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
roce_set_field(qpc_mask->byte_144_raq,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
0);
roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
V2_QPC_BYTE_148_RQ_MSN_S, 0);
roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
V2_QPC_BYTE_152_RAQ_PSN_S, 0);
roce_set_field(qpc_mask->byte_152_raq,
V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
roce_set_field(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
roce_set_field(qpc_mask->byte_172_sq_psn,
V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
0);
roce_set_field(qpc_mask->byte_176_msg_pktn,
V2_QPC_BYTE_176_MSG_USE_PKTN_M,
V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
roce_set_field(qpc_mask->byte_176_msg_pktn,
V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
roce_set_field(qpc_mask->byte_184_irrl_idx,
V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
qpc_mask->cur_sge_offset = 0;
roce_set_field(qpc_mask->byte_192_ext_sge,
V2_QPC_BYTE_192_CUR_SGE_IDX_M,
V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
roce_set_field(qpc_mask->byte_192_ext_sge,
V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
roce_set_field(qpc_mask->byte_200_sq_max,
V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
V2_QPC_BYTE_212_CHECK_FLG_S, 0);
qpc_mask->sq_timer = 0;
roce_set_field(qpc_mask->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
roce_set_field(qpc_mask->byte_232_irrl_sge,
V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
qpc_mask->irrl_cur_sge_offset = 0;
roce_set_field(qpc_mask->byte_240_irrl_tail,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
roce_set_field(qpc_mask->byte_240_irrl_tail,
V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
roce_set_field(qpc_mask->byte_240_irrl_tail,
V2_QPC_BYTE_240_RX_ACK_MSN_M,
V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
V2_QPC_BYTE_248_IRRL_PSN_S, 0);
roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
0);
roce_set_field(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
0);
roce_set_bit(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
0);
hr_qp->access_flags = attr->qp_access_flags;
hr_qp->pkey_index = attr->pkey_index;
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
V2_QPC_BYTE_252_ERR_TYPE_S, 0);
roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
V2_QPC_BYTE_256_RQ_CQE_IDX_M,
V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/*
 * In the v2 engine, software passes both a context and a context
 * mask to hardware when modifying a QP. For every field that is
 * being modified, all bits of that field in the context mask must
 * be cleared to 0; unmodified fields keep their mask bits at 1.
 */
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
if (attr_mask & IB_QP_ACCESS_FLAGS) {
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
!!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
!!(attr->qp_access_flags &
IB_ACCESS_REMOTE_WRITE));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
!!(attr->qp_access_flags &
IB_ACCESS_REMOTE_ATOMIC));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
0);
} else {
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
!!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
!!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
0);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
!!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
0);
}
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
ilog2((unsigned int)hr_qp->rq.wqe_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
if (ibqp->srq) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S, 0);
roce_set_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
to_hr_srq(ibqp->srq)->srqn);
roce_set_field(qpc_mask->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
}
if (attr_mask & IB_QP_PKEY_INDEX)
context->qkey_xrcd = attr->pkey_index;
else
context->qkey_xrcd = hr_qp->pkey_index;
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, 0);
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, 0);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
}
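/*
 * INIT -> RTR: look up the QP buffer and IRRL MTT entries, then fill
 * the address and RQ related context fields. ALT_PATH, ACCESS_FLAGS,
 * PKEY_INDEX and QKEY must not be modified in this transition.
 */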
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = hr_dev->dev;
dma_addr_t dma_handle_2;
dma_addr_t dma_handle;
u32 page_size;
u8 port_num;
u64 *mtts_2;
u64 *mtts;
u8 *dmac;
u8 *smac;
int port;
/* Search qp buf's mtts */
mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "qp buf pa find failed\n");
return -EINVAL;
}
/* Search IRRL's mtts */
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
hr_qp->qpn, &dma_handle_2);
if (!mtts_2) {
dev_err(dev, "qp irrl_table find failed\n");
return -EINVAL;
}
if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
(attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
return -EINVAL;
}
dmac = (u8 *)attr->ah_attr.roce.dmac;
context->wqe_sge_ba = (u32)(dma_handle >> 3);
qpc_mask->wqe_sge_ba = 0;
/*
* In the v2 engine, software passes both a context and a context mask to
* hardware when modifying a QP. For every field to be modified, software
* must clear all bits of that field in the context mask to 0 while writing
* the new value into the context; fields whose mask bits stay 0x1 are left
* unchanged, as the pairs of roce_set_field() calls below illustrate.
*/
roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S,
hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
0 : hr_dev->caps.mtt_hop_num);
roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S,
hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
0 : hr_dev->caps.mtt_hop_num);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
hr_dev->caps.mtt_ba_pg_sz);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
hr_dev->caps.mtt_buf_pg_sz);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
>> PAGE_ADDR_SHIFT);
qpc_mask->rq_cur_blk_addr = 0;
roce_set_field(context->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
mtts[hr_qp->rq.offset / page_size]
>> (32 + PAGE_ADDR_SHIFT));
roce_set_field(qpc_mask->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
>> PAGE_ADDR_SHIFT);
qpc_mask->rq_nxt_blk_addr = 0;
roce_set_field(context->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
mtts[hr_qp->rq.offset / page_size + 1]
>> (32 + PAGE_ADDR_SHIFT));
roce_set_field(qpc_mask->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
roce_set_field(context->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
roce_set_field(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
context->irrl_ba = (u32)dma_handle_2;
qpc_mask->irrl_ba = 0;
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S,
(dma_handle_2 >> 32) & V2_QPC_BYTE_208_IRRL_BA_M);
roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S, 0);
roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
hr_qp->sq_signal_bits);
roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
0);
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
smac = (u8 *)hr_dev->dev_addr[port];
/* Enable loopback when the dmac equals the smac or loop_idc is 1 */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1) {
roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S,
ilog2((unsigned int)attr->max_dest_rd_atomic));
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S, 0);
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, 0);
/* Configure GID index */
port_num = rdma_ah_get_port_num(&attr->ah_attr);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S,
hns_get_gid_index(hr_dev, port_num - 1,
grh->sgid_index));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S, 0);
memcpy(&(context->dmac), dmac, 4);
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
qpc_mask->dmac = 0;
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S, grh->flow_label);
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S, 0);
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
roce_set_field(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
context->rq_rnr_timer = 0;
qpc_mask->rq_rnr_timer = 0;
roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
V2_QPC_BYTE_152_RAQ_PSN_S, 0);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
roce_set_field(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
V2_QPC_BYTE_208_SR_MAX_S,
ilog2((unsigned int)attr->max_rd_atomic));
roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
V2_QPC_BYTE_208_SR_MAX_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, 0);
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
return 0;
}
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = hr_dev->dev;
dma_addr_t dma_handle;
u64 *mtts;
/* Search qp buf's mtts */
mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "qp buf pa find failed\n");
return -EINVAL;
}
/* Reject optional attributes that are invalid for the RTR-to-RTS transition */
if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
(attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
(attr_mask & IB_QP_CUR_STATE) ||
(attr_mask & IB_QP_MIN_RNR_TIMER)) {
dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
return -EINVAL;
}
/*
* In the v2 engine, software passes both a context and a context mask to
* hardware when modifying a QP. For every field to be modified, software
* must clear all bits of that field in the context mask to 0 while writing
* the new value into the context; fields whose mask bits stay 0x1 are left
* unchanged.
*/
roce_set_field(context->byte_60_qpst_mapid,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
roce_set_field(qpc_mask->byte_60_qpst_mapid,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
mtts[0] >> (32 + PAGE_ADDR_SHIFT));
qpc_mask->sq_cur_blk_addr = 0;
roce_set_field(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_232_irrl_sge,
V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
mtts[0] >> (32 + PAGE_ADDR_SHIFT));
qpc_mask->rx_sq_cur_blk_addr = 0;
roce_set_field(qpc_mask->byte_232_irrl_sge,
V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
/*
* The fields below must become zero. Since the newly allocated context is
* already zeroed, there is no need to write 0 into it again; clearing the
* relevant fields in the context mask is sufficient.
*/
roce_set_field(qpc_mask->byte_232_irrl_sge,
V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
roce_set_field(qpc_mask->byte_240_irrl_tail,
V2_QPC_BYTE_240_RX_ACK_MSN_M,
V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
roce_set_field(context->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RX_ACK_EPSN_M,
V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RX_ACK_EPSN_M,
V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
roce_set_field(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
roce_set_bit(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
roce_set_field(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_IRRL_PSN_M,
V2_QPC_BYTE_248_IRRL_PSN_S, 0);
roce_set_field(qpc_mask->byte_240_irrl_tail,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
roce_set_field(context->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
roce_set_field(context->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
roce_set_field(qpc_mask->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
roce_set_field(context->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_224_retry_msg,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
roce_set_field(qpc_mask->byte_220_retry_psn_msn,
V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
roce_set_bit(qpc_mask->byte_248_ack_psn,
V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
V2_QPC_BYTE_212_CHECK_FLG_S, 0);
roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S, 0);
roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
roce_set_field(context->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
roce_set_field(qpc_mask->byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
V2_QPC_BYTE_244_RNR_CNT_S, 0);
roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
V2_QPC_BYTE_212_LSN_S, 0x100);
roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
V2_QPC_BYTE_212_LSN_S, 0);
if (attr->timeout < 0xf)
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S, 0xf);
else
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S, attr->timeout);
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S,
rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, 0);
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
return 0;
}
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context *context;
struct hns_roce_v2_qp_context *qpc_mask;
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
context = kzalloc(2 * sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
qpc_mask = context + 1;
/*
* In the v2 engine, software passes both a context and a context mask to
* hardware when modifying a QP. For every field to be modified, software
* must clear all bits of that field in the context mask to 0 while writing
* the new value into the context; fields whose mask bits stay 0x1 are left
* unchanged.
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
(cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
(cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
(cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
(cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
/* Nothing */
;
} else {
dev_err(dev, "Illegal state for QP!\n");
goto out;
}
/* Every state transition must update the QP state field */
roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* Pass the context and mask to hardware */
ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
context, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
goto out;
}
hr_qp->state = new_state;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
hr_qp->port = attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
}
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
if (ibqp->send_cq != ibqp->recv_cq)
hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
hr_qp->qpn, NULL);
hr_qp->rq.head = 0;
hr_qp->rq.tail = 0;
hr_qp->sq.head = 0;
hr_qp->sq.tail = 0;
hr_qp->sq_next_wqe = 0;
hr_qp->next_sge = 0;
}
out:
kfree(context);
return ret;
}
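/*
* A hedged usage sketch, not driver code: this handler is reached through
* ib_modify_qp() once per transition, so a typical RC connection bring-up
* drives it three times with core-validated attribute masks, roughly:
*
*	RESET -> INIT  IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
*		       IB_QP_ACCESS_FLAGS
*	INIT  -> RTR   IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
*		       IB_QP_DEST_QPN | IB_QP_RQ_PSN |
*		       IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER
*	RTR   -> RTS   IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_TIMEOUT |
*		       IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
*		       IB_QP_MAX_QP_RD_ATOMIC
*/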
static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
switch (state) {
case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
case HNS_ROCE_QP_ST_SQ_DRAINING:
case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
default: return -1;
}
}
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *hr_context)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
goto out;
}
memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context *context;
struct device *dev = hr_dev->dev;
int tmp_qp_state;
int state;
int ret;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
memset(qp_attr, 0, sizeof(*qp_attr));
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
mutex_lock(&hr_qp->mutex);
if (hr_qp->state == IB_QPS_RESET) {
qp_attr->qp_state = IB_QPS_RESET;
goto done;
}
ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
if (ret) {
dev_err(dev, "query qpc error\n");
ret = -EINVAL;
goto out;
}
state = roce_get_field(context->byte_60_qpst_mapid,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
dev_err(dev, "Illegal ib_qp_state\n");
ret = -EINVAL;
goto out;
}
hr_qp->state = (u8)tmp_qp_state;
qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S);
qp_attr->path_mig_state = IB_MIG_ARMED;
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
qp_attr->qkey = V2_QKEY_VAL;
qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
V2_QPC_BYTE_108_RX_REQ_EPSN_S);
qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S);
qp_attr->dest_qp_num = roce_get_field(context->byte_56_dqpn_err,
V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S);
qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RRE_S)) << 2) |
((roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RWE_S)) << 1) |
((roce_get_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_ATE_S)) << 3);
if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
hr_qp->ibqp.qp_type == IB_QPT_UC) {
struct ib_global_route *grh =
rdma_ah_retrieve_grh(&qp_attr->ah_attr);
rdma_ah_set_sl(&qp_attr->ah_attr,
roce_get_field(context->byte_28_at_fl,
V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S));
grh->flow_label = roce_get_field(context->byte_28_at_fl,
V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S);
grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S);
grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S);
grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S);
memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
}
qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = 0;
qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
V2_QPC_BYTE_208_SR_MAX_M,
V2_QPC_BYTE_208_SR_MAX_S);
qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S);
qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S);
qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S);
qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S);
qp_attr->rnr_retry = context->rq_rnr_timer;
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
if (!ibqp->uobject) {
qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
qp_init_attr->cap = qp_attr->cap;
out:
mutex_unlock(&hr_qp->mutex);
kfree(context);
return ret;
}
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
int is_user)
{
struct hns_roce_cq *send_cq, *recv_cq;
struct device *dev = hr_dev->dev;
int ret;
if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
/* Modify the qp to the Reset state before destroying it */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
if (ret) {
dev_err(dev, "modify QP %06lx to ERR failed.\n",
hr_qp->qpn);
return ret;
}
}
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
hns_roce_lock_cqs(send_cq, recv_cq);
if (!is_user) {
__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
to_hr_srq(hr_qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
}
hns_roce_qp_remove(hr_dev, hr_qp);
hns_roce_unlock_cqs(send_cq, recv_cq);
hns_roce_qp_free(hr_dev, hr_qp);
/* Not a special QP, so free its QPN */
if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
(hr_qp->ibqp.qp_type == IB_QPT_UC) ||
(hr_qp->ibqp.qp_type == IB_QPT_UD))
hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
if (is_user) {
ib_umem_release(hr_qp->umem);
} else {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
return 0;
}
static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret;
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
if (ret) {
dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
return ret;
}
if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
kfree(hr_to_hr_sqp(hr_qp));
else
kfree(hr_qp);
return 0;
}
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
.hw_profile = hns_roce_v2_profile,
.post_mbox = hns_roce_v2_post_mbox,
.chk_mbox = hns_roce_v2_chk_mbox,
.set_gid = hns_roce_v2_set_gid,
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
.modify_qp = hns_roce_v2_modify_qp,
.query_qp = hns_roce_v2_query_qp,
.destroy_qp = hns_roce_v2_destroy_qp,
.post_send = hns_roce_v2_post_send,
.post_recv = hns_roce_v2_post_recv,
.req_notify_cq = hns_roce_v2_req_notify_cq,
.poll_cq = hns_roce_v2_poll_cq,
};
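/*
* A hedged dispatch sketch: the device-independent core never calls the
* functions above directly; it indirects through hr_dev->hw, e.g.:
*
*	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask,
*				    cur_state, new_state);
*
* so different silicon generations can share one core with per-generation
* ops tables like this one.
*/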
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
/* required last entry */
{0, }
};
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct pci_device_id *id;
id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
if (!id) {
dev_err(hr_dev->dev, "device is not compatible!\n");
return -ENXIO;
}
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = hr_dev->sdb_offset;
/* Get info from NIC driver. */
hr_dev->reg_base = handle->rinfo.roce_io_base;
hr_dev->caps.num_ports = 1;
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
/* cmd issue mode: 0 is poll, 1 is event */
hr_dev->cmd_mod = 0;
hr_dev->loop_idc = 0;
return 0;
}
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
struct hns_roce_dev *hr_dev;
int ret;
hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
if (!hr_dev)
return -ENOMEM;
hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
if (!hr_dev->priv) {
ret = -ENOMEM;
goto error_failed_kzalloc;
}
hr_dev->pci_dev = handle->pdev;
hr_dev->dev = &handle->pdev->dev;
handle->priv = hr_dev;
ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
if (ret) {
dev_err(hr_dev->dev, "Get Configuration failed!\n");
goto error_failed_get_cfg;
}
ret = hns_roce_init(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
goto error_failed_get_cfg;
}
return 0;
error_failed_get_cfg:
kfree(hr_dev->priv);
error_failed_kzalloc:
ib_dealloc_device(&hr_dev->ib_dev);
return ret;
}
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
}
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
.init_instance = hns_roce_hw_v2_init_instance,
.uninit_instance = hns_roce_hw_v2_uninit_instance,
};
static struct hnae3_client hns_roce_hw_v2_client = {
.name = "hns_roce_hw_v2",
.type = HNAE3_CLIENT_ROCE,
.ops = &hns_roce_hw_v2_ops,
};
static int __init hns_roce_hw_v2_init(void)
{
return hnae3_register_client(&hns_roce_hw_v2_client);
}
static void __exit hns_roce_hw_v2_exit(void)
{
hnae3_unregister_client(&hns_roce_hw_v2_client);
}
module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
/*
* Copyright (c) 2016-2017 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _HNS_ROCE_HW_V2_H
#define _HNS_ROCE_HW_V2_H
#include <linux/bitops.h>
#define HNS_ROCE_VF_QPC_BT_NUM 256
#define HNS_ROCE_VF_SRQC_BT_NUM 64
#define HNS_ROCE_VF_CQC_BT_NUM 64
#define HNS_ROCE_VF_MPT_BT_NUM 64
#define HNS_ROCE_VF_EQC_NUM 64
#define HNS_ROCE_VF_SMAC_NUM 32
#define HNS_ROCE_VF_SGID_NUM 32
#define HNS_ROCE_VF_SL_NUM 8
#define HNS_ROCE_V2_MAX_QP_NUM 0x2000
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x100000
#define HNS_ROCE_V2_MAX_CQE_SEGS 0x10000
#define HNS_ROCE_V2_MAX_PD_NUM 0x400000
#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
#define HNS_ROCE_V2_QPC_ENTRY_SZ 256
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 200
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
#define HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT 0
#define HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT 1
#define HNS_ROCE_CMD_FLAG_NEXT_SHIFT 2
#define HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT 3
#define HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT 4
#define HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT 5
#define HNS_ROCE_CMD_FLAG_IN BIT(HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT)
#define HNS_ROCE_CMD_FLAG_OUT BIT(HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT)
#define HNS_ROCE_CMD_FLAG_NEXT BIT(HNS_ROCE_CMD_FLAG_NEXT_SHIFT)
#define HNS_ROCE_CMD_FLAG_WR BIT(HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT)
#define HNS_ROCE_CMD_FLAG_NO_INTR BIT(HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT)
#define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
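/*
* A hedged composition sketch: a CMQ descriptor's flag word is built by
* OR-ing the bits above; e.g. a driver-to-firmware request carrying inbound
* data, with the completion interrupt suppressed, would look like:
*
*	desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN |
*				 HNS_ROCE_CMD_FLAG_NO_INTR);
*/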
#define HNS_ROCE_CMQ_DESC_NUM_S 3
#define HNS_ROCE_CMQ_EN_B 16
#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
#define check_whether_last_step(hop_num, step_idx) \
((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
(step_idx == 1 && hop_num == 1) || \
(step_idx == 2 && hop_num == 2))
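/*
* A hedged walk-through of the macro above for a 2-level table
* (hop_num == 2): steps 0 and 1 program intermediate BT pages, and only
* step 2 is the last step:
*
*	check_whether_last_step(2, 0) -> false
*	check_whether_last_step(2, 1) -> false
*	check_whether_last_step(2, 2) -> true
*/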
#define V2_CQ_DB_REQ_NOT_SOL 0
#define V2_CQ_DB_REQ_NOT 1
#define V2_CQ_STATE_VALID 1
#define V2_QKEY_VAL 0x80010000
#define GID_LEN_V2 16
#define HNS_ROCE_V2_CQE_QPN_MASK 0x3ffff
enum {
HNS_ROCE_V2_WQE_OP_SEND = 0x0,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV = 0x1,
HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM = 0x2,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE = 0x3,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM = 0x4,
HNS_ROCE_V2_WQE_OP_RDMA_READ = 0x5,
HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP = 0x6,
HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD = 0x7,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9,
HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa,
HNS_ROCE_V2_WQE_OP_LOCAL_INV = 0xb,
HNS_ROCE_V2_WQE_OP_BIND_MW_TYPE = 0xc,
HNS_ROCE_V2_WQE_OP_MASK = 0x1f,
};
enum {
HNS_ROCE_SQ_OPCODE_SEND = 0x0,
HNS_ROCE_SQ_OPCODE_SEND_WITH_INV = 0x1,
HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM = 0x2,
HNS_ROCE_SQ_OPCODE_RDMA_WRITE = 0x3,
HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM = 0x4,
HNS_ROCE_SQ_OPCODE_RDMA_READ = 0x5,
HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP = 0x6,
HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD = 0x7,
HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP = 0x8,
HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD = 0x9,
HNS_ROCE_SQ_OPCODE_FAST_REG_WR = 0xa,
HNS_ROCE_SQ_OPCODE_LOCAL_INV = 0xb,
HNS_ROCE_SQ_OPCODE_BIND_MW = 0xc,
};
enum {
/* rq operations */
HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM = 0x0,
HNS_ROCE_V2_OPCODE_SEND = 0x1,
HNS_ROCE_V2_OPCODE_SEND_WITH_IMM = 0x2,
HNS_ROCE_V2_OPCODE_SEND_WITH_INV = 0x3,
};
enum {
HNS_ROCE_V2_SQ_DB = 0x0,
HNS_ROCE_V2_RQ_DB = 0x1,
HNS_ROCE_V2_SRQ_DB = 0x2,
HNS_ROCE_V2_CQ_DB_PTR = 0x3,
HNS_ROCE_V2_CQ_DB_NTR = 0x4,
};
enum {
HNS_ROCE_CQE_V2_SUCCESS = 0x00,
HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR = 0x01,
HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR = 0x02,
HNS_ROCE_CQE_V2_LOCAL_PROT_ERR = 0x04,
HNS_ROCE_CQE_V2_WR_FLUSH_ERR = 0x05,
HNS_ROCE_CQE_V2_MW_BIND_ERR = 0x06,
HNS_ROCE_CQE_V2_BAD_RESP_ERR = 0x10,
HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR = 0x11,
HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR = 0x12,
HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR = 0x13,
HNS_ROCE_CQE_V2_REMOTE_OP_ERR = 0x14,
HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR = 0x15,
HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR = 0x16,
HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR = 0x22,
HNS_ROCE_V2_CQE_STATUS_MASK = 0xff,
};
/* CMQ command */
enum hns_roce_opcode_type {
HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
};
enum {
TYPE_CRQ,
TYPE_CSQ,
};
enum hns_roce_cmd_return_status {
CMD_EXEC_SUCCESS = 0,
CMD_NO_AUTH = 1,
CMD_NOT_EXEC = 2,
CMD_QUEUE_FULL = 3,
};
struct hns_roce_v2_cq_context {
u32 byte_4_pg_ceqn;
u32 byte_8_cqn;
u32 cqe_cur_blk_addr;
u32 byte_16_hop_addr;
u32 cqe_nxt_blk_addr;
u32 byte_24_pgsz_addr;
u32 byte_28_cq_pi;
u32 byte_32_cq_ci;
u32 cqe_ba;
u32 byte_40_cqe_ba;
u32 byte_44_db_record;
u32 db_record_addr;
u32 byte_52_cqe_cnt;
u32 byte_56_cqe_period_maxcnt;
u32 cqe_report_timer;
u32 byte_64_se_cqe_idx;
};
#define V2_CQC_BYTE_4_CQ_ST_S 0
#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
#define V2_CQC_BYTE_4_POLL_S 2
#define V2_CQC_BYTE_4_SE_S 3
#define V2_CQC_BYTE_4_OVER_IGNORE_S 4
#define V2_CQC_BYTE_4_COALESCE_S 5
#define V2_CQC_BYTE_4_ARM_ST_S 6
#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6)
#define V2_CQC_BYTE_4_SHIFT_S 8
#define V2_CQC_BYTE_4_SHIFT_M GENMASK(12, 8)
#define V2_CQC_BYTE_4_CMD_SN_S 13
#define V2_CQC_BYTE_4_CMD_SN_M GENMASK(14, 13)
#define V2_CQC_BYTE_4_CEQN_S 15
#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15)
#define V2_CQC_BYTE_4_PAGE_OFFSET_S 24
#define V2_CQC_BYTE_4_PAGE_OFFSET_M GENMASK(31, 24)
#define V2_CQC_BYTE_8_CQN_S 0
#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
#define V2_CQC_BYTE_16_CQE_HOP_NUM_S 30
#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30)
#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S 0
#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M GENMASK(19, 0)
#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_S 24
#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_M GENMASK(27, 24)
#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S 28
#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M GENMASK(31, 28)
#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0
#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M GENMASK(23, 0)
#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0
#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0)
#define V2_CQC_BYTE_40_CQE_BA_S 0
#define V2_CQC_BYTE_40_CQE_BA_M GENMASK(28, 0)
#define V2_CQC_BYTE_44_DB_RECORD_EN_S 0
#define V2_CQC_BYTE_52_CQE_CNT_S 0
#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0)
#define V2_CQC_BYTE_56_CQ_MAX_CNT_S 0
#define V2_CQC_BYTE_56_CQ_MAX_CNT_M GENMASK(15, 0)
#define V2_CQC_BYTE_56_CQ_PERIOD_S 16
#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16)
#define V2_CQC_BYTE_64_SE_CQE_IDX_S 0
#define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
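/*
* A hedged sketch of event coalescing via the CQ_MAX_CNT/CQ_PERIOD fields
* above (the period's time units are device-defined); "notify after 16
* CQEs or one period, whichever comes first" would look like:
*
*	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
*		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
*		       V2_CQC_BYTE_56_CQ_MAX_CNT_S, 16);
*	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
*		       V2_CQC_BYTE_56_CQ_PERIOD_M,
*		       V2_CQC_BYTE_56_CQ_PERIOD_S, 1);
*/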
enum {
V2_MPT_ST_VALID = 0x1,
};
enum hns_roce_v2_qp_state {
HNS_ROCE_QP_ST_RST,
HNS_ROCE_QP_ST_INIT,
HNS_ROCE_QP_ST_RTR,
HNS_ROCE_QP_ST_RTS,
HNS_ROCE_QP_ST_SQER,
HNS_ROCE_QP_ST_SQD,
HNS_ROCE_QP_ST_ERR,
HNS_ROCE_QP_ST_SQ_DRAINING,
HNS_ROCE_QP_NUM_ST
};
struct hns_roce_v2_qp_context {
u32 byte_4_sqpn_tst;
u32 wqe_sge_ba;
u32 byte_12_sq_hop;
u32 byte_16_buf_ba_pg_sz;
u32 byte_20_smac_sgid_idx;
u32 byte_24_mtu_tc;
u32 byte_28_at_fl;
u8 dgid[GID_LEN_V2];
u32 dmac;
u32 byte_52_udpspn_dmac;
u32 byte_56_dqpn_err;
u32 byte_60_qpst_mapid;
u32 qkey_xrcd;
u32 byte_68_rq_db;
u32 rq_db_record_addr;
u32 byte_76_srqn_op_en;
u32 byte_80_rnr_rx_cqn;
u32 byte_84_rq_ci_pi;
u32 rq_cur_blk_addr;
u32 byte_92_srq_info;
u32 byte_96_rx_reqmsn;
u32 rq_nxt_blk_addr;
u32 byte_104_rq_sge;
u32 byte_108_rx_reqepsn;
u32 rq_rnr_timer;
u32 rx_msg_len;
u32 rx_rkey_pkt_info;
u64 rx_va;
u32 byte_132_trrl;
u32 trrl_ba;
u32 byte_140_raq;
u32 byte_144_raq;
u32 byte_148_raq;
u32 byte_152_raq;
u32 byte_156_raq;
u32 byte_160_sq_ci_pi;
u32 sq_cur_blk_addr;
u32 byte_168_irrl_idx;
u32 byte_172_sq_psn;
u32 byte_176_msg_pktn;
u32 sq_cur_sqe_blk_addr;
u32 byte_184_irrl_idx;
u32 cur_sge_offset;
u32 byte_192_ext_sge;
u32 byte_196_sq_psn;
u32 byte_200_sq_max;
u32 irrl_ba;
u32 byte_208_irrl;
u32 byte_212_lsn;
u32 sq_timer;
u32 byte_220_retry_psn_msn;
u32 byte_224_retry_msg;
u32 rx_sq_cur_blk_addr;
u32 byte_232_irrl_sge;
u32 irrl_cur_sge_offset;
u32 byte_240_irrl_tail;
u32 byte_244_rnr_rxack;
u32 byte_248_ack_psn;
u32 byte_252_err_txcqn;
u32 byte_256_sqflush_rqcqe;
};
#define V2_QPC_BYTE_4_TST_S 0
#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
#define V2_QPC_BYTE_4_SGE_SHIFT_S 3
#define V2_QPC_BYTE_4_SGE_SHIFT_M GENMASK(7, 3)
#define V2_QPC_BYTE_4_SQPN_S 8
#define V2_QPC_BYTE_4_SQPN_M GENMASK(31, 8)
#define V2_QPC_BYTE_12_WQE_SGE_BA_S 0
#define V2_QPC_BYTE_12_WQE_SGE_BA_M GENMASK(28, 0)
#define V2_QPC_BYTE_12_SQ_HOP_NUM_S 29
#define V2_QPC_BYTE_12_SQ_HOP_NUM_M GENMASK(30, 29)
#define V2_QPC_BYTE_12_RSVD_LKEY_EN_S 31
#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S 0
#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M GENMASK(3, 0)
#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S 4
#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M GENMASK(7, 4)
#define V2_QPC_BYTE_16_PD_S 8
#define V2_QPC_BYTE_16_PD_M GENMASK(31, 8)
#define V2_QPC_BYTE_20_RQ_HOP_NUM_S 0
#define V2_QPC_BYTE_20_RQ_HOP_NUM_M GENMASK(1, 0)
#define V2_QPC_BYTE_20_SGE_HOP_NUM_S 2
#define V2_QPC_BYTE_20_SGE_HOP_NUM_M GENMASK(3, 2)
#define V2_QPC_BYTE_20_RQWS_S 4
#define V2_QPC_BYTE_20_RQWS_M GENMASK(7, 4)
#define V2_QPC_BYTE_20_SQ_SHIFT_S 8
#define V2_QPC_BYTE_20_SQ_SHIFT_M GENMASK(11, 8)
#define V2_QPC_BYTE_20_RQ_SHIFT_S 12
#define V2_QPC_BYTE_20_RQ_SHIFT_M GENMASK(15, 12)
#define V2_QPC_BYTE_20_SGID_IDX_S 16
#define V2_QPC_BYTE_20_SGID_IDX_M GENMASK(23, 16)
#define V2_QPC_BYTE_20_SMAC_IDX_S 24
#define V2_QPC_BYTE_20_SMAC_IDX_M GENMASK(31, 24)
#define V2_QPC_BYTE_24_HOP_LIMIT_S 0
#define V2_QPC_BYTE_24_HOP_LIMIT_M GENMASK(7, 0)
#define V2_QPC_BYTE_24_TC_S 8
#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
#define V2_QPC_BYTE_24_VLAN_IDX_S 16
#define V2_QPC_BYTE_24_VLAN_IDX_M GENMASK(27, 16)
#define V2_QPC_BYTE_24_MTU_S 28
#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
#define V2_QPC_BYTE_28_FL_S 0
#define V2_QPC_BYTE_28_FL_M GENMASK(19, 0)
#define V2_QPC_BYTE_28_SL_S 20
#define V2_QPC_BYTE_28_SL_M GENMASK(23, 20)
#define V2_QPC_BYTE_28_CNP_TX_FLAG_S 24
#define V2_QPC_BYTE_28_CE_FLAG_S 25
#define V2_QPC_BYTE_28_LBI_S 26
#define V2_QPC_BYTE_28_AT_S 27
#define V2_QPC_BYTE_28_AT_M GENMASK(31, 27)
#define V2_QPC_BYTE_52_DMAC_S 0
#define V2_QPC_BYTE_52_DMAC_M GENMASK(15, 0)
#define V2_QPC_BYTE_52_UDPSPN_S 16
#define V2_QPC_BYTE_52_UDPSPN_M GENMASK(31, 16)
#define V2_QPC_BYTE_56_DQPN_S 0
#define V2_QPC_BYTE_56_DQPN_M GENMASK(23, 0)
#define V2_QPC_BYTE_56_SQ_TX_ERR_S 24
#define V2_QPC_BYTE_56_SQ_RX_ERR_S 25
#define V2_QPC_BYTE_56_RQ_TX_ERR_S 26
#define V2_QPC_BYTE_56_RQ_RX_ERR_S 27
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
#define V2_QPC_BYTE_60_MAPID_S 0
#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13
#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14
#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15
#define V2_QPC_BYTE_60_TEMPID_S 16
#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16)
#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23
#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28
#define V2_QPC_BYTE_60_QP_ST_S 29
#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
#define V2_QPC_BYTE_68_RQ_RECORD_EN_S 0
#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S 1
#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M GENMASK(31, 1)
#define V2_QPC_BYTE_76_SRQN_S 0
#define V2_QPC_BYTE_76_SRQN_M GENMASK(23, 0)
#define V2_QPC_BYTE_76_SRQ_EN_S 24
#define V2_QPC_BYTE_76_RRE_S 25
#define V2_QPC_BYTE_76_RWE_S 26
#define V2_QPC_BYTE_76_ATE_S 27
#define V2_QPC_BYTE_76_RQIE_S 28
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
#define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S 0
#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S 16
#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M GENMASK(31, 16)
#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S 0
#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_92_SRQ_INFO_S 20
#define V2_QPC_BYTE_92_SRQ_INFO_M GENMASK(31, 20)
#define V2_QPC_BYTE_96_RX_REQ_MSN_S 0
#define V2_QPC_BYTE_96_RX_REQ_MSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S 0
#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S 24
#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M GENMASK(31, 24)
#define V2_QPC_BYTE_108_INV_CREDIT_S 0
#define V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S 3
#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S 4
#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M GENMASK(6, 4)
#define V2_QPC_BYTE_108_RX_REQ_RNR_S 7
#define V2_QPC_BYTE_108_RX_REQ_EPSN_S 8
#define V2_QPC_BYTE_108_RX_REQ_EPSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_S 0
#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_M GENMASK(7, 0)
#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_S 8
#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_M GENMASK(15, 8)
#define V2_QPC_BYTE_132_TRRL_BA_S 16
#define V2_QPC_BYTE_132_TRRL_BA_M GENMASK(31, 16)
#define V2_QPC_BYTE_140_TRRL_BA_S 0
#define V2_QPC_BYTE_140_TRRL_BA_M GENMASK(11, 0)
#define V2_QPC_BYTE_140_RR_MAX_S 12
#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S 24
#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M GENMASK(31, 24)
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
#define V2_QPC_BYTE_144_RESP_RTY_FLG_S 31
#define V2_QPC_BYTE_148_RQ_MSN_S 0
#define V2_QPC_BYTE_148_RQ_MSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
#define V2_QPC_BYTE_152_RAQ_PSN_S 8
#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
#define V2_QPC_BYTE_156_RAQ_USE_PKTN_S 0
#define V2_QPC_BYTE_156_RAQ_USE_PKTN_M GENMASK(23, 0)
#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S 0
#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S 16
#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M GENMASK(31, 16)
#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S 0
#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S 20
#define V2_QPC_BYTE_168_LP_SGEN_INI_S 21
#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 21)
#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
#define V2_QPC_BYTE_172_ACK_REQ_FREQ_S 0
#define V2_QPC_BYTE_172_ACK_REQ_FREQ_M GENMASK(5, 0)
#define V2_QPC_BYTE_172_MSG_RNR_FLG_S 6
#define V2_QPC_BYTE_172_FRE_S 7
#define V2_QPC_BYTE_172_SQ_CUR_PSN_S 8
#define V2_QPC_BYTE_172_SQ_CUR_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_176_MSG_USE_PKTN_S 0
#define V2_QPC_BYTE_176_MSG_USE_PKTN_M GENMASK(23, 0)
#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_S 24
#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_M GENMASK(31, 24)
#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S 0
#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_184_IRRL_IDX_MSB_S 20
#define V2_QPC_BYTE_184_IRRL_IDX_MSB_M GENMASK(31, 20)
#define V2_QPC_BYTE_192_CUR_SGE_IDX_S 0
#define V2_QPC_BYTE_192_CUR_SGE_IDX_M GENMASK(23, 0)
#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S 24
#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M GENMASK(31, 24)
#define V2_QPC_BYTE_196_IRRL_HEAD_S 0
#define V2_QPC_BYTE_196_IRRL_HEAD_M GENMASK(7, 0)
#define V2_QPC_BYTE_196_SQ_MAX_PSN_S 8
#define V2_QPC_BYTE_196_SQ_MAX_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_200_SQ_MAX_IDX_S 0
#define V2_QPC_BYTE_200_SQ_MAX_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_S 16
#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_M GENMASK(31, 16)
#define V2_QPC_BYTE_208_IRRL_BA_S 0
#define V2_QPC_BYTE_208_IRRL_BA_M GENMASK(25, 0)
#define V2_QPC_BYTE_208_PKT_RNR_FLG_S 26
#define V2_QPC_BYTE_208_PKT_RTY_FLG_S 27
#define V2_QPC_BYTE_208_RMT_E2E_S 28
#define V2_QPC_BYTE_208_SR_MAX_S 29
#define V2_QPC_BYTE_208_SR_MAX_M GENMASK(31, 29)
#define V2_QPC_BYTE_212_LSN_S 0
#define V2_QPC_BYTE_212_LSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_212_RETRY_NUM_INIT_S 24
#define V2_QPC_BYTE_212_RETRY_NUM_INIT_M GENMASK(26, 24)
#define V2_QPC_BYTE_212_CHECK_FLG_S 27
#define V2_QPC_BYTE_212_CHECK_FLG_M GENMASK(28, 27)
#define V2_QPC_BYTE_212_RETRY_CNT_S 29
#define V2_QPC_BYTE_212_RETRY_CNT_M GENMASK(31, 29)
#define V2_QPC_BYTE_220_RETRY_MSG_MSN_S 0
#define V2_QPC_BYTE_220_RETRY_MSG_MSN_M GENMASK(15, 0)
#define V2_QPC_BYTE_220_RETRY_MSG_PSN_S 16
#define V2_QPC_BYTE_220_RETRY_MSG_PSN_M GENMASK(31, 16)
#define V2_QPC_BYTE_224_RETRY_MSG_PSN_S 0
#define V2_QPC_BYTE_224_RETRY_MSG_PSN_M GENMASK(7, 0)
#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S 8
#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M GENMASK(31, 8)
#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S 0
#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
#define V2_QPC_BYTE_240_IRRL_TAIL_RD_S 8
#define V2_QPC_BYTE_240_IRRL_TAIL_RD_M GENMASK(15, 8)
#define V2_QPC_BYTE_240_RX_ACK_MSN_S 16
#define V2_QPC_BYTE_240_RX_ACK_MSN_M GENMASK(31, 16)
#define V2_QPC_BYTE_244_RX_ACK_EPSN_S 0
#define V2_QPC_BYTE_244_RX_ACK_EPSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_244_RNR_NUM_INIT_S 24
#define V2_QPC_BYTE_244_RNR_NUM_INIT_M GENMASK(26, 24)
#define V2_QPC_BYTE_244_RNR_CNT_S 27
#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
#define V2_QPC_BYTE_248_IRRL_PSN_S 0
#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
#define V2_QPC_BYTE_248_ACK_PSN_ERR_S 24
#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S 25
#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M GENMASK(26, 25)
#define V2_QPC_BYTE_248_IRRL_PSN_VLD_S 27
#define V2_QPC_BYTE_248_RNR_RETRY_FLAG_S 28
#define V2_QPC_BYTE_248_CQ_ERR_IND_S 31
#define V2_QPC_BYTE_252_TX_CQN_S 0
#define V2_QPC_BYTE_252_TX_CQN_M GENMASK(23, 0)
#define V2_QPC_BYTE_252_SIG_TYPE_S 24
#define V2_QPC_BYTE_252_ERR_TYPE_S 25
#define V2_QPC_BYTE_252_ERR_TYPE_M GENMASK(31, 25)
#define V2_QPC_BYTE_256_RQ_CQE_IDX_S 0
#define V2_QPC_BYTE_256_RQ_CQE_IDX_M GENMASK(15, 0)
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
struct hns_roce_v2_cqe {
u32 byte_4;
u32 rkey_immtdata;
u32 byte_12;
u32 byte_16;
u32 byte_cnt;
u32 smac;
u32 byte_28;
u32 byte_32;
};
#define V2_CQE_BYTE_4_OPCODE_S 0
#define V2_CQE_BYTE_4_OPCODE_M GENMASK(4, 0)
#define V2_CQE_BYTE_4_RQ_INLINE_S 5
#define V2_CQE_BYTE_4_S_R_S 6
#define V2_CQE_BYTE_4_OWNER_S 7
#define V2_CQE_BYTE_4_STATUS_S 8
#define V2_CQE_BYTE_4_STATUS_M GENMASK(15, 8)
#define V2_CQE_BYTE_4_WQE_INDX_S 16
#define V2_CQE_BYTE_4_WQE_INDX_M GENMASK(31, 16)
#define V2_CQE_BYTE_12_XRC_SRQN_S 0
#define V2_CQE_BYTE_12_XRC_SRQN_M GENMASK(23, 0)
#define V2_CQE_BYTE_16_LCL_QPN_S 0
#define V2_CQE_BYTE_16_LCL_QPN_M GENMASK(23, 0)
#define V2_CQE_BYTE_16_SUB_STATUS_S 24
#define V2_CQE_BYTE_16_SUB_STATUS_M GENMASK(31, 24)
#define V2_CQE_BYTE_28_SMAC_4_S 0
#define V2_CQE_BYTE_28_SMAC_4_M GENMASK(7, 0)
#define V2_CQE_BYTE_28_SMAC_5_S 8
#define V2_CQE_BYTE_28_SMAC_5_M GENMASK(15, 8)
#define V2_CQE_BYTE_28_PORT_TYPE_S 16
#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
#define V2_CQE_BYTE_32_RMT_QPN_S 0
#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
#define V2_CQE_BYTE_32_SL_S 24
#define V2_CQE_BYTE_32_SL_M GENMASK(26, 24)
#define V2_CQE_BYTE_32_PORTN_S 27
#define V2_CQE_BYTE_32_PORTN_M GENMASK(29, 27)
#define V2_CQE_BYTE_32_GRH_S 30
#define V2_CQE_BYTE_32_LPK_S 31
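/*
* A hedged decode sketch for a polled CQE using the accessors above; the
* poll path is expected to classify completions roughly like:
*
*	u32 status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
*				    V2_CQE_BYTE_4_STATUS_S) &
*		     HNS_ROCE_V2_CQE_STATUS_MASK;
*	u32 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
*				 V2_CQE_BYTE_16_LCL_QPN_S) &
*		  HNS_ROCE_V2_CQE_QPN_MASK;
*	bool is_sq = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
*/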
struct hns_roce_v2_mpt_entry {
__le32 byte_4_pd_hop_st;
__le32 byte_8_mw_cnt_en;
__le32 byte_12_mw_pa;
__le32 bound_lkey;
__le32 len_l;
__le32 len_h;
__le32 lkey;
__le32 va_l;
__le32 va_h;
__le32 pbl_size;
__le32 pbl_ba_l;
__le32 byte_48_mode_ba;
__le32 pa0_l;
__le32 byte_56_pa0_h;
__le32 pa1_l;
__le32 byte_64_buf_pa1;
};
#define V2_MPT_BYTE_4_MPT_ST_S 0
#define V2_MPT_BYTE_4_MPT_ST_M GENMASK(1, 0)
#define V2_MPT_BYTE_4_PBL_HOP_NUM_S 2
#define V2_MPT_BYTE_4_PBL_HOP_NUM_M GENMASK(3, 2)
#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_S 4
#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_M GENMASK(7, 4)
#define V2_MPT_BYTE_4_PD_S 8
#define V2_MPT_BYTE_4_PD_M GENMASK(31, 8)
#define V2_MPT_BYTE_8_RA_EN_S 0
#define V2_MPT_BYTE_8_R_INV_EN_S 1
#define V2_MPT_BYTE_8_L_INV_EN_S 2
#define V2_MPT_BYTE_8_BIND_EN_S 3
#define V2_MPT_BYTE_8_ATOMIC_EN_S 4
#define V2_MPT_BYTE_8_RR_EN_S 5
#define V2_MPT_BYTE_8_RW_EN_S 6
#define V2_MPT_BYTE_8_LW_EN_S 7
#define V2_MPT_BYTE_12_PA_S 1
#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
#define V2_MPT_BYTE_12_MW_BIND_QPN_M GENMASK(31, 8)
#define V2_MPT_BYTE_48_PBL_BA_H_S 0
#define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
#define V2_MPT_BYTE_48_BLK_MODE_S 29
#define V2_MPT_BYTE_56_PA0_H_S 0
#define V2_MPT_BYTE_56_PA0_H_M GENMASK(25, 0)
#define V2_MPT_BYTE_64_PA1_H_S 0
#define V2_MPT_BYTE_64_PA1_H_M GENMASK(25, 0)
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
#define V2_DB_BYTE_4_TAG_S 0
#define V2_DB_BYTE_4_TAG_M GENMASK(23, 0)
#define V2_DB_BYTE_4_CMD_S 24
#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
#define V2_DB_PARAMETER_CONS_IDX_S 0
#define V2_DB_PARAMETER_CONS_IDX_M GENMASK(15, 0)
#define V2_DB_PARAMETER_SL_S 16
#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
struct hns_roce_v2_cq_db {
u32 byte_4;
u32 parameter;
};
#define V2_CQ_DB_BYTE_4_TAG_S 0
#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
#define V2_CQ_DB_BYTE_4_CMD_S 24
#define V2_CQ_DB_BYTE_4_CMD_M GENMASK(27, 24)
#define V2_CQ_DB_PARAMETER_CONS_IDX_S 0
#define V2_CQ_DB_PARAMETER_CONS_IDX_M GENMASK(23, 0)
#define V2_CQ_DB_PARAMETER_CMD_SN_S 25
#define V2_CQ_DB_PARAMETER_CMD_SN_M GENMASK(26, 25)
#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
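/*
* A hedged sketch of arming a CQ with the doorbell layout above; hr_cq's
* cqn/cons_index field names are assumed from the driver's CQ struct:
*
*	struct hns_roce_v2_cq_db db = { 0 };
*
*	roce_set_field(db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
*		       V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
*	roce_set_field(db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
*		       V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_NTR);
*	roce_set_field(db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
*		       V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
*	roce_set_bit(db.parameter, V2_CQ_DB_PARAMETER_NOTIFY_S, notify_flag);
*/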
struct hns_roce_v2_rc_send_wqe {
u32 byte_4;
u32 msg_len;
u32 inv_key_immtdata;
u32 byte_16;
u32 byte_20;
u32 rkey;
u64 va;
};
#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7
#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8
#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9
#define V2_RC_SEND_WQE_BYTE_4_SO_S 10
#define V2_RC_SEND_WQE_BYTE_4_SE_S 11
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24
#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
struct hns_roce_v2_wqe_data_seg {
__be32 len;
__be32 lkey;
__be64 addr;
};
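/*
* A hedged fill sketch: the post-send path is expected to copy each ib_sge
* into this big-endian layout, e.g.:
*
*	dseg->len  = cpu_to_be32(sg->length);
*	dseg->lkey = cpu_to_be32(sg->lkey);
*	dseg->addr = cpu_to_be64(sg->addr);
*/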
struct hns_roce_v2_db {
u32 byte_4;
u32 parameter;
};
struct hns_roce_query_version {
__le16 rocee_vendor_id;
__le16 rocee_hw_version;
__le32 rsv[5];
};
struct hns_roce_cfg_global_param {
__le32 time_cfg_udp_port;
__le32 rsv[5];
};
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S 0
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M GENMASK(9, 0)
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
struct hns_roce_pf_res {
__le32 rsv;
__le32 qpc_bt_idx_num;
__le32 srqc_bt_idx_num;
__le32 cqc_bt_idx_num;
__le32 mpt_bt_idx_num;
__le32 eqc_bt_idx_num;
};
#define PF_RES_DATA_1_PF_QPC_BT_IDX_S 0
#define PF_RES_DATA_1_PF_QPC_BT_IDX_M GENMASK(10, 0)
#define PF_RES_DATA_1_PF_QPC_BT_NUM_S 16
#define PF_RES_DATA_1_PF_QPC_BT_NUM_M GENMASK(27, 16)
#define PF_RES_DATA_2_PF_SRQC_BT_IDX_S 0
#define PF_RES_DATA_2_PF_SRQC_BT_IDX_M GENMASK(8, 0)
#define PF_RES_DATA_2_PF_SRQC_BT_NUM_S 16
#define PF_RES_DATA_2_PF_SRQC_BT_NUM_M GENMASK(25, 16)
#define PF_RES_DATA_3_PF_CQC_BT_IDX_S 0
#define PF_RES_DATA_3_PF_CQC_BT_IDX_M GENMASK(8, 0)
#define PF_RES_DATA_3_PF_CQC_BT_NUM_S 16
#define PF_RES_DATA_3_PF_CQC_BT_NUM_M GENMASK(25, 16)
#define PF_RES_DATA_4_PF_MPT_BT_IDX_S 0
#define PF_RES_DATA_4_PF_MPT_BT_IDX_M GENMASK(8, 0)
#define PF_RES_DATA_4_PF_MPT_BT_NUM_S 16
#define PF_RES_DATA_4_PF_MPT_BT_NUM_M GENMASK(25, 16)
#define PF_RES_DATA_5_PF_EQC_BT_IDX_S 0
#define PF_RES_DATA_5_PF_EQC_BT_IDX_M GENMASK(8, 0)
#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
struct hns_roce_vf_res_a {
u32 vf_id;
u32 vf_qpc_bt_idx_num;
u32 vf_srqc_bt_idx_num;
u32 vf_cqc_bt_idx_num;
u32 vf_mpt_bt_idx_num;
u32 vf_eqc_bt_idx_num;
};
#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_M GENMASK(10, 0)
#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_S 16
#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_M GENMASK(27, 16)
#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S 0
#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M GENMASK(8, 0)
#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S 16
#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M GENMASK(25, 16)
#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_S 0
#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_M GENMASK(8, 0)
#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_S 16
#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_M GENMASK(25, 16)
#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_S 0
#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_M GENMASK(8, 0)
#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_S 16
#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_M GENMASK(25, 16)
#define VF_RES_A_DATA_5_VF_EQC_IDX_S 0
#define VF_RES_A_DATA_5_VF_EQC_IDX_M GENMASK(8, 0)
#define VF_RES_A_DATA_5_VF_EQC_NUM_S 16
#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
struct hns_roce_vf_res_b {
u32 rsv0;
u32 vf_smac_idx_num;
u32 vf_sgid_idx_num;
u32 vf_qid_idx_sl_num;
u32 rsv[2];
};
#define VF_RES_B_DATA_0_VF_ID_S 0
#define VF_RES_B_DATA_0_VF_ID_M GENMASK(7, 0)
#define VF_RES_B_DATA_1_VF_SMAC_IDX_S 0
#define VF_RES_B_DATA_1_VF_SMAC_IDX_M GENMASK(7, 0)
#define VF_RES_B_DATA_1_VF_SMAC_NUM_S 8
#define VF_RES_B_DATA_1_VF_SMAC_NUM_M GENMASK(16, 8)
#define VF_RES_B_DATA_2_VF_SGID_IDX_S 0
#define VF_RES_B_DATA_2_VF_SGID_IDX_M GENMASK(7, 0)
#define VF_RES_B_DATA_2_VF_SGID_NUM_S 8
#define VF_RES_B_DATA_2_VF_SGID_NUM_M GENMASK(16, 8)
#define VF_RES_B_DATA_3_VF_QID_IDX_S 0
#define VF_RES_B_DATA_3_VF_QID_IDX_M GENMASK(9, 0)
#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
/* Reg field definition */
#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S 0
#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M GENMASK(15, 0)
#define ROCEE_VF_SGID_CFG4_SGID_TYPE_S 0
#define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
struct hns_roce_cfg_bt_attr {
u32 vf_qpc_cfg;
u32 vf_srqc_cfg;
u32 vf_cqc_cfg;
u32 vf_mpt_cfg;
u32 rsv[2];
};
#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M GENMASK(3, 0)
#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S 4
#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M GENMASK(7, 4)
#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M GENMASK(9, 8)
#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S 0
#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M GENMASK(3, 0)
#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S 4
#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M GENMASK(7, 4)
#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M GENMASK(9, 8)
#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S 0
#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M GENMASK(3, 0)
#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S 4
#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M GENMASK(7, 4)
#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M GENMASK(9, 8)
#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S 0
#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M GENMASK(3, 0)
#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S 4
#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M GENMASK(7, 4)
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
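
The BA_PGSZ/BUF_PGSZ fields encode a page size as a shift over the base page, and HOPNUM selects the depth of the table walk. A worked reading, assuming a 4 KB PAGE_SIZE as used elsewhere in the driver:

/*
 * VF_QPC_BA_PGSZ = 0 -> 4 KB base-address-table pages
 * VF_QPC_BA_PGSZ = 3 -> 4 KB << 3 = 32 KB pages
 * VF_QPC_HOPNUM  = 2 -> two-hop table walk for QPC
 */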
struct hns_roce_cmq_desc {
u16 opcode;
u16 flag;
u16 retval;
u16 rsv;
u32 data[6];
};
#define ROCEE_VF_MB_CFG0_REG 0x40
#define ROCEE_VF_MB_STATUS_REG 0x58
#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF
#define HNS_ROCE_VF_MB4_TAG_MASK 0xFFFFFF00
#define HNS_ROCE_VF_MB4_TAG_SHIFT 8
#define HNS_ROCE_VF_MB4_CMD_MASK 0xFF
#define HNS_ROCE_VF_MB4_CMD_SHIFT 0
#define HNS_ROCE_VF_MB5_EVENT_MASK 0x10000
#define HNS_ROCE_VF_MB5_EVENT_SHIFT 16
#define HNS_ROCE_VF_MB5_TOKEN_MASK 0xFFFF
#define HNS_ROCE_VF_MB5_TOKEN_SHIFT 0
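
Read together, these masks say mailbox word 4 packs the 24-bit tag above the command byte, and word 5 packs the event flag above the 16-bit token. A hedged sketch of composing the two words (variable names illustrative, not from the patch):

	u32 mb4 = ((tag << HNS_ROCE_VF_MB4_TAG_SHIFT) &
		   HNS_ROCE_VF_MB4_TAG_MASK) |
		  ((cmd << HNS_ROCE_VF_MB4_CMD_SHIFT) &
		   HNS_ROCE_VF_MB4_CMD_MASK);
	u32 mb5 = ((event << HNS_ROCE_VF_MB5_EVENT_SHIFT) &
		   HNS_ROCE_VF_MB5_EVENT_MASK) |
		  ((token << HNS_ROCE_VF_MB5_TOKEN_SHIFT) &
		   HNS_ROCE_VF_MB5_TOKEN_MASK);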
struct hns_roce_v2_cmq_ring {
dma_addr_t desc_dma_addr;
struct hns_roce_cmq_desc *desc;
u32 head;
u32 tail;
u16 buf_size;
u16 desc_num;
int next_to_use;
int next_to_clean;
u8 flag;
spinlock_t lock; /* command queue lock */
};
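
next_to_use and next_to_clean implement the usual producer/consumer ring over desc_num descriptors. A sketch of the wrap arithmetic (helper name illustrative, not from the patch):

static int cmq_ring_space(struct hns_roce_v2_cmq_ring *ring)
{
	/* free slots between producer and consumer, keeping one gap */
	return ring->desc_num - 1 -
	       (ring->next_to_use - ring->next_to_clean + ring->desc_num) %
	       ring->desc_num;
}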
struct hns_roce_v2_cmq {
struct hns_roce_v2_cmq_ring csq;
struct hns_roce_v2_cmq_ring crq;
u16 tx_timeout;
u16 last_status;
};
struct hns_roce_v2_priv {
struct hns_roce_v2_cmq cmq;
};
#endif
@@ -57,6 +57,7 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}
+EXPORT_SYMBOL_GPL(hns_get_gid_index);

static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
@@ -116,7 +117,7 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct net_device *netdev;

	netdev = hr_dev->iboe.netdevs[port];
@@ -174,8 +175,9 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
-		hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
-				    hr_dev->caps.max_mtu);
+		if (hr_dev->hw->set_mtu)
+			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+					    hr_dev->caps.max_mtu);
		hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
	}
@@ -200,7 +202,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
-	props->max_sge = hr_dev->caps.max_sq_sg;
+	props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
@@ -238,7 +240,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
@@ -379,7 +381,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
				       to_hr_ucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
-	} else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+	} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
+		   hr_dev->tptr_size) {
		/* vm_pgoff: 1 -- TPTR */
		if (io_remap_pfn_range(vma, vma->vm_start,
				       hr_dev->tptr_dma_addr >> PAGE_SHIFT,
@@ -426,7 +429,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);
@@ -531,173 +534,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
	return ret;
}

-static const struct of_device_id hns_roce_of_match[] = {
-	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
-	{},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
-	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
-	{},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static int hns_roce_node_match(struct device *dev, void *fwnode)
-{
-	return dev->fwnode == fwnode;
-}
-
-static struct
-platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
-	struct device *dev;
-
-	/* get the 'device' corresponding to matching 'fwnode' */
-	dev = bus_find_device(&platform_bus_type, NULL,
-			      fwnode, hns_roce_node_match);
-	/* get the platform device */
-	return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
-	int i;
-	int ret;
-	u8 phy_port;
-	int port_cnt = 0;
-	struct device *dev = &hr_dev->pdev->dev;
-	struct device_node *net_node;
-	struct net_device *netdev = NULL;
-	struct platform_device *pdev = NULL;
-	struct resource *res;
-
-	/* check if we are compatible with the underlying SoC */
-	if (dev_of_node(dev)) {
-		const struct of_device_id *of_id;
-
-		of_id = of_match_node(hns_roce_of_match, dev->of_node);
-		if (!of_id) {
-			dev_err(dev, "device is not compatible!\n");
-			return -ENXIO;
-		}
-		hr_dev->hw = (struct hns_roce_hw *)of_id->data;
-		if (!hr_dev->hw) {
-			dev_err(dev, "couldn't get H/W specific DT data!\n");
-			return -ENXIO;
-		}
-	} else if (is_acpi_device_node(dev->fwnode)) {
-		const struct acpi_device_id *acpi_id;
-
-		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
-		if (!acpi_id) {
-			dev_err(dev, "device is not compatible!\n");
-			return -ENXIO;
-		}
-		hr_dev->hw = (struct hns_roce_hw *) acpi_id->driver_data;
-		if (!hr_dev->hw) {
-			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
-			return -ENXIO;
-		}
-	} else {
-		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
-		return -ENXIO;
-	}
-
-	/* get the mapped register base address */
-	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "memory resource not found!\n");
-		return -EINVAL;
-	}
-	hr_dev->reg_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(hr_dev->reg_base))
-		return PTR_ERR(hr_dev->reg_base);
-
-	/* read the node_guid of IB device from the DT or ACPI */
-	ret = device_property_read_u8_array(dev, "node-guid",
-					    (u8 *)&hr_dev->ib_dev.node_guid,
-					    GUID_LEN);
-	if (ret) {
-		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
-		return ret;
-	}
-
-	/* get the RoCE associated ethernet ports or netdevices */
-	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
-		if (dev_of_node(dev)) {
-			net_node = of_parse_phandle(dev->of_node, "eth-handle",
-						    i);
-			if (!net_node)
-				continue;
-			pdev = of_find_device_by_node(net_node);
-		} else if (is_acpi_device_node(dev->fwnode)) {
-			struct acpi_reference_args args;
-			struct fwnode_handle *fwnode;
-
-			ret = acpi_node_get_property_reference(dev->fwnode,
-							       "eth-handle",
-							       i, &args);
-			if (ret)
-				continue;
-			fwnode = acpi_fwnode_handle(args.adev);
-			pdev = hns_roce_find_pdev(fwnode);
-		} else {
-			dev_err(dev, "cannot read data from DT or ACPI\n");
-			return -ENXIO;
-		}
-
-		if (pdev) {
-			netdev = platform_get_drvdata(pdev);
-			phy_port = (u8)i;
-			if (netdev) {
-				hr_dev->iboe.netdevs[port_cnt] = netdev;
-				hr_dev->iboe.phy_port[port_cnt] = phy_port;
-			} else {
-				dev_err(dev, "no netdev found with pdev %s\n",
-					pdev->name);
-				return -ENODEV;
-			}
-			port_cnt++;
-		}
-	}
-
-	if (port_cnt == 0) {
-		dev_err(dev, "unable to get eth-handle for available ports!\n");
-		return -EINVAL;
-	}
-
-	hr_dev->caps.num_ports = port_cnt;
-
-	/* cmd issue mode: 0 is poll, 1 is event */
-	hr_dev->cmd_mod = 1;
-	hr_dev->loop_idc = 0;
-
-	/* read the interrupt names from the DT or ACPI */
-	ret = device_property_read_string_array(dev, "interrupt-names",
-						hr_dev->irq_names,
-						HNS_ROCE_MAX_IRQ_NUM);
-	if (ret < 0) {
-		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
-		return ret;
-	}
-
-	/* fetch the interrupt numbers */
-	for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
-		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
-		if (hr_dev->irq[i] <= 0) {
-			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}

static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
@@ -707,6 +547,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
		return ret;
	}

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_init_hem_table(hr_dev,
+				      &hr_dev->mr_table.mtt_cqe_table,
+				      HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
+				      hr_dev->caps.num_cqe_segs, 1);
+		if (ret) {
+			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+			goto err_unmap_cqe;
+		}
+	}
+
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
@@ -754,6 +605,12 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
err_unmap_mtt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
+
+err_unmap_cqe:
+	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);

	return ret;
}
@@ -766,7 +623,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -826,56 +683,45 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
	return ret;
}

-/**
- * hns_roce_probe - RoCE driver entrance
- * @pdev: pointer to platform device
- * Return : int
- *
- */
-static int hns_roce_probe(struct platform_device *pdev)
+int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	int ret;
-	struct hns_roce_dev *hr_dev;
-	struct device *dev = &pdev->dev;
-
-	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
-	if (!hr_dev)
-		return -ENOMEM;
-
-	hr_dev->pdev = pdev;
-	platform_set_drvdata(pdev, hr_dev);
+	struct device *dev = hr_dev->dev;

-	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
-	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
-		dev_err(dev, "Not usable DMA addressing mode\n");
-		ret = -EIO;
-		goto error_failed_get_cfg;
+	if (hr_dev->hw->reset) {
+		ret = hr_dev->hw->reset(hr_dev, true);
+		if (ret) {
+			dev_err(dev, "Reset RoCE engine failed!\n");
+			return ret;
+		}
	}

-	ret = hns_roce_get_cfg(hr_dev);
-	if (ret) {
-		dev_err(dev, "Get Configuration failed!\n");
-		goto error_failed_get_cfg;
+	if (hr_dev->hw->cmq_init) {
+		ret = hr_dev->hw->cmq_init(hr_dev);
+		if (ret) {
+			dev_err(dev, "Init RoCE Command Queue failed!\n");
+			goto error_failed_cmq_init;
+		}
	}

-	ret = hr_dev->hw->reset(hr_dev, true);
-	if (ret) {
-		dev_err(dev, "Reset RoCE engine failed!\n");
-		goto error_failed_get_cfg;
+	ret = hr_dev->hw->hw_profile(hr_dev);
+	if (ret) {
+		dev_err(dev, "Get RoCE engine profile failed!\n");
+		goto error_failed_cmd_init;
	}

-	hr_dev->hw->hw_profile(hr_dev);
-
	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

-	ret = hns_roce_init_eq_table(hr_dev);
-	if (ret) {
-		dev_err(dev, "eq init failed!\n");
-		goto error_failed_eq_table;
+	if (hr_dev->cmd_mod) {
+		ret = hns_roce_init_eq_table(hr_dev);
+		if (ret) {
+			dev_err(dev, "eq init failed!\n");
+			goto error_failed_eq_table;
+		}
	}

	if (hr_dev->cmd_mod) {
@@ -898,10 +744,12 @@ static int hns_roce_probe(struct platform_device *pdev)
		goto error_failed_setup_hca;
	}

-	ret = hr_dev->hw->hw_init(hr_dev);
-	if (ret) {
-		dev_err(dev, "hw_init failed!\n");
-		goto error_failed_engine_init;
+	if (hr_dev->hw->hw_init) {
+		ret = hr_dev->hw->hw_init(hr_dev);
+		if (ret) {
+			dev_err(dev, "hw_init failed!\n");
+			goto error_failed_engine_init;
+		}
	}

	ret = hns_roce_register_device(hr_dev);
@@ -911,7 +759,8 @@ static int hns_roce_probe(struct platform_device *pdev)
	return 0;

error_failed_register_device:
-	hr_dev->hw->hw_exit(hr_dev);
+	if (hr_dev->hw->hw_exit)
+		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);
@@ -924,58 +773,47 @@ static int hns_roce_probe(struct platform_device *pdev)
	hns_roce_cmd_use_polling(hr_dev);

error_failed_use_event:
-	hns_roce_cleanup_eq_table(hr_dev);
+	if (hr_dev->cmd_mod)
+		hns_roce_cleanup_eq_table(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
-	ret = hr_dev->hw->reset(hr_dev, false);
-	if (ret)
-		dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
+	if (hr_dev->hw->cmq_exit)
+		hr_dev->hw->cmq_exit(hr_dev);

-error_failed_get_cfg:
-	ib_dealloc_device(&hr_dev->ib_dev);
+error_failed_cmq_init:
+	if (hr_dev->hw->reset) {
+		ret = hr_dev->hw->reset(hr_dev, false);
+		if (ret)
+			dev_err(dev, "Dereset RoCE engine failed!\n");
+	}

	return ret;
}
+EXPORT_SYMBOL_GPL(hns_roce_init);

-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
+void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
-	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
	hns_roce_unregister_device(hr_dev);
-	hr_dev->hw->hw_exit(hr_dev);
+	if (hr_dev->hw->hw_exit)
+		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

-	hns_roce_cleanup_eq_table(hr_dev);
+	if (hr_dev->cmd_mod)
+		hns_roce_cleanup_eq_table(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
-	hr_dev->hw->reset(hr_dev, false);
-
-	ib_dealloc_device(&hr_dev->ib_dev);
-
-	return 0;
+	if (hr_dev->hw->cmq_exit)
+		hr_dev->hw->cmq_exit(hr_dev);
+	if (hr_dev->hw->reset)
+		hr_dev->hw->reset(hr_dev, false);
}
+EXPORT_SYMBOL_GPL(hns_roce_exit);

-static struct platform_driver hns_roce_driver = {
-	.probe = hns_roce_probe,
-	.remove = hns_roce_remove,
-	.driver = {
-		.name = DRV_NAME,
-		.of_match_table = hns_roce_of_match,
-		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
-	},
-};
-
-module_platform_driver(hns_roce_driver);
-
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
@@ -47,6 +47,7 @@ unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
+EXPORT_SYMBOL_GPL(key_to_hw_index);

static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
@@ -65,6 +66,7 @@ int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
+EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);
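
For key_to_hw_index() shown above: the expression is a 32-bit rotate-right by 8, peeling the low "key byte" off the MPT index. A worked example:

/*
 * key = 0x12345678:
 *   (key << 24) = 0x78000000  (mod 2^32)
 *   (key >> 8)  = 0x00123456
 *   index       = 0x78123456, i.e. rotate-right by 8 bits
 */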

static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
@@ -175,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
-				    unsigned long *seg)
+				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	struct hns_roce_hem_table *table;
+	struct hns_roce_buddy *buddy;
+	int ret;
+
+	if (mtt_type == MTT_TYPE_WQE) {
+		buddy = &mr_table->mtt_buddy;
+		table = &mr_table->mtt_table;
+	} else {
+		buddy = &mr_table->mtt_cqe_buddy;
+		table = &mr_table->mtt_cqe_table;
+	}

-	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret == -1)
		return -1;

-	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+	if (hns_roce_table_get_range(hr_dev, table, *seg,
				     *seg + (1 << order) - 1)) {
-		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+		hns_roce_buddy_free(buddy, *seg, order);
		return -1;
	}
@@ -196,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
-	int ret = 0;
+	int ret;
	int i;

	/* Page num is zero, correspond to DMA memory register */
@@ -215,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		++mtt->order;

	/* Allocate MTT entry */
-	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
+				       mtt->mtt_type);
	if (ret == -1)
		return -ENOMEM;
@@ -229,18 +242,262 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
	if (mtt->order < 0)
		return;

-	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
-				 mtt->first_seg + (1 << mtt->order) - 1);
+	if (mtt->mtt_type == MTT_TYPE_WQE) {
+		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
+					mtt->first_seg,
+					mtt->first_seg + (1 << mtt->order) - 1);
+	} else {
+		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
+					mtt->first_seg,
+					mtt->first_seg + (1 << mtt->order) - 1);
+	}
}
+EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int err_loop_index,
int loop_i, int loop_j)
{
struct device *dev = hr_dev->dev;
u32 mhop_num;
u32 pbl_bt_sz;
u64 bt_idx;
int i, j;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
mhop_num = hr_dev->caps.pbl_hop_num;
i = loop_i;
j = loop_j;
if (mhop_num == 3 && err_loop_index == 2) {
for (; i >= 0; i--) {
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
for (j = 0; j < pbl_bt_sz / 8; j++) {
if (i == loop_i && j >= loop_j)
break;
bt_idx = i * pbl_bt_sz / 8 + j;
dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);
}
}
} else if (mhop_num == 3 && err_loop_index == 1) {
for (i -= 1; i >= 0; i--) {
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * pbl_bt_sz / 8 + j;
dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);
}
}
} else if (mhop_num == 2 && err_loop_index == 1) {
for (i -= 1; i >= 0; i--)
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
} else {
dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
mhop_num, err_loop_index);
return;
}
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
mr->pbl_bt_l0 = NULL;
mr->pbl_l0_dma_addr = 0;
}
/* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
int i = 0, j = 0;
u32 pbl_bt_sz;
u32 mhop_num;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
mhop_num = hr_dev->caps.pbl_hop_num;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0;
/* hop_num = 1 */
if (mhop_num == 1) {
if (npages > pbl_bt_sz / 8) {
dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages);
return -EINVAL;
}
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf)
return -ENOMEM;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
}
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l1_dma_addr)
return -ENOMEM;
mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
GFP_KERNEL);
if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1;
if (mhop_num == 3) {
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
goto err_kcalloc_l2_dma;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_bt_l2),
GFP_KERNEL);
if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l2;
}
/* alloc L0 BT */
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_dma_alloc_l0;
if (mhop_num == 2) {
/* alloc L1 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = i * (pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num)
break;
}
} else if (mhop_num == 3) {
/* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * pbl_bt_sz / 8 + j;
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = bt_idx *
(pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
dev, size,
&(mr->pbl_l2_dma_addr[bt_idx]),
GFP_KERNEL);
if (!mr->pbl_bt_l2[bt_idx]) {
hns_roce_loop_free(hr_dev, mr, 2, i, j);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l1[i] + j) =
mr->pbl_l2_dma_addr[bt_idx];
pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num) {
mr_alloc_done = 1;
break;
}
}
if (mr_alloc_done)
break;
}
}
mr->l0_chunk_last_num = i + 1;
if (mhop_num == 3)
mr->l1_chunk_last_num = j + 1;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
err_dma_alloc_l0:
kfree(mr->pbl_bt_l2);
mr->pbl_bt_l2 = NULL;
err_kcalloc_bt_l2:
kfree(mr->pbl_l2_dma_addr);
mr->pbl_l2_dma_addr = NULL;
err_kcalloc_l2_dma:
kfree(mr->pbl_bt_l1);
mr->pbl_bt_l1 = NULL;
err_kcalloc_bt_l1:
kfree(mr->pbl_l1_dma_addr);
mr->pbl_l1_dma_addr = NULL;
return -ENOMEM;
}
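
To put the multi-hop PBL math above in numbers, a worked example under the usual 4 KB page assumption (pbl_ba_pg_sz == 0, PAGE_SHIFT == 12):

/*
 * pbl_bt_sz       = 1 << (0 + PAGE_SHIFT) = 4096 bytes
 * entries per BT  = 4096 / 8              = 512
 * mhop_num == 2  -> up to 512 * 512       = 262144 data pages
 * mhop_num == 3  -> up to 512^3           = 134217728 data pages
 * pbl_last_bt_num = DIV_ROUND_UP(npages, 512) leaf tables for this MR
 */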

static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
+	struct device *dev = hr_dev->dev;
	unsigned long index = 0;
	int ret = 0;
-	struct device *dev = &hr_dev->pdev->dev;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -258,22 +515,117 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
		mr->type = MR_TYPE_DMA;
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
+		/* PBL multi-hop addressing parameters */
+		mr->pbl_bt_l2 = NULL;
+		mr->pbl_bt_l1 = NULL;
+		mr->pbl_bt_l0 = NULL;
+		mr->pbl_l2_dma_addr = NULL;
+		mr->pbl_l1_dma_addr = NULL;
+		mr->pbl_l0_dma_addr = 0;
	} else {
		mr->type = MR_TYPE_MR;
-		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
-						 &(mr->pbl_dma_addr),
-						 GFP_KERNEL);
-		if (!mr->pbl_buf)
-			return -ENOMEM;
+		if (!hr_dev->caps.pbl_hop_num) {
+			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+							 &(mr->pbl_dma_addr),
+							 GFP_KERNEL);
+			if (!mr->pbl_buf)
+				return -ENOMEM;
+		} else {
+			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+		}
	}

-	return 0;
+	return ret;
}
static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
int npages_allocated;
int npages;
int i, j;
u32 pbl_bt_sz;
u32 mhop_num;
u64 bt_idx;
npages = ib_umem_page_count(mr->umem);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
mhop_num = hr_dev->caps.pbl_hop_num;
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return;
/* hop_num = 1 */
if (mhop_num == 1) {
dma_free_coherent(dev, (unsigned int)(npages * 8),
mr->pbl_buf, mr->pbl_dma_addr);
return;
}
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
mr->pbl_l0_dma_addr);
if (mhop_num == 2) {
for (i = 0; i < mr->l0_chunk_last_num; i++) {
if (i == mr->l0_chunk_last_num - 1) {
npages_allocated = i * (pbl_bt_sz / 8);
dma_free_coherent(dev,
(npages - npages_allocated) * 8,
mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
break;
}
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
}
} else if (mhop_num == 3) {
for (i = 0; i < mr->l0_chunk_last_num; i++) {
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * (pbl_bt_sz / 8) + j;
if ((i == mr->l0_chunk_last_num - 1)
&& j == mr->l1_chunk_last_num - 1) {
npages_allocated = bt_idx *
(pbl_bt_sz / 8);
dma_free_coherent(dev,
(npages - npages_allocated) * 8,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);
break;
}
dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);
}
}
}
kfree(mr->pbl_bt_l1);
kfree(mr->pbl_l1_dma_addr);
mr->pbl_bt_l1 = NULL;
mr->pbl_l1_dma_addr = NULL;
if (mhop_num == 3) {
kfree(mr->pbl_bt_l2);
kfree(mr->pbl_l2_dma_addr);
mr->pbl_bt_l2 = NULL;
mr->pbl_l2_dma_addr = NULL;
}
} }

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	int npages = 0;
	int ret;
@@ -286,10 +638,18 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
-		dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
-				  mr->pbl_dma_addr);
+
+		if (!hr_dev->caps.pbl_hop_num)
+			dma_free_coherent(dev, (unsigned int)(npages * 8),
+					  mr->pbl_buf, mr->pbl_dma_addr);
+		else
+			hns_roce_mhop_free(hr_dev, mr);
	}

+	if (mr->enabled)
+		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+				   key_to_hw_index(mr->key));
+
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}
@@ -299,7 +659,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
@@ -345,10 +705,11 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
-	u32 i = 0;
-	__le64 *mtts = NULL;
+	struct hns_roce_hem_table *table;
	dma_addr_t dma_handle;
+	__le64 *mtts;
	u32 s = start_index * sizeof(u64);
+	u32 i;

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof(u64)) !=
@@ -358,15 +719,24 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

-	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+	if (mtt->mtt_type == MTT_TYPE_WQE)
+		table = &hr_dev->mr_table.mtt_table;
+	else
+		table = &hr_dev->mr_table.mtt_cqe_table;
+
+	mtts = hns_roce_table_find(hr_dev, table,
				   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr, low 12 bits : 0 */
-	for (i = 0; i < npages; ++i)
-		mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;
+	for (i = 0; i < npages; ++i) {
+		if (!hr_dev->caps.mtt_hop_num)
+			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
+		else
+			mtts[i] = cpu_to_le64(page_list[i]);
+	}

	return 0;
}
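
A hedged restatement of the two MTT entry encodings written by the loop above (helper name illustrative, not from the patch):

static __le64 mtt_entry(struct hns_roce_dev *hr_dev, u64 page_addr)
{
	if (!hr_dev->caps.mtt_hop_num)
		/* no-hop engines: store the PFN, low 12 bits dropped */
		return cpu_to_le64(page_addr >> PAGE_ADDR_SHIFT);
	/* multi-hop engines: store the full DMA address */
	return cpu_to_le64(page_addr);
}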
@@ -400,9 +770,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
-	u32 i = 0;
-	int ret = 0;
-	u64 *page_list = NULL;
+	u64 *page_list;
+	int ret;
+	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
@@ -425,7 +795,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
@@ -439,8 +809,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
	if (ret)
		goto err_buddy;

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
+					  ilog2(hr_dev->caps.num_cqe_segs));
+		if (ret)
+			goto err_buddy_cqe;
+	}
	return 0;

+err_buddy_cqe:
+	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+
err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
@@ -451,13 +830,15 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
-	int ret = 0;
-	struct hns_roce_mr *mr = NULL;
+	struct hns_roce_mr *mr;
+	int ret;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
@@ -526,16 +907,36 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
	return ret;
}

-static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
+static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
+				     struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
-	int i = 0;
-	int entry;
	struct scatterlist *sg;
+	int i = 0, j = 0;
+	int entry;
+
+	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
+		return 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
-		i++;
+		if (!hr_dev->caps.pbl_hop_num) {
+			mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
+			i++;
+		} else if (hr_dev->caps.pbl_hop_num == 1) {
+			mr->pbl_buf[i] = sg_dma_address(sg);
+			i++;
+		} else {
+			if (hr_dev->caps.pbl_hop_num == 2)
+				mr->pbl_bt_l1[i][j] = sg_dma_address(sg);
+			else if (hr_dev->caps.pbl_hop_num == 3)
+				mr->pbl_bt_l2[i][j] = sg_dma_address(sg);
+
+			j++;
+			if (j >= (PAGE_SIZE / 8)) {
+				i++;
+				j = 0;
+			}
+		}
	}

	/* Memory barrier */
@@ -549,10 +950,12 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-	struct device *dev = &hr_dev->pdev->dev;
-	struct hns_roce_mr *mr = NULL;
-	int ret = 0;
-	int n = 0;
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_mr *mr;
+	int bt_size;
+	int ret;
+	int n;
+	int i;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
@@ -573,11 +976,27 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
		goto err_umem;
	}

-	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
-		dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
-			length);
-		ret = -EINVAL;
-		goto err_umem;
+	if (!hr_dev->caps.pbl_hop_num) {
+		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
+			dev_err(dev,
+				" MR len %lld err. MR is limited to 4G at most!\n",
+				length);
+			ret = -EINVAL;
+			goto err_umem;
+		}
+	} else {
+		int pbl_size = 1;
+
+		bt_size = (1 << PAGE_SHIFT) / 8;
+		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
+			pbl_size *= bt_size;
+		if (n > pbl_size) {
+			dev_err(dev,
+				" MR len %lld err. MR page num is limited to %d!\n",
+				length, pbl_size);
+			ret = -EINVAL;
+			goto err_umem;
+		}
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
@@ -585,7 +1004,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
	if (ret)
		goto err_umem;

-	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
+	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret)
		goto err_mr;
@@ -31,6 +31,7 @@
 */

#include <linux/platform_device.h>
+#include <linux/pci.h>
#include "hns_roce_device.h"

static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
@@ -60,7 +61,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
			       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct hns_roce_pd *pd;
	int ret;
@@ -86,6 +87,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,

	return &pd->ibpd;
}
+EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);

int hns_roce_dealloc_pd(struct ib_pd *pd)
{
@@ -94,6 +96,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)

	return 0;
}
+EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);

int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
@@ -109,12 +112,17 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
	uar->index = (uar->index - 1) %
		     (hr_dev->caps.phy_num_uars - 1) + 1;

-	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
-		return -EINVAL;
+	if (!dev_is_pci(hr_dev->dev)) {
+		res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+			return -EINVAL;
+		}
+		uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
+	} else {
+		uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
+			   >> PAGE_SHIFT);
	}
-	uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;

	return 0;
}
@@ -44,7 +44,7 @@
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);
@@ -136,6 +136,7 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
		return HNS_ROCE_QP_NUM_STATE;
	}
}
+EXPORT_SYMBOL_GPL(to_hns_roce_state);

static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
@@ -153,7 +154,7 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
-		dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}
@@ -171,7 +172,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
@@ -227,6 +228,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
		     hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
@@ -241,6 +243,7 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
@@ -252,13 +255,14 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
+EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
+	struct device *dev = hr_dev->dev;
	u32 max_cnt;
-	struct device *dev = &hr_dev->pdev->dev;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
@@ -282,20 +286,27 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
			return -EINVAL;
		}

-		/* In v1 engine, parameter verification procession */
-		max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
-			  cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
+		if (hr_dev->caps.min_wqes)
+			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
+		else
+			max_cnt = cap->max_recv_wr;
+
		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
-			dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
+			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
-		/* WQE is fixed for 64B */
-		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+
+		if (hr_dev->caps.max_rq_sg <= 2)
+			hr_qp->rq.wqe_shift =
+					ilog2(hr_dev->caps.max_rq_desc_sz);
+		else
+			hr_qp->rq.wqe_shift =
+					ilog2(hr_dev->caps.max_rq_desc_sz
+					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
@@ -305,32 +316,77 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
}

static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
+	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
-		dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+		dev_err(hr_dev->dev, "check SQ size error!\n");
+		return -EINVAL;
+	}
+
+	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
+			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

+	max_cnt = max(1U, cap->max_send_sge);
+	if (hr_dev->caps.max_sq_sg <= 2)
+		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	else
+		hr_qp->sq.max_gs = max_cnt;
+
+	if (hr_qp->sq.max_gs > 2)
+		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+							(hr_qp->sq.max_gs - 2));
+	hr_qp->sge.sge_shift = 4;
+
	/* Get buf size, SQ and RQ are aligned to page_szie */
-	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
-					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
-			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					     hr_qp->sq.wqe_shift), PAGE_SIZE);
-
-	hr_qp->sq.offset = 0;
-	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+	if (hr_dev->caps.max_sq_sg <= 2) {
+		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+		hr_qp->sq.offset = 0;
+		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+	} else {
+		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+					     hr_qp->sge.sge_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+		hr_qp->sq.offset = 0;
+		if (hr_qp->sge.sge_cnt) {
+			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+						(hr_qp->sq.wqe_cnt <<
+						hr_qp->sq.wqe_shift),
+						PAGE_SIZE);
+			hr_qp->rq.offset = hr_qp->sge.offset +
+					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+						hr_qp->sge.sge_shift),
+						PAGE_SIZE);
+		} else {
+			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+						(hr_qp->sq.wqe_cnt <<
+						hr_qp->sq.wqe_shift),
+						PAGE_SIZE);
+		}
+	}

	return 0;
}
@@ -339,13 +395,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	u32 max_cnt;
+	int size;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
-		dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
+		dev_err(dev, "SQ WR or sge or inline data error!\n");
		return -EINVAL;
	}
@@ -353,27 +410,45 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

-	/* In v1 engine, parameter verification procession */
-	max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
-		  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
+	if (hr_dev->caps.min_wqes)
+		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
+	else
+		max_cnt = cap->max_send_wr;
+
	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
-		dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
+		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
-	hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	if (hr_dev->caps.max_sq_sg <= 2)
+		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	else
+		hr_qp->sq.max_gs = max_cnt;

-	/* Get buf size, SQ and RQ are aligned to page_szie */
-	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
-					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
-			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+	if (hr_qp->sq.max_gs > 2) {
+		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+				     (hr_qp->sq.max_gs - 2));
+		hr_qp->sge.sge_shift = 4;
+	}
+
+	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
	hr_qp->sq.offset = 0;
-	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+				 PAGE_SIZE);
+
+	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+		hr_qp->sge.offset = size;
+		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+					  hr_qp->sge.sge_shift, PAGE_SIZE);
+	}
+
+	hr_qp->rq.offset = size;
+	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+				  PAGE_SIZE);
+	hr_qp->buff_size = size;

	/* Get wr and sge number which send */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
@@ -391,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;
@@ -421,7 +496,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
			goto err_out;
		}

-		ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
+		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
+						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_out;
@@ -436,6 +512,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
			goto err_out;
		}

+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
					hr_qp->umem->page_shift, &hr_qp->mtt);
		if (ret) {
@@ -472,10 +549,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
		}

		/* QP doorbell register address */
-		hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
+		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
-		hr_qp->rq.db_reg_l = hr_dev->reg_base +
-				     ROCEE_DB_OTHERS_L_0_REG +
+		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
@@ -486,6 +562,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
			goto err_out;
		}

+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
@@ -522,7 +599,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 	}

-	if ((init_attr->qp_type) == IB_QPT_GSI) {
+	if (init_attr->qp_type == IB_QPT_GSI &&
+	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+		/* In v1 engine, GSI QP context in RoCE engine's register */
 		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
 		if (ret) {
 			dev_err(dev, "hns_roce_qp_alloc failed!\n");
@@ -571,7 +650,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_sqp *hr_sqp;
 	struct hns_roce_qp *hr_qp;
 	int ret;
@@ -629,6 +708,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,

 	return &hr_qp->ibqp;
 }
+EXPORT_SYMBOL_GPL(hns_roce_create_qp);

 int to_hr_qp_type(int qp_type)
 {
@@ -647,6 +727,7 @@ int to_hr_qp_type(int qp_type)

 	return transport_type;
 }
+EXPORT_SYMBOL_GPL(to_hr_qp_type);

 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata)
@@ -654,7 +735,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret = -EINVAL;
 	int p;
 	enum ib_mtu active_mtu;
@@ -692,7 +773,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
 		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

-		if (attr->path_mtu > IB_MTU_2048 ||
+		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
+		    attr->path_mtu > IB_MTU_4096) ||
+		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
+		    attr->path_mtu > IB_MTU_2048) ||
 		    attr->path_mtu < IB_MTU_256 ||
 		    attr->path_mtu > active_mtu) {
 			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
@@ -716,9 +800,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}

 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ret = -EPERM;
-		dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
-			new_state);
+		ret = 0;
 		goto out;
 	}
@@ -745,6 +827,7 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
 	}
 }
+EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

 void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
@@ -761,6 +844,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 		spin_unlock_irq(&recv_cq->lock);
 	}
 }
+EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

 __be32 send_ieth(struct ib_send_wr *wr)
 {
@@ -774,6 +858,7 @@ __be32 send_ieth(struct ib_send_wr *wr)
 		return 0;
 	}
 }
+EXPORT_SYMBOL_GPL(send_ieth);

 static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
 {
@@ -785,11 +870,20 @@ void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
 {
 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
 }
+EXPORT_SYMBOL_GPL(get_recv_wqe);

 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
 {
 	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
 }
+EXPORT_SYMBOL_GPL(get_send_wqe);
+
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
+{
+	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
+					(n << hr_qp->sge.sge_shift));
+}
+EXPORT_SYMBOL_GPL(get_send_extend_sge);
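Reviewer note: the new get_send_extend_sge indexes the extended-SGE region exactly the way get_send_wqe indexes the SQ, with a shift converting an entry number into a byte offset (sge_shift is 4, i.e. 16-byte entries, per the sizing hunk earlier). A toy sketch of that addressing over a flat buffer, standing in for the driver's hns_roce_buf abstraction:

	#include <stdint.h>
	#include <stdio.h>

	#define SGE_SHIFT 4	/* 16-byte extended SGE entries, as in the diff */

	/* Toy stand-in for hns_roce_buf_offset() on a contiguous buffer. */
	static void *buf_offset(uint8_t *buf, unsigned long off)
	{
		return buf + off;
	}

	static void *get_send_extend_sge(uint8_t *buf, unsigned long sge_offset,
					 int n)
	{
		return buf_offset(buf, sge_offset +
				       ((unsigned long)n << SGE_SHIFT));
	}

	int main(void)
	{
		static uint8_t qp_buf[64 * 1024];	/* illustrative size */
		unsigned long sge_offset = 8192;	/* from the layout math */

		/* Entry 3 lives 3 * 16 bytes into the extended SGE region. */
		printf("%td\n",
		       (uint8_t *)get_send_extend_sge(qp_buf, sge_offset, 3)
		       - qp_buf);			/* prints 8240 */
		return 0;
	}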

 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 			  struct ib_cq *ib_cq)
@@ -808,6 +902,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,

 	return cur + nreq >= hr_wq->max_post;
 }
+EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);
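Reviewer note: only the return statement of hns_roce_wq_overflow is visible here; the collapsed body computes cur, the current queue occupancy. A sketch with the usual free-running head/tail counters (the occupancy computation is an assumption about the elided code, shown for illustration; unsigned subtraction handles counter wrap-around):

	#include <stdbool.h>
	#include <stdio.h>

	struct wq {
		unsigned int head;	/* producer count, bumped on post */
		unsigned int tail;	/* consumer count, bumped on completion */
		unsigned int max_post;	/* ring capacity */
	};

	/* Would posting nreq more work requests overflow the queue?
	 * head - tail wraps correctly for free-running unsigned counters. */
	static bool wq_overflow(const struct wq *wq, int nreq)
	{
		unsigned int cur = wq->head - wq->tail;

		return cur + nreq >= wq->max_post;
	}

	int main(void)
	{
		struct wq wq = { .head = 100, .tail = 37, .max_post = 64 };

		printf("%d\n", wq_overflow(&wq, 1));	/* 63 + 1 >= 64 -> 1 */
		return 0;
	}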

 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 {
@@ -823,7 +918,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 				   hr_dev->caps.num_qps - 1, SQP_NUM,
 				   reserved_from_top);
 	if (ret) {
-		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
 			ret);
 		return ret;
 	}
...
 config INFINIBAND_I40IW
 	tristate "Intel(R) Ethernet X722 iWARP Driver"
 	depends on INET && I40E
+	depends on PCI
 	select GENERIC_ALLOCATOR
 	---help---
 	Intel(R) Ethernet X722 iWARP Driver
...
@@ -1229,13 +1229,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
-			mlx5_ib_dbg(dev, "cache empty for order %d", order);
+			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
-			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
 		use_umr = false;
...
@@ -1093,7 +1093,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
 	rsp = &mqe->u.rsp;

 	if (cqe_status || ext_status) {
-		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
 		       __func__, cqe_status, ext_status);
 		if (rsp) {
 			/* This is for embedded cmds. */
...
@@ -658,7 +658,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
 	if (reset) {
 		status = ocrdma_mbx_rdma_stats(dev, true);
 		if (status) {
-			pr_err("Failed to reset stats = %d", status);
+			pr_err("Failed to reset stats = %d\n", status);
 			goto err;
 		}
 	}
...
 config INFINIBAND_QEDR
 	tristate "QLogic RoCE driver"
 	depends on 64BIT && QEDE
+	depends on PCI
 	select QED_LL2
 	select QED_RDMA
 	---help---
...
 config INFINIBAND_QIB
 	tristate "Intel PCIe HCA support"
 	depends on 64BIT && INFINIBAND_RDMAVT
+	depends on PCI
 	---help---
 	This is a low-level driver for Intel PCIe QLE InfiniBand host
 	channel adapters.  This driver does not support the Intel
...
 config INFINIBAND_RDMAVT
 	tristate "RDMA verbs transport library"
 	depends on 64BIT
+	depends on PCI
 	select DMA_VIRT_OPS
 	---help---
 	This is a common software verbs provider for RDMA networks.
@@ -51,7 +51,6 @@
 #include <net/addrconf.h>
 #include <linux/inetdevice.h>
 #include <rdma/ib_cache.h>
-#include <linux/pci.h>

 #define DRV_VERSION "1.0.0"
@@ -2312,7 +2311,8 @@ static void ipoib_add_one(struct ib_device *device)
 	}

 	if (!count) {
-		kfree(dev_list);
+		pr_err("Failed to init port, removing it\n");
+		ipoib_remove_one(device, dev_list);
 		return;
 	}
...
@@ -34,7 +34,7 @@ config LNET_SELFTEST
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an
...