openanolis/cloud-kernel, commit 1848757c
Authored Sep 27, 2017 by Doug Ledford

Merge branches 'hns' and 'misc' into k.o/for-next

Signed-off-by: Doug Ledford <dledford@redhat.com>

Parents: 7ae6f2a3 400d324a 4d45b757
Showing 47 changed files with 6,669 additions and 922 deletions (+6669 -922).
drivers/infiniband/Kconfig                      +0    -1
drivers/infiniband/core/uverbs.h                +21   -14
drivers/infiniband/core/uverbs_cmd.c            +53   -72
drivers/infiniband/core/uverbs_ioctl_merge.c    +1    -1
drivers/infiniband/core/uverbs_main.c           +10   -12
drivers/infiniband/core/uverbs_std_types.c      +2    -1
drivers/infiniband/hw/cxgb3/Kconfig             +1    -1
drivers/infiniband/hw/cxgb3/cxio_hal.c          +2    -4
drivers/infiniband/hw/cxgb4/Kconfig             +1    -1
drivers/infiniband/hw/cxgb4/cm.c                +119  -125
drivers/infiniband/hw/cxgb4/cq.c                +25   -25
drivers/infiniband/hw/cxgb4/device.c            +8    -8
drivers/infiniband/hw/cxgb4/ev.c                +1    -1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h          +7    -10
drivers/infiniband/hw/cxgb4/mem.c               +13   -13
drivers/infiniband/hw/cxgb4/provider.c          +18   -18
drivers/infiniband/hw/cxgb4/qp.c                +27   -32
drivers/infiniband/hw/cxgb4/resource.c          +23   -23
drivers/infiniband/hw/cxgb4/t4.h                +8    -12
drivers/infiniband/hw/hns/Kconfig               +23   -2
drivers/infiniband/hw/hns/Makefile              +7    -1
drivers/infiniband/hw/hns/hns_roce_ah.c         +1    -1
drivers/infiniband/hw/hns/hns_roce_alloc.c      +5    -3
drivers/infiniband/hw/hns/hns_roce_cmd.c        +12   -95
drivers/infiniband/hw/hns/hns_roce_cmd.h        +50   -0
drivers/infiniband/hw/hns/hns_roce_common.h     +23   -0
drivers/infiniband/hw/hns/hns_roce_cq.c         +45   -27
drivers/infiniband/hw/hns/hns_roce_device.h     +88   -10
drivers/infiniband/hw/hns/hns_roce_hem.c        +672  -27
drivers/infiniband/hw/hns/hns_roce_hem.h        +30   -2
drivers/infiniband/hw/hns/hns_roce_hw_v1.c      +366  -35
drivers/infiniband/hw/hns/hns_roce_hw_v1.h      +5    -0
drivers/infiniband/hw/hns/hns_roce_hw_v2.c      +3128 -0
drivers/infiniband/hw/hns/hns_roce_hw_v2.h      +1165 -0
drivers/infiniband/hw/hns/hns_roce_main.c       +81   -243
drivers/infiniband/hw/hns/hns_roce_mr.c         +466  -47
drivers/infiniband/hw/hns/hns_roce_pd.c         +14   -6
drivers/infiniband/hw/hns/hns_roce_qp.c         +137  -42
drivers/infiniband/hw/i40iw/Kconfig             +1    -0
drivers/infiniband/hw/mlx5/mr.c                 +2    -2
drivers/infiniband/hw/ocrdma/ocrdma_hw.c        +1    -1
drivers/infiniband/hw/ocrdma/ocrdma_stats.c     +1    -1
drivers/infiniband/hw/qedr/Kconfig              +1    -0
drivers/infiniband/hw/qib/Kconfig               +1    -0
drivers/infiniband/sw/rdmavt/Kconfig            +1    -0
drivers/infiniband/ulp/ipoib/ipoib_main.c       +2    -2
drivers/staging/lustre/lnet/Kconfig             +1    -1
drivers/infiniband/Kconfig

 menuconfig INFINIBAND
 	tristate "InfiniBand support"
-	depends on PCI || BROKEN
 	depends on HAS_IOMEM
 	depends on NET
 	depends on INET
drivers/infiniband/core/uverbs.h

@@ -47,21 +47,28 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>

-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
-	do {								\
-		(udata)->inbuf  = (const void __user *) (ibuf);		\
-		(udata)->outbuf = (void __user *) (obuf);		\
-		(udata)->inlen  = (ilen);				\
-		(udata)->outlen = (olen);				\
-	} while (0)
+static inline void
+ib_uverbs_init_udata(struct ib_udata *udata,
+		     const void __user *ibuf, void __user *obuf,
+		     size_t ilen, size_t olen)
+{
+	udata->inbuf  = ibuf;
+	udata->outbuf = obuf;
+	udata->inlen  = ilen;
+	udata->outlen = olen;
+}

-#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)			\
-	do {									\
-		(udata)->inbuf  = (ilen) ? (const void __user *) (ibuf) : NULL; \
-		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;	\
-		(udata)->inlen  = (ilen);					\
-		(udata)->outlen = (olen);					\
-	} while (0)
+static inline void
+ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
+				 const void __user *ibuf, void __user *obuf,
+				 size_t ilen, size_t olen)
+{
+	ib_uverbs_init_udata(udata,
+			     ilen ? ibuf : NULL, olen ? obuf : NULL,
+			     ilen, olen);
+}

 /*
  * Our lifetime rules for these structs are the following:
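The point of replacing the INIT_UDATA() macro with a static inline is type checking: the macro's casts would silently accept an integer where a pointer belongs. A minimal userspace analogue of the difference (INIT_BUF, struct buf, and init_buf are illustrative names, not from the patch):

#include <stddef.h>

struct buf { void *ptr; size_t len; };

/* Analogue of the old macro: the cast hides type errors. */
#define INIT_BUF(b, p, n) do { (b)->ptr = (void *)(p); (b)->len = (n); } while (0)

/* Analogue of the new helper: the compiler checks both arguments. */
static inline void init_buf(struct buf *b, void *p, size_t n)
{
	b->ptr = p;
	b->len = n;
}

int main(void)
{
	struct buf b;
	char data[16];

	INIT_BUF(&b, 42, sizeof(data));   /* compiles: the cast swallows the bug */
	init_buf(&b, data, sizeof(data)); /* init_buf(&b, 42, ...) would be rejected */
	return (int)b.len;
}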
drivers/infiniband/core/uverbs_cmd.c

@@ -91,8 +91,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 		goto err;
 	}

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -141,8 +141,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 		goto err_fd;
 	}

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_file;
 	}

@@ -238,8 +237,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 	memset(&resp, 0, sizeof resp);
 	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;

 	return in_len;

@@ -295,8 +293,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
 	resp.link_layer      = rdma_port_get_link_layer(ib_dev, cmd.port_num);

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;

 	return in_len;

@@ -320,8 +317,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -344,8 +341,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	memset(&resp, 0, sizeof resp);
 	resp.pd_handle = uobj->id;

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}

@@ -490,8 +486,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -556,8 +552,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 		atomic_inc(&xrcd->usecnt);
 	}

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}

@@ -655,8 +650,8 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -705,8 +700,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	resp.rkey      = mr->rkey;
 	resp.mr_handle = uobj->id;

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}

@@ -748,8 +742,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -800,8 +794,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 	resp.lkey = mr->lkey;
 	resp.rkey = mr->rkey;

-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp)))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		ret = -EFAULT;
 	else
 		ret = in_len;

@@ -867,8 +860,8 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 		goto err_free;
 	}

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -889,8 +882,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 	resp.rkey      = mw->rkey;
 	resp.mw_handle = uobj->id;

-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp))) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
 		ret = -EFAULT;
 		goto err_copy;
 	}

@@ -956,8 +948,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
 			       uobj_file.uobj);
 	ib_uverbs_init_event_queue(&ev_file->ev_queue);

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		uobj_alloc_abort(uobj);
 		return -EFAULT;
 	}

@@ -1087,10 +1078,11 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;

-	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response,
-		   sizeof(cmd), sizeof(resp));
+	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+			     sizeof(cmd), sizeof(resp));

-	INIT_UDATA(&uhw, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -1173,8 +1165,8 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -1188,8 +1180,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 	resp.cqe = cq->cqe;

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp.cqe))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
 		ret = -EFAULT;

 out:

@@ -1249,7 +1240,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 		return -EINVAL;

 	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
-	header_ptr = (void __user *)(unsigned long) cmd.response;
+	header_ptr = u64_to_user_ptr(cmd.response);
 	data_ptr = header_ptr + sizeof resp;

 	memset(&resp, 0, sizeof resp);

@@ -1343,8 +1334,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	resp.async_events_reported = obj->async_events_reported;

 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;

 	return in_len;

@@ -1650,10 +1640,10 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;

-	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
-		   resp_size);
-	INIT_UDATA(&uhw, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + resp_size,
+	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+			     sizeof(cmd), resp_size);
+	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + resp_size,
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - resp_size);

@@ -1750,8 +1740,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -1795,8 +1785,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	resp.qpn       = qp->qp_num;
 	resp.qp_handle = obj->uevent.uobject.id;

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_destroy;
 	}

@@ -1911,8 +1900,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	resp.max_inline_data        = init_attr->cap.max_inline_data;
 	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;

 out:

@@ -2042,7 +2030,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 			   ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
 		return -EOPNOTSUPP;

-	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
 		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len);

@@ -2126,8 +2114,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	resp.events_reported = obj->uevent.events_reported;

 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;

 	return in_len;

@@ -2311,8 +2298,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		break;
 	}

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;

 out_put:

@@ -2460,8 +2446,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
 		}
 	}

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;

 out:

@@ -2510,8 +2495,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
 		break;
 	}

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;

 out:

@@ -2548,8 +2532,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
 		return -EINVAL;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -2600,8 +2584,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	resp.ah_handle = uobj->id;

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}

@@ -3627,8 +3610,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	xcmd.max_sge	 = cmd.max_sge;
 	xcmd.srq_limit	 = cmd.srq_limit;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -3654,8 +3637,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));

@@ -3680,7 +3663,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

-	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
-		   out_len);
+	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL,
+			     in_len - sizeof cmd, out_len);

 	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);

@@ -3731,8 +3714,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
 	resp.max_sge   = attr.max_sge;
 	resp.srq_limit = attr.srq_limit;

-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;

 	return in_len;

@@ -3773,8 +3755,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	}
 	resp.events_reported = obj->events_reported;
 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp)))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		return -EFAULT;

 	return in_len;
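Every hunk above replaces the open-coded (void __user *)(unsigned long) double cast with u64_to_user_ptr(), which the kernel headers provide for turning a u64 handle carried in an ABI struct back into a userspace pointer. A self-contained userspace analogue of what that conversion does (u64_to_ptr is an illustrative stand-in; the kernel's macro also carries the __user annotation):

#include <stdint.h>
#include <stdio.h>

/* Go through uintptr_t so the u64-to-pointer conversion is
 * well-defined even where unsigned long is narrower than 64 bits,
 * which is the bug the double cast could hide. */
#define u64_to_ptr(x) ((void *)(uintptr_t)(x))

int main(void)
{
	int value = 42;
	uint64_t response = (uintptr_t)&value; /* address smuggled through a u64 field */

	int *p = u64_to_ptr(response);
	printf("%d\n", *p);
	return 0;
}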
drivers/infiniband/core/uverbs_ioctl_merge.c

@@ -376,7 +376,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
 			 min_id) ||
 		    WARN(attr_obj_with_special_access &&
 			 !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
-			 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy acess but isn't mandatory\n",
+			 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
 			 min_id) ||
 		    WARN(IS_ATTR_OBJECT(attr) &&
 			 attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
drivers/infiniband/core/uverbs_main.c

@@ -763,7 +763,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 		}

 		if (!access_ok(VERIFY_WRITE,
-			       (void __user *) (unsigned long) ex_hdr.response,
+			       u64_to_user_ptr(ex_hdr.response),
 			       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
 			ret = -EFAULT;
 			goto out;

@@ -775,19 +775,17 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 			}
 		}

-		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
-				       hdr.in_words * 8, hdr.out_words * 8);
+		ib_uverbs_init_udata_buf_or_null(&ucore, buf,
+					u64_to_user_ptr(ex_hdr.response),
+					hdr.in_words * 8, hdr.out_words * 8);

-		INIT_UDATA_BUF_OR_NULL(&uhw,
-				       buf + ucore.inlen,
-				       (unsigned long) ex_hdr.response + ucore.outlen,
-				       ex_hdr.provider_in_words * 8,
-				       ex_hdr.provider_out_words * 8);
+		ib_uverbs_init_udata_buf_or_null(&uhw,
+					buf + ucore.inlen,
+					u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
+					ex_hdr.provider_in_words * 8,
+					ex_hdr.provider_out_words * 8);

-		ret = uverbs_ex_cmd_table[command](file,
-						   ib_dev,
-						   &ucore,
-						   &uhw);
+		ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
 		if (!ret)
 			ret = written_count;
 	} else {
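The extended-command path splits the user buffer into a core region (ucore) and a provider-specific region (uhw); the *_buf_or_null variant NULLs any buffer whose length is zero, so downstream copy helpers can cheaply detect "no payload". A self-contained sketch of that behaviour (a userspace analogue; the struct and helper are simplified stand-ins for the hunk above):

#include <stddef.h>
#include <stdio.h>

struct udata { const void *inbuf; void *outbuf; size_t inlen, outlen; };

/* Analogue of ib_uverbs_init_udata_buf_or_null(): a zero-length
 * region gets a NULL pointer instead of a meaningless one. */
static void init_udata_buf_or_null(struct udata *u, const void *in,
				   void *out, size_t inlen, size_t outlen)
{
	u->inbuf  = inlen ? in : NULL;
	u->outbuf = outlen ? out : NULL;
	u->inlen  = inlen;
	u->outlen = outlen;
}

int main(void)
{
	char in[32], out[32];
	struct udata uhw;

	/* Command with no provider-specific output: outbuf becomes NULL. */
	init_udata_buf_or_null(&uhw, in, out, sizeof(in), 0);
	printf("outbuf %s\n", uhw.outbuf ? "set" : "NULL");
	return 0;
}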
drivers/infiniband/core/uverbs_std_types.c

@@ -246,7 +246,8 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
 		outbuf_len = uhw_out->ptr_attr.len;
 	}

-	INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
+	ib_uverbs_init_udata_buf_or_null(udata, inbuf, outbuf,
+					 inbuf_len, outbuf_len);
 }

 static int uverbs_create_cq_handler(struct ib_device *ib_dev,
drivers/infiniband/hw/cxgb3/Kconfig

 config INFINIBAND_CXGB3
 	tristate "Chelsio RDMA Driver"
-	depends on CHELSIO_T3 && INET
+	depends on CHELSIO_T3
 	select GENERIC_ALLOCATOR
 	---help---
 	  This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
drivers/infiniband/hw/cxgb3/cxio_hal.c

@@ -404,12 +404,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,

 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
-	__u32 ptr;
+	__u32 ptr = wq->sq_rptr + count;
 	int flushed = 0;
-	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
+	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);

-	ptr = wq->sq_rptr + count;
-	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
 	while (ptr != wq->sq_wptr) {
 		sqp->signaled = 0;
 		insert_sq_cqe(wq, cq, sqp);
drivers/infiniband/hw/cxgb4/Kconfig

 config INFINIBAND_CXGB4
 	tristate "Chelsio T4/T5 RDMA Driver"
-	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+	depends on CHELSIO_T4 && INET
 	select CHELSIO_LIB
 	select GENERIC_ALLOCATOR
 	---help---
drivers/infiniband/hw/cxgb4/cm.c

(This large diff is collapsed in the page view: +119 -125.)
drivers/infiniband/hw/cxgb4/cq.c

@@ -144,7 +144,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret)
 		goto err4;
-	pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
+	pr_debug("wait_event wr_wait %p\n", &wr_wait);
 	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
 	if (ret)
 		goto err4;

@@ -178,7 +178,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
 {
 	struct t4_cqe cqe;

-	pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
 		 wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |

@@ -197,7 +197,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
 	int in_use = wq->rq.in_use - count;

 	BUG_ON(in_use < 0);
-	pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+	pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
 		 wq, cq, wq->rq.in_use, count);
 	while (in_use--) {
 		insert_recv_cqe(wq, cq);

@@ -211,7 +211,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
 {
 	struct t4_cqe cqe;

-	pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
 		 wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |

@@ -281,8 +281,8 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
 			/*
 			 * Insert this completed cqe into the swcq.
 			 */
-			pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
-				 __func__, cidx, cq->sw_pidx);
+			pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+				 cidx, cq->sw_pidx);
 			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
 			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
 			t4_swcq_produce(cq);

@@ -337,7 +337,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 	struct t4_swsqe *swsqe;
 	int ret;

-	pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+	pr_debug("cqid 0x%x\n", chp->cq.cqid);
 	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

 	/*

@@ -430,7 +430,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
 	u32 ptr;

 	*count = 0;
-	pr_debug("%s count zero %d\n", __func__, *count);
+	pr_debug("count zero %d\n", *count);
 	ptr = cq->sw_cidx;
 	while (ptr != cq->sw_pidx) {
 		cqe = &cq->sw_queue[ptr];

@@ -440,7 +440,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
 		if (++ptr == cq->size)
 			ptr = 0;
 	}
-	pr_debug("%s cq %p count %d\n", __func__, cq, *count);
+	pr_debug("cq %p count %d\n", cq, *count);
 }

 /*

@@ -471,8 +471,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	if (ret)
 		return ret;

-	pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-		 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+	pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+		 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
 		 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
 		 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
 		 CQE_WRID_LOW(hw_cqe));

@@ -603,8 +603,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
 		struct t4_swsqe *swsqe;

-		pr_debug("%s out of order completion going in sw_sq at idx %u\n",
-			 __func__, CQE_WRID_SQ_IDX(hw_cqe));
+		pr_debug("out of order completion going in sw_sq at idx %u\n",
+			 CQE_WRID_SQ_IDX(hw_cqe));
 		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
 		swsqe->cqe = *hw_cqe;
 		swsqe->complete = 1;

@@ -638,13 +638,13 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

 		wq->sq.cidx = (uint16_t)idx;
-		pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+		pr_debug("completing sq idx %u\n", wq->sq.cidx);
 		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
 		if (c4iw_wr_log)
 			c4iw_log_wr_stats(wq, hw_cqe);
 		t4_sq_consume(wq);
 	} else {
-		pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+		pr_debug("completing rq idx %u\n", wq->rq.cidx);
 		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
 		BUG_ON(t4_rq_empty(wq));
 		if (c4iw_wr_log)

@@ -661,12 +661,12 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 skip_cqe:
 	if (SW_CQE(hw_cqe)) {
-		pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
-			 __func__, cq, cq->cqid, cq->sw_cidx);
+		pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+			 cq, cq->cqid, cq->sw_cidx);
 		t4_swcq_consume(cq);
 	} else {
-		pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
-			 __func__, cq, cq->cqid, cq->cidx);
+		pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+			 cq, cq->cqid, cq->cidx);
 		t4_hwcq_consume(cq);
 	}
 	return ret;

@@ -712,8 +712,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 	wc->vendor_err = CQE_STATUS(&cqe);
 	wc->wc_flags = 0;

-	pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
-		 __func__, CQE_QPID(&cqe),
+	pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+		 CQE_QPID(&cqe),
 		 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe),
 		 CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),

@@ -857,7 +857,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 	struct c4iw_cq *chp;
 	struct c4iw_ucontext *ucontext;

-	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
+	pr_debug("ib_cq %p\n", ib_cq);
 	chp = to_c4iw_cq(ib_cq);

 	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);

@@ -889,7 +889,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;

-	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
 	if (attr->flags)
 		return ERR_PTR(-EINVAL);

@@ -996,8 +996,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		mm2->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm2);
 	}
-	pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
-		 __func__, chp->cq.cqid, chp, chp->cq.size,
+	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+		 chp->cq.cqid, chp, chp->cq.size,
 		 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
 	return &chp->ibcq;
 err6:
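The pattern across all the cxgb4 hunks is the same: the "%s"/__func__ pair is dropped from pr_debug() format strings. The apparent rationale (hedged; the page shows only the mechanical change) is that with CONFIG_DYNAMIC_DEBUG the kernel can prepend the function name itself, so baking __func__ into every call site is redundant. An illustrative sketch, not from the patch:

#include <linux/printk.h>

/* With dynamic debug, enabling the 'f' decorator flag, e.g.
 *   echo 'file cq.c +pf' > /sys/kernel/debug/dynamic_debug/control
 * makes each pr_debug() line carry its function name automatically. */
static void log_cqid(unsigned int cqid)
{
	pr_debug("%s cqid 0x%x\n", __func__, cqid);	/* old style: redundant */
	pr_debug("cqid 0x%x\n", cqid);			/* new style */
}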
drivers/infiniband/hw/cxgb4/device.c

@@ -811,8 +811,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	rdev->qpmask = rdev->lldi.udb_density - 1;
 	rdev->cqmask = rdev->lldi.ucq_density - 1;
-	pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
-		 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
 		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
 		 rdev->lldi.vr->pbl.start,
 		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,

@@ -935,7 +935,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
 static void c4iw_remove(struct uld_ctx *ctx)
 {
-	pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
+	pr_debug("c4iw_dev %p\n", ctx->dev);
 	c4iw_unregister_device(ctx->dev);
 	c4iw_dealloc(ctx);
 }

@@ -969,8 +969,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.lldi = *infop;

 	/* init various hw-queue params based on lld info */
-	pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
-		 __func__, devp->rdev.lldi.sge_ingpadboundary,
+	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+		 devp->rdev.lldi.sge_ingpadboundary,
 		 devp->rdev.lldi.sge_egrstatuspagesize);

 	devp->rdev.hw_queue.t4_eq_status_entries =

@@ -1069,8 +1069,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 	}
 	ctx->lldi = *infop;

-	pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-		 __func__, pci_name(ctx->lldi.pdev),
+	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+		 pci_name(ctx->lldi.pdev),
 		 ctx->lldi.nchan, ctx->lldi.nrxq,
 		 ctx->lldi.ntxq, ctx->lldi.nports);

@@ -1203,7 +1203,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
 	struct uld_ctx *ctx = handle;

-	pr_debug("%s new_state %u\n", __func__, new_state);
+	pr_debug("new_state %u\n", new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
 		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
drivers/infiniband/hw/cxgb4/ev.c

@@ -234,7 +234,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 		if (atomic_dec_and_test(&chp->refcnt))
 			wake_up(&chp->wait);
 	} else {
-		pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
+		pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
 		spin_unlock_irqrestore(&dev->lock, flag);
 	}
 	return 0;
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -230,8 +230,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 	ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
 	if (!ret) {
-		pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
-			 func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+		pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+		       func, pci_name(rdev->lldi.pdev), hwtid, qpid);
 		rdev->flags |= T4_FATAL_ERROR;
 		wr_waitp->ret = -EIO;
 	}

@@ -537,8 +537,7 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
 		if (mm->key == key && mm->len == len) {
 			list_del_init(&mm->entry);
 			spin_unlock(&ucontext->mmap_lock);
-			pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-				 __func__, key,
+			pr_debug("key 0x%x addr 0x%llx len %d\n", key,
 				 (unsigned long long)mm->addr, mm->len);
 			return mm;
 		}

@@ -551,8 +550,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
 			       struct c4iw_mm_entry *mm)
 {
 	spin_lock(&ucontext->mmap_lock);
-	pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-		 __func__, mm->key, (unsigned long long)mm->addr, mm->len);
+	pr_debug("key 0x%x addr 0x%llx len %d\n",
+		 mm->key, (unsigned long long)mm->addr, mm->len);
 	list_add_tail(&mm->entry, &ucontext->mmaps);
 	spin_unlock(&ucontext->mmap_lock);
 }

@@ -671,16 +670,14 @@ enum c4iw_mmid_state {
 #define MPA_V2_IRD_ORD_MASK         0x3FFF

 #define c4iw_put_ep(ep) {						\
-	pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n",		\
-		 __func__, __LINE__,					\
+	pr_debug("put_ep ep %p refcnt %d\n",				\
		 ep, kref_read(&((ep)->kref)));				\
	WARN_ON(kref_read(&((ep)->kref)) < 1);				\
	kref_put(&((ep)->kref), _c4iw_free_ep);				\
 }

 #define c4iw_get_ep(ep) {						\
-	pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n",		\
-		 __func__, __LINE__,					\
+	pr_debug("get_ep ep %p, refcnt %d\n",				\
		 ep, kref_read(&((ep)->kref)));				\
	kref_get(&((ep)->kref));					\
 }
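The c4iw_get_ep()/c4iw_put_ep() wrappers above are a thin logging layer over the kernel's kref refcounting. A minimal sketch of the pattern they wrap (the endpoint struct and release function are illustrative names, not from the driver):

#include <linux/kref.h>
#include <linux/slab.h>

struct endpoint {
	struct kref kref;
	/* ... connection state ... */
};

/* Release callback, invoked once the last reference is dropped. */
static void endpoint_release(struct kref *kref)
{
	struct endpoint *ep = container_of(kref, struct endpoint, kref);

	kfree(ep);
}

static void endpoint_get(struct endpoint *ep)
{
	kref_get(&ep->kref);			/* take a reference */
}

static void endpoint_put(struct endpoint *ep)
{
	WARN_ON(kref_read(&ep->kref) < 1);	/* catch over-puts early */
	kref_put(&ep->kref, endpoint_release);	/* drop; free on zero */
}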
drivers/infiniband/hw/cxgb4/mem.c

@@ -124,7 +124,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

 	addr &= 0x7FFFFFF;
-	pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
+	pr_debug("addr 0x%x len %u\n", addr, len);
 	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
 	c4iw_init_wr_wait(&wr_wait);
 	for (i = 0; i < num_wqe; i++) {

@@ -285,8 +285,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 		mutex_unlock(&rdev->stats.lock);
 		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
 	}
-	pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-		 __func__, stag_state, type, pdid, stag_idx);
+	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+		 stag_state, type, pdid, stag_idx);

 	/* write TPT entry */
 	if (reset_tpt_entry)

@@ -327,8 +327,8 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
 {
 	int err;

-	pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		 __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+	pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+		 pbl_addr, rdev->lldi.vr->pbl.start,
 		 pbl_size);

 	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);

@@ -372,7 +372,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
 	mhp->attr.stag = stag;
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
 	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
 }

@@ -422,7 +422,7 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	int ret;
 	u32 stag = T4_STAG_UNSET;

-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;

@@ -479,7 +479,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;

-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);

 	if (length == ~0ULL)
 		return ERR_PTR(-EINVAL);

@@ -616,7 +616,7 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 		ret = -ENOMEM;
 		goto dealloc_win;
 	}
-	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
 	return &(mhp->ibmw);

 dealloc_win:

@@ -641,7 +641,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
 	kfree_skb(mhp->dereg_skb);
 	kfree(mhp);
-	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
 	return 0;
 }

@@ -699,7 +699,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 		goto err3;
 	}

-	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
 	return &(mhp->ibmr);
 err3:
 	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,

@@ -744,7 +744,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 	struct c4iw_mr *mhp;
 	u32 mmid;

-	pr_debug("%s ib_mr %p\n", __func__, ib_mr);
+	pr_debug("ib_mr %p\n", ib_mr);

 	mhp = to_c4iw_mr(ib_mr);
 	rhp = mhp->rhp;

@@ -762,7 +762,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
 		ib_umem_release(mhp->umem);
-	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
 	kfree(mhp);
 	return 0;
 }
drivers/infiniband/hw/cxgb4/provider.c

@@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
 	ucontext = container_of(kref, struct c4iw_ucontext, kref);
 	rhp = to_c4iw_dev(ucontext->ibucontext.device);

-	pr_debug("%s ucontext %p\n", __func__, ucontext);
+	pr_debug("ucontext %p\n", ucontext);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);

@@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
 {
 	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);

-	pr_debug("%s context %p\n", __func__, context);
+	pr_debug("context %p\n", context);
 	c4iw_put_ucontext(ucontext);
 	return 0;
 }

@@ -127,7 +127,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	int ret = 0;
 	struct c4iw_mm_entry *mm = NULL;

-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context) {
 		ret = -ENOMEM;

@@ -185,7 +185,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct c4iw_ucontext *ucontext;
 	u64 addr;

-	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
 		 key, len);

 	if (vma->vm_start & (PAGE_SIZE-1))

@@ -251,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
-	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+	pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
 	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
 	mutex_lock(&rhp->rdev.stats.lock);
 	rhp->rdev.stats.pd.cur--;

@@ -268,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 	u32 pdid;
 	struct c4iw_dev *rhp;

-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	rhp = (struct c4iw_dev *) ibdev;
 	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
 	if (!pdid)

@@ -291,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
 		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
 	mutex_unlock(&rhp->rdev.stats.lock);
-	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+	pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
 	return &php->ibpd;
 }

 static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			   u16 *pkey)
 {
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	*pkey = 0;
 	return 0;
 }

@@ -308,8 +308,8 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
 {
 	struct c4iw_dev *dev;

-	pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
-		 __func__, ibdev, port, index, gid);
+	pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+		 ibdev, port, index, gid);
 	dev = to_c4iw_dev(ibdev);
 	BUG_ON(port == 0);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));

@@ -323,7 +323,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
 	struct c4iw_dev *dev;

-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);

 	if (uhw->inlen || uhw->outlen)
 		return -EINVAL;

@@ -364,7 +364,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 	struct net_device *netdev;
 	struct in_device *inetdev;

-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);

 	dev = to_c4iw_dev(ibdev);
 	netdev = dev->rdev.lldi.ports[port-1];

@@ -406,7 +406,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev.dev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	return sprintf(buf, "%d\n",
 		       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
 }

@@ -419,7 +419,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	return sprintf(buf, "%s\n", info.driver);
 }

@@ -429,7 +429,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev.dev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
 		       c4iw_dev->rdev.lldi.pdev->device);
 }

@@ -521,7 +521,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, ibdev);

-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
 		 FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),

@@ -535,7 +535,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	int ret;
 	int i;

-	pr_debug("%s c4iw_dev %p\n", __func__, dev);
+	pr_debug("c4iw_dev %p\n", dev);
 	BUG_ON(!dev->rdev.lldi.ports[0]);
 	strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));

@@ -645,7 +645,7 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 {
 	int i;

-	pr_debug("%s c4iw_dev %p\n", __func__, dev);
+	pr_debug("c4iw_dev %p\n", dev);
 	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
 		device_remove_file(&dev->ibdev.dev,
 				   c4iw_class_attributes[i]);
drivers/infiniband/hw/cxgb4/qp.c

@@ -254,8 +254,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		ret = -ENOMEM;
 		goto free_sq;
 	}
-	pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
-		 __func__, wq->sq.queue,
+	pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+		 wq->sq.queue,
 		 (unsigned long long)virt_to_phys(wq->sq.queue),
 		 wq->rq.queue,
 		 (unsigned long long)virt_to_phys(wq->rq.queue));

@@ -361,8 +361,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	if (ret)
 		goto free_dma;

-	pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
-		 __func__, wq->sq.qid, wq->rq.qid, wq->db,
+	pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+		 wq->sq.qid, wq->rq.qid, wq->db,
 		 wq->sq.bar2_va, wq->rq.bar2_va);

 	return 0;

@@ -724,7 +724,7 @@ static void free_qp_work(struct work_struct *work)
 	ucontext = qhp->ucontext;
 	rhp = qhp->rhp;

-	pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
 	destroy_qp(&rhp->rdev, &qhp->wq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

@@ -738,19 +738,19 @@ static void queue_qp_free(struct kref *kref)
 	struct c4iw_qp *qhp;

 	qhp = container_of(kref, struct c4iw_qp, kref);
-	pr_debug("%s qhp %p\n", __func__, qhp);
+	pr_debug("qhp %p\n", qhp);
 	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }

 void c4iw_qp_add_ref(struct ib_qp *qp)
 {
-	pr_debug("%s ib_qp %p\n", __func__, qp);
+	pr_debug("ib_qp %p\n", qp);
 	kref_get(&to_c4iw_qp(qp)->kref);
 }

 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
-	pr_debug("%s ib_qp %p\n", __func__, qp);
+	pr_debug("ib_qp %p\n", qp);
 	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }

@@ -958,8 +958,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
 			break;
 		default:
-			pr_debug("%s post of type=%d TBD!\n", __func__,
-				 wr->opcode);
+			pr_warn("%s post of type=%d TBD!\n", __func__,
+				wr->opcode);
 			err = -EINVAL;
 		}
 		if (err) {

@@ -980,8 +980,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

-		pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
-			 __func__,
+		pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
 			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
 			 swsqe->opcode, swsqe->read_len);
 		wr = wr->next;

@@ -1057,8 +1056,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wqe->recv.r2[1] = 0;
 		wqe->recv.r2[2] = 0;
 		wqe->recv.len16 = len16;
-		pr_debug("%s cookie 0x%llx pidx %u\n",
-			 __func__,
+		pr_debug("cookie 0x%llx pidx %u\n",
 			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
 		t4_rq_produce(&qhp->wq, len16);
 		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);

@@ -1218,7 +1216,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 	struct sk_buff *skb;
 	struct terminate_message *term;

-	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
 		 qhp->ep->hwtid);

 	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);

@@ -1255,7 +1253,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	int rq_flushed, sq_flushed;
 	unsigned long flag;

-	pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);

 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&rchp->lock, flag);

@@ -1340,8 +1338,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	int ret;
 	struct sk_buff *skb;

-	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-		 ep->hwtid);
+	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);

 	skb = skb_dequeue(&ep->com.ep_skb_list);
 	if (WARN_ON(!skb))

@@ -1367,13 +1364,13 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
 				  qhp->wq.sq.qid, __func__);
 out:
-	pr_debug("%s ret %d\n", __func__, ret);
+	pr_debug("ret %d\n", ret);
 	return ret;
 }

 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 {
-	pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
+	pr_debug("p2p_type = %d\n", p2p_type);
 	memset(&init->u, 0, sizeof init->u);
 	switch (p2p_type) {
 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:

@@ -1402,7 +1399,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	int ret;
 	struct sk_buff *skb;

-	pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
 		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);

 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);

@@ -1475,7 +1472,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 err1:
 	free_ird(rhp, qhp->attr.max_ird);
 out:
-	pr_debug("%s ret %d\n", __func__, ret);
+	pr_debug("ret %d\n", ret);
 	return ret;
 }

@@ -1492,8 +1489,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	int free = 0;
 	struct c4iw_ep *ep = NULL;

-	pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
-		 __func__,
+	pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
 		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
 		 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

@@ -1680,7 +1676,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	}
 	goto out;
 err:
-	pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+	pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
 		 qhp->wq.sq.qid);

 	/* disassociate the LLP connection */

@@ -1717,7 +1713,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	 */
 	if (free)
 		c4iw_put_ep(&ep->com);
-	pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
+	pr_debug("exit state %d\n", qhp->attr.state);
 	return ret;
 }

@@ -1747,7 +1743,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	c4iw_qp_rem_ref(ib_qp);

-	pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+	pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
 	return 0;
 }

@@ -1766,7 +1762,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
 	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;

-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);

 	if (attrs->qp_type != IB_QPT_RC)
 		return ERR_PTR(-EINVAL);

@@ -1937,8 +1933,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->ibqp.qp_num = qhp->wq.sq.qid;
 	init_timer(&(qhp->timer));
 	INIT_LIST_HEAD(&qhp->db_fc_entry);
-	pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
-		 __func__,
+	pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
 		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
 		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
 		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);

@@ -1971,7 +1966,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum c4iw_qp_attr_mask mask = 0;
 	struct c4iw_qp_attributes attrs;

-	pr_debug("%s ib_qp %p\n", __func__, ibqp);
+	pr_debug("ib_qp %p\n", ibqp);

 	/* iwarp does not support the RTR state */
 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))

@@ -2017,7 +2012,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
 {
-	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+	pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
 	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
 }
drivers/infiniband/hw/cxgb4/resource.c
浏览文件 @
1848757c
...
...
@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
 void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
 {
-	pr_debug("%s entry 0x%x\n", __func__, entry);
+	pr_debug("entry 0x%x\n", entry);
 	c4iw_id_free(id_table, entry);
 }
...
@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	mutex_lock(&rdev->stats.lock);
 	if (rdev->stats.qid.cur > rdev->stats.qid.max)
 		rdev->stats.qid.max = rdev->stats.qid.cur;
...
@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	entry->qid = qid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->cqids);
...
@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	mutex_lock(&rdev->stats.lock);
 	if (rdev->stats.qid.cur > rdev->stats.qid.max)
 		rdev->stats.qid.max = rdev->stats.qid.cur;
...
@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	entry->qid = qid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->qpids);
...
@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
 u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

-	pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
 	mutex_lock(&rdev->stats.lock);
 	if (addr) {
 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
...
@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
-	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+	pr_debug("addr 0x%x size %d\n", addr, size);
 	mutex_lock(&rdev->stats.lock);
 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
...
@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 	while (pbl_start < pbl_top) {
 		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
 		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
-			pr_debug("%s failed to add PBL chunk (%x/%x)\n",
-				 __func__, pbl_start, pbl_chunk);
+			pr_debug("failed to add PBL chunk (%x/%x)\n",
+				 pbl_start, pbl_chunk);
 			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
 				pr_warn("Failed to add all PBL chunks (%x/%x)\n",
 					pbl_start, pbl_top - pbl_start);
...
@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 			}
 			pbl_chunk >>= 1;
 		} else {
-			pr_debug("%s added PBL chunk (%x/%x)\n",
-				 __func__, pbl_start, pbl_chunk);
+			pr_debug("added PBL chunk (%x/%x)\n",
+				 pbl_start, pbl_chunk);
 			pbl_start += pbl_chunk;
 		}
 	}
...
@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

-	pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+	pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
 	if (!addr)
 		pr_warn_ratelimited("%s: Out of RQT memory\n",
 				    pci_name(rdev->lldi.pdev));
...
@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
-	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+	pr_debug("addr 0x%x size %d\n", addr, size << 6);
 	mutex_lock(&rdev->stats.lock);
 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
...
@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 	while (rqt_start < rqt_top) {
 		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
 		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
-			pr_debug("%s failed to add RQT chunk (%x/%x)\n",
-				 __func__, rqt_start, rqt_chunk);
+			pr_debug("failed to add RQT chunk (%x/%x)\n",
+				 rqt_start, rqt_chunk);
 			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
 				pr_warn("Failed to add all RQT chunks (%x/%x)\n",
 					rqt_start, rqt_top - rqt_start);
...
@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 			}
 			rqt_chunk >>= 1;
 		} else {
-			pr_debug("%s added RQT chunk (%x/%x)\n",
-				 __func__, rqt_start, rqt_chunk);
+			pr_debug("added RQT chunk (%x/%x)\n",
+				 rqt_start, rqt_chunk);
 			rqt_start += rqt_chunk;
 		}
 	}
...
@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

-	pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
 	if (addr) {
 		mutex_lock(&rdev->stats.lock);
 		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
...
@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
-	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+	pr_debug("addr 0x%x size %d\n", addr, size);
 	mutex_lock(&rdev->stats.lock);
 	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
...
@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
 	while (start < top) {
 		chunk = min(top - start + 1, chunk);
 		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
-			pr_debug("%s failed to add OCQP chunk (%x/%x)\n",
-				 __func__, start, chunk);
+			pr_debug("failed to add OCQP chunk (%x/%x)\n",
+				 start, chunk);
 			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
 				pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
 					start, top - start);
...
@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
 			}
 			chunk >>= 1;
 		} else {
-			pr_debug("%s added OCQP chunk (%x/%x)\n",
-				 __func__, start, chunk);
+			pr_debug("added OCQP chunk (%x/%x)\n",
+				 start, chunk);
 			start += chunk;
 		}
 	}
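Note: the PBL, RQT and OCQP pool-create functions all use the same chunk-halving idiom on top of the kernel's genalloc allocator. A minimal standalone sketch of that idiom (half-open range [start, top), "min_shift" is an illustrative floor, not a cxgb4 constant):

#include <linux/genalloc.h>

/* Add [start, top) to a gen_pool in large chunks, halving the chunk
 * size on failure so an oddly sized or fragmented range still gets
 * covered, and giving up once the chunk drops below the floor.
 */
static int add_range_halving(struct gen_pool *pool, unsigned long start,
			     unsigned long top, unsigned int min_shift)
{
	unsigned long chunk = top - start;

	while (start < top) {
		chunk = min(top - start, chunk);
		if (gen_pool_add(pool, start, chunk, -1)) {
			if (chunk <= (1UL << min_shift))
				return -ENOMEM;	/* below the floor: give up */
			chunk >>= 1;		/* retry with a smaller chunk */
		} else {
			start += chunk;		/* chunk accepted, advance */
		}
	}
	return 0;
}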
...

drivers/infiniband/hw/cxgb4/t4.h
@@ -466,14 +466,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
 	wmb();
 	if (wq->sq.bar2_va) {
 		if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
-			pr_debug("%s: WC wq->sq.pidx = %d\n",
-				 __func__, wq->sq.pidx);
+			pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
 			pio_copy((u64 __iomem *)
 				 (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
 				 (u64 *)wqe);
 		} else {
-			pr_debug("%s: DB wq->sq.pidx = %d\n",
-				 __func__, wq->sq.pidx);
+			pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
 			writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
 			       wq->sq.bar2_va + SGE_UDB_KDOORBELL);
 		}
...
@@ -493,14 +491,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
 	wmb();
 	if (wq->rq.bar2_va) {
 		if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
-			pr_debug("%s: WC wq->rq.pidx = %d\n",
-				 __func__, wq->rq.pidx);
+			pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
 			pio_copy((u64 __iomem *)
 				 (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
 				 (void *)wqe);
 		} else {
-			pr_debug("%s: DB wq->rq.pidx = %d\n",
-				 __func__, wq->rq.pidx);
+			pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
 			writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
 			       wq->rq.bar2_va + SGE_UDB_KDOORBELL);
 		}
...
@@ -601,8 +597,8 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
 {
 	cq->sw_in_use++;
 	if (cq->sw_in_use == cq->size) {
-		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
-			 __func__, cq->cqid);
+		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+			__func__, cq->cqid);
 		cq->error = 1;
 		BUG_ON(1);
 	}
...
@@ -673,8 +669,8 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
 	if (cq->sw_in_use == cq->size) {
-		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
-			 __func__, cq->cqid);
+		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+			__func__, cq->cqid);
 		cq->error = 1;
 		BUG_ON(1);
 		return NULL;
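Note: besides the pr_debug cleanup, the two software-CQ overflow messages are promoted to pr_warn, since they immediately precede BUG_ON() and should never be silent. The doorbell helpers themselves are unchanged in behavior; a condensed sketch of the BAR2 policy they implement (same fields as t4.h, shown in one place for clarity):

/* A single WR rung on a write-combining BAR2 mapping (bar2_qid == 0)
 * can push the whole WQE through the WC window, saving the adapter a
 * DMA read of the descriptor; anything else uses a kdoorbell write.
 */
static inline void sketch_ring_sq_db(struct t4_wq *wq, u16 inc,
				     union t4_wr *wqe)
{
	wmb();	/* order WQE stores before the doorbell write */
	if (!wq->sq.bar2_va)
		return;	/* no BAR2 mapping: handled elsewhere */
	if (inc == 1 && wq->sq.bar2_qid == 0 && wqe)
		pio_copy((u64 __iomem *)(wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
			 (u64 *)wqe);		/* write-combined fast path */
	else
		writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
		       wq->sq.bar2_va + SGE_UDB_KDOORBELL);
}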
...

drivers/infiniband/hw/hns/Kconfig
 config INFINIBAND_HNS
 	tristate "HNS RoCE Driver"
 	depends on NET_VENDOR_HISILICON
-	depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET
+	depends on ARM64 || (COMPILE_TEST && 64BIT)
 	---help---
 	  This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
-	  is used in Hisilicon Hi1610 and more further ICT SoC.
+	  is used in Hisilicon Hip06 and more further ICT SoC based on
+	  platform device.

 	  To compile this driver as a module, choose M here: the module
 	  will be called hns-roce.

+config INFINIBAND_HNS_HIP06
+	tristate "Hisilicon Hip06 Family RoCE support"
+	depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+	---help---
+	  RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
+	  Hip07 SoC. These RoCE engines are platform devices.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called hns-roce-hw-v1.
+
+config INFINIBAND_HNS_HIP08
+	tristate "Hisilicon Hip08 Family RoCE support"
+	depends on INFINIBAND_HNS && PCI && HNS3
+	---help---
+	  RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
+	  The RoCE engine is a PCI device.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called hns-roce-hw-v2.

drivers/infiniband/hw/hns/Makefile
@@ -2,7 +2,13 @@
 #
 # Makefile for the Hisilicon RoCE drivers.
 #
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3

 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
 hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
-	hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
+	hns_roce_cq.o hns_roce_alloc.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+hns-roce-hw-v1-objs := hns_roce_hw_v1.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o

drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct ib_gid_attr gid_attr;
 	struct hns_roce_ah *ah;
 	u16 vlan_tag = 0xffff;
...

drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
 {
 	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
 }
+EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);

 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 				int align, unsigned long *obj)
...
@@ -160,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 		       struct hns_roce_buf *buf)
 {
 	int i;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 bits_per_long = BITS_PER_LONG;

 	if (buf->nbufs == 1) {
...
@@ -171,12 +172,13 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->page_list[i].buf)
-				dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+				dma_free_coherent(dev, PAGE_SIZE,
 						  buf->page_list[i].buf,
 						  buf->page_list[i].map);
 		kfree(buf->page_list);
 	}
 }
+EXPORT_SYMBOL_GPL(hns_roce_buf_free);

 int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 		       struct hns_roce_buf *buf)
...
@@ -184,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 	int i = 0;
 	dma_addr_t t;
 	struct page **pages;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 bits_per_long = BITS_PER_LONG;

 	/* SQ/RQ buf lease than one page, SQ + RQ = 8K */
...

drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -38,69 +38,7 @@
 #define CMD_POLL_TOKEN 0xffff
 #define CMD_MAX_NUM 32
-#define STATUS_MASK 0xff
 #define CMD_TOKEN_MASK 0x1f
-#define GO_BIT_TIMEOUT_MSECS 10000
-
-enum {
-	HCR_TOKEN_OFFSET	= 0x14,
-	HCR_STATUS_OFFSET	= 0x18,
-	HCR_GO_BIT		= 15,
-};
-
-static int cmd_pending(struct hns_roce_dev *hr_dev)
-{
-	u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
-
-	return (!!(status & (1 << HCR_GO_BIT)));
-}
-
-/* this function should be serialized with "hcr_mutex" */
-static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
-				       u64 in_param, u64 out_param,
-				       u32 in_modifier, u8 op_modifier,
-				       u16 op, u16 token, int event)
-{
-	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	struct device *dev = &hr_dev->pdev->dev;
-	u32 __iomem *hcr = (u32 *)cmd->hcr;
-	int ret = -EAGAIN;
-	unsigned long end;
-	u32 val = 0;
-
-	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
-	while (cmd_pending(hr_dev)) {
-		if (time_after(jiffies, end)) {
-			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
-				(int)end);
-			goto out;
-		}
-		cond_resched();
-	}
-
-	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
-		       op);
-	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
-		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
-	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
-	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
-	roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
-		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
-
-	__raw_writeq(cpu_to_le64(in_param), hcr + 0);
-	__raw_writeq(cpu_to_le64(out_param), hcr + 2);
-	__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
-	/* Memory barrier */
-	wmb();
-	__raw_writel(cpu_to_le32(val), hcr + 5);
-
-	mmiowb();
-	ret = 0;
-
-out:
-	return ret;
-}

 static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
 				     u64 out_param, u32 in_modifier,
...
@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
 				     int event)
 {
 	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	int ret = -EAGAIN;
+	int ret;

 	mutex_lock(&cmd->hcr_mutex);
-	ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
-					  in_modifier, op_modifier, op, token,
-					  event);
+	ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
+				    op_modifier, op, token, event);
 	mutex_unlock(&cmd->hcr_mutex);

 	return ret;
...
@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
 				    u8 op_modifier, u16 op,
 				    unsigned long timeout)
 {
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 __iomem *hcr = hr_dev->cmd.hcr;
-	unsigned long end = 0;
-	u32 status = 0;
+	struct device *dev = hr_dev->dev;
 	int ret;

 	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
...
@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
 					CMD_POLL_TOKEN, 0);
 	if (ret) {
 		dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
-		goto out;
+		return ret;
 	}

-	end = msecs_to_jiffies(timeout) + jiffies;
-	while (cmd_pending(hr_dev) && time_before(jiffies, end))
-		cond_resched();
-
-	if (cmd_pending(hr_dev)) {
-		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	status = le32_to_cpu((__force __be32)
-			      __raw_readl(hcr + HCR_STATUS_OFFSET));
-	if ((status & STATUS_MASK) != 0x1) {
-		dev_err(dev, "mailbox status 0x%x!\n", status);
-		ret = -EBUSY;
-		goto out;
-	}
-
-out:
-	return ret;
+	return hr_dev->hw->chk_mbox(hr_dev, timeout);
 }

 static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
...
@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 				    unsigned long timeout)
 {
 	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_cmd_context *context;
-	int ret = 0;
+	struct device *dev = hr_dev->dev;
+	int ret;

 	spin_lock(&cmd->context_lock);
 	WARN_ON(cmd->free_head < 0);
...
@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
 			      in_modifier, op_modifier, op, timeout);
 }
+EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);

 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

 	mutex_init(&hr_dev->cmd.hcr_mutex);
 	sema_init(&hr_dev->cmd.poll_sem, 1);
 	hr_dev->cmd.use_events = 0;
 	hr_dev->cmd.toggle = 1;
 	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
-	hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
 	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
 					   HNS_ROCE_MAILBOX_SIZE,
 					   HNS_ROCE_MAILBOX_SIZE, 0);
...
@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox
 	return mailbox;
 }
+EXPORT_SYMBOL_GPL(hns_roce_alloc_cmd_mailbox);

 void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
 			       struct hns_roce_cmd_mailbox *mailbox)
...
@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
 	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
 	kfree(mailbox);
 }
+EXPORT_SYMBOL_GPL(hns_roce_free_cmd_mailbox);
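Note: this is the core of the hw abstraction change. The common mailbox path no longer touches ROCEE registers itself; it calls per-generation post_mbox/chk_mbox hooks, so the v1 register-banging code moves into hns_roce_hw_v1.c and the PCI-based v2 engine can supply its own transport. A minimal sketch of what a backend provides (names and register handling here are illustrative, not the real hw_v1 code):

static int example_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
			     u64 out_param, u32 in_modifier, u8 op_modifier,
			     u16 op, u16 token, int event)
{
	/* write in_param/out_param/in_modifier and set the GO bit,
	 * or build a command-queue descriptor on v2-style hardware */
	return 0;
}

static int example_chk_mbox(struct hns_roce_dev *hr_dev,
			    unsigned long timeout)
{
	/* poll for completion of the posted mailbox within "timeout" ms */
	return 0;
}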

drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -36,6 +36,56 @@
 #define HNS_ROCE_MAILBOX_SIZE		4096
 #define HNS_ROCE_CMD_TIMEOUT_MSECS	10000

+enum {
+	/* QPC BT commands */
+	HNS_ROCE_CMD_WRITE_QPC_BT0	= 0x0,
+	HNS_ROCE_CMD_WRITE_QPC_BT1	= 0x1,
+	HNS_ROCE_CMD_WRITE_QPC_BT2	= 0x2,
+	HNS_ROCE_CMD_READ_QPC_BT0	= 0x4,
+	HNS_ROCE_CMD_READ_QPC_BT1	= 0x5,
+	HNS_ROCE_CMD_READ_QPC_BT2	= 0x6,
+	HNS_ROCE_CMD_DESTROY_QPC_BT0	= 0x8,
+	HNS_ROCE_CMD_DESTROY_QPC_BT1	= 0x9,
+	HNS_ROCE_CMD_DESTROY_QPC_BT2	= 0xa,
+
+	/* QPC operation */
+	HNS_ROCE_CMD_MODIFY_QPC		= 0x41,
+	HNS_ROCE_CMD_QUERY_QPC		= 0x42,
+
+	/* CQC BT commands */
+	HNS_ROCE_CMD_WRITE_CQC_BT0	= 0x10,
+	HNS_ROCE_CMD_WRITE_CQC_BT1	= 0x11,
+	HNS_ROCE_CMD_WRITE_CQC_BT2	= 0x12,
+	HNS_ROCE_CMD_READ_CQC_BT0	= 0x14,
+	HNS_ROCE_CMD_READ_CQC_BT1	= 0x15,
+	HNS_ROCE_CMD_READ_CQC_BT2	= 0x1b,
+	HNS_ROCE_CMD_DESTROY_CQC_BT0	= 0x18,
+	HNS_ROCE_CMD_DESTROY_CQC_BT1	= 0x19,
+	HNS_ROCE_CMD_DESTROY_CQC_BT2	= 0x1a,
+
+	/* MPT BT commands */
+	HNS_ROCE_CMD_WRITE_MPT_BT0	= 0x20,
+	HNS_ROCE_CMD_WRITE_MPT_BT1	= 0x21,
+	HNS_ROCE_CMD_WRITE_MPT_BT2	= 0x22,
+	HNS_ROCE_CMD_READ_MPT_BT0	= 0x24,
+	HNS_ROCE_CMD_READ_MPT_BT1	= 0x25,
+	HNS_ROCE_CMD_READ_MPT_BT2	= 0x26,
+	HNS_ROCE_CMD_DESTROY_MPT_BT0	= 0x28,
+	HNS_ROCE_CMD_DESTROY_MPT_BT1	= 0x29,
+	HNS_ROCE_CMD_DESTROY_MPT_BT2	= 0x2a,
+
+	/* SRQC BT commands */
+	HNS_ROCE_CMD_WRITE_SRQC_BT0	= 0x30,
+	HNS_ROCE_CMD_WRITE_SRQC_BT1	= 0x31,
+	HNS_ROCE_CMD_WRITE_SRQC_BT2	= 0x32,
+	HNS_ROCE_CMD_READ_SRQC_BT0	= 0x34,
+	HNS_ROCE_CMD_READ_SRQC_BT1	= 0x35,
+	HNS_ROCE_CMD_READ_SRQC_BT2	= 0x36,
+	HNS_ROCE_CMD_DESTROY_SRQC_BT0	= 0x38,
+	HNS_ROCE_CMD_DESTROY_SRQC_BT1	= 0x39,
+	HNS_ROCE_CMD_DESTROY_SRQC_BT2	= 0x3a,
+};
+
 enum {
 	/* TPT commands */
 	HNS_ROCE_CMD_SW2HW_MPT	= 0xd,
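Note: these new opcodes are issued through the exported hns_roce_cmd_mbox() helper. An illustrative call (the mailbox payload layout is hardware-specific and assumed to be prepared by the caller; this is not code from the diff):

static int example_write_cqc_bt0(struct hns_roce_dev *hr_dev,
				 struct hns_roce_cmd_mailbox *mailbox,
				 unsigned long cqn)
{
	/* in_param = DMA address of the prepared mailbox buffer,
	 * in_modifier = object index (here the CQN), op = BT command */
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, cqn, 0,
				 HNS_ROCE_CMD_WRITE_CQC_BT0,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}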
...

drivers/infiniband/hw/hns/hns_roce_common.h
@@ -341,6 +341,7 @@
 #define ROCEE_BT_CMD_L_REG			0x200

 #define ROCEE_MB1_REG				0x210
+#define ROCEE_MB6_REG				0x224
 #define ROCEE_DB_SQ_L_0_REG			0x230
 #define ROCEE_DB_OTHERS_L_0_REG			0x238
 #define ROCEE_QP1C_CFG0_0_REG			0x270
...
@@ -362,4 +363,26 @@
 #define ROCEE_ECC_UCERR_ALM0_REG		0xB34
 #define ROCEE_ECC_CERR_ALM0_REG			0xB40

+/* V2 ROCEE REG */
+#define ROCEE_TX_CMQ_BASEADDR_L_REG		0x07000
+#define ROCEE_TX_CMQ_BASEADDR_H_REG		0x07004
+#define ROCEE_TX_CMQ_DEPTH_REG			0x07008
+#define ROCEE_TX_CMQ_TAIL_REG			0x07010
+#define ROCEE_TX_CMQ_HEAD_REG			0x07014
+
+#define ROCEE_RX_CMQ_BASEADDR_L_REG		0x07018
+#define ROCEE_RX_CMQ_BASEADDR_H_REG		0x0701c
+#define ROCEE_RX_CMQ_DEPTH_REG			0x07020
+#define ROCEE_RX_CMQ_TAIL_REG			0x07024
+#define ROCEE_RX_CMQ_HEAD_REG			0x07028
+
+#define ROCEE_VF_SMAC_CFG0_REG			0x12000
+#define ROCEE_VF_SMAC_CFG1_REG			0x12004
+
+#define ROCEE_VF_SGID_CFG0_REG			0x10000
+#define ROCEE_VF_SGID_CFG1_REG			0x10004
+#define ROCEE_VF_SGID_CFG2_REG			0x10008
+#define ROCEE_VF_SGID_CFG3_REG			0x1000c
+#define ROCEE_VF_SGID_CFG4_REG			0x10010
+
 #endif /* _HNS_ROCE_COMMON_H */

drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(&hr_dev->pdev->dev,
+		dev_err(hr_dev->dev,
 			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
 			event_type, hr_cq->cqn);
 		return;
...
@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 			     struct hns_roce_uar *hr_uar,
 			     struct hns_roce_cq *hr_cq, int vector)
 {
-	struct hns_roce_cmd_mailbox *mailbox = NULL;
-	struct hns_roce_cq_table *cq_table = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_cmd_mailbox *mailbox;
+	struct hns_roce_hem_table *mtt_table;
+	struct hns_roce_cq_table *cq_table;
+	struct device *dev = hr_dev->dev;
 	dma_addr_t dma_handle;
-	u64 *mtts = NULL;
-	int ret = 0;
+	u64 *mtts;
+	int ret;

 	cq_table = &hr_dev->cq_table;

 	/* Get the physical address of cq buf */
-	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
+	else
+		mtt_table = &hr_dev->mr_table.mtt_table;
+
+	mtts = hns_roce_table_find(hr_dev, mtt_table,
 				   hr_mtt->first_seg, &dma_handle);
 	if (!mtts) {
 		dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
...
@@ -182,21 +188,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
 void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret;

 	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
 	if (ret)
 		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
 			hr_cq->cqn);

-	/* Waiting interrupt process procedure carried out */
-	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
-	/* wait for all interrupt processed */
-	if (atomic_dec_and_test(&hr_cq->refcount))
-		complete(&hr_cq->free);
-	wait_for_completion(&hr_cq->free);
+	if (hr_dev->eq_table.eq) {
+		/* Waiting interrupt process procedure carried out */
+		synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+		/* wait for all interrupt processed */
+		if (atomic_dec_and_test(&hr_cq->refcount))
+			complete(&hr_cq->free);
+		wait_for_completion(&hr_cq->free);
+	}

 	spin_lock_irq(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
...
@@ -205,6 +212,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
+EXPORT_SYMBOL_GPL(hns_roce_free_cq);

 static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 				   struct ib_ucontext *context,
...
@@ -218,6 +226,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
 	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
 				(*umem)->page_shift, &buf->hr_mtt);
 	if (ret)
...
@@ -247,6 +259,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 	if (ret)
 		goto out;

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
 	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
 				buf->hr_buf.page_shift, &buf->hr_mtt);
 	if (ret)
...
@@ -281,13 +298,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 				    struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_ib_create_cq ucmd;
 	struct hns_roce_cq *hr_cq = NULL;
 	struct hns_roce_uar *uar = NULL;
 	int vector = attr->comp_vector;
 	int cq_entries = attr->cqe;
-	int ret = 0;
+	int ret;

 	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
 		dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
...
@@ -295,13 +312,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		return ERR_PTR(-EINVAL);
 	}

-	hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
+	hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
 	if (!hr_cq)
 		return ERR_PTR(-ENOMEM);

-	/* In v1 engine, parameter verification */
-	if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
-		cq_entries = HNS_ROCE_MIN_CQE_NUM;
+	if (hr_dev->caps.min_cqes)
+		cq_entries = max(cq_entries, hr_dev->caps.min_cqes);

 	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
 	hr_cq->ib_cq.cqe = cq_entries - 1;
...
@@ -335,8 +351,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		}

 		uar = &hr_dev->priv_uar;
-		hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
-				 0x1000 * uar->index;
+		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+				 DB_REG_OFFSET * uar->index;
 	}

 	/* Allocate cq index, fill cq_context */
...
@@ -353,7 +369,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	 * problems if tptr is set to zero here, so we initialze it in user
 	 * space.
 	 */
-	if (!context)
+	if (!context && hr_cq->tptr_addr)
 		*hr_cq->tptr_addr = 0;

 	/* Get created cq handler and carry out event */
...
@@ -385,6 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	kfree(hr_cq);
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);

 int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
 {
...
@@ -410,10 +427,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
 	return ret;
 }
+EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);

 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cq *cq;

 	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
...
@@ -429,7 +447,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cq *cq;

 	cq = radix_tree_lookup(&cq_table->tree,
...
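Note: the recurring pattern in this file is the split between WQE and CQE translation tables. On hw v2 the CQE entries live in their own multi-hop MTT table, so CQ buffer setup first classifies the device. A sketch of the selection rule, factored into one helper for clarity (helper name is illustrative):

static struct hns_roce_hem_table *
pick_mtt_table(struct hns_roce_dev *hr_dev)
{
	/* multi-hop CQE addressing (hw v2) uses the dedicated CQE MTT
	 * table; otherwise the shared WQE MTT table is used */
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		return &hr_dev->mr_table.mtt_cqe_table;
	return &hr_dev->mr_table.mtt_table;
}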

drivers/infiniband/hw/hns/hns_roce_device.h
@@ -78,6 +78,8 @@
 #define HNS_ROCE_MAX_GID_NUM			16
 #define HNS_ROCE_GID_SIZE			16

+#define HNS_ROCE_HOP_NUM_0			0xff
+
 #define BITMAP_NO_RR				0
 #define BITMAP_RR				1
...
@@ -168,6 +170,11 @@ enum {
 	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
 };

+enum hns_roce_mtt_type {
+	MTT_TYPE_WQE,
+	MTT_TYPE_CQE,
+};
+
 #define HNS_ROCE_CMD_SUCCESS			1

 #define HNS_ROCE_PORT_DOWN			0
...
@@ -232,12 +239,17 @@ struct hns_roce_hem_table {
 	int		lowmem;
 	struct mutex	mutex;
 	struct hns_roce_hem **hem;
+	u64		**bt_l1;
+	dma_addr_t	*bt_l1_dma_addr;
+	u64		**bt_l0;
+	dma_addr_t	*bt_l0_dma_addr;
 };

 struct hns_roce_mtt {
-	unsigned long	first_seg;
-	int		order;
-	int		page_shift;
+	unsigned long		first_seg;
+	int			order;
+	int			page_shift;
+	enum hns_roce_mtt_type	mtt_type;
 };

 /* Only support 4K page size for mr register */
...
@@ -255,6 +267,19 @@ struct hns_roce_mr {
 	int			type;	/* MR's register type */
 	u64			*pbl_buf;	/* MR's PBL space */
 	dma_addr_t		pbl_dma_addr;	/* MR's PBL space PA */
+	u32			pbl_size;	/* PA number in the PBL */
+	u64			pbl_ba;	/* page table address */
+	u32			l0_chunk_last_num;	/* L0 last number */
+	u32			l1_chunk_last_num;	/* L1 last number */
+	u64			**pbl_bt_l2;	/* PBL BT L2 */
+	u64			**pbl_bt_l1;	/* PBL BT L1 */
+	u64			*pbl_bt_l0;	/* PBL BT L0 */
+	dma_addr_t		*pbl_l2_dma_addr;	/* PBL BT L2 dma addr */
+	dma_addr_t		*pbl_l1_dma_addr;	/* PBL BT L1 dma addr */
+	dma_addr_t		pbl_l0_dma_addr;	/* PBL BT L0 dma addr */
+	u32			pbl_ba_pg_sz;	/* BT chunk page size */
+	u32			pbl_buf_pg_sz;	/* buf chunk page size */
+	u32			pbl_hop_num;	/* multi-hop number */
 };

 struct hns_roce_mr_table {
...
@@ -262,6 +287,8 @@ struct hns_roce_mr_table {
 	struct hns_roce_buddy		mtt_buddy;
 	struct hns_roce_hem_table	mtt_table;
 	struct hns_roce_hem_table	mtpt_table;
+	struct hns_roce_buddy		mtt_cqe_buddy;
+	struct hns_roce_hem_table	mtt_cqe_table;
 };

 struct hns_roce_wq {
...
@@ -277,6 +304,12 @@ struct hns_roce_wq {
 	void __iomem	*db_reg_l;
 };

+struct hns_roce_sge {
+	int	sge_cnt;	/* SGE num */
+	int	offset;
+	int	sge_shift;	/* SGE size */
+};
+
 struct hns_roce_buf_list {
 	void		*buf;
 	dma_addr_t	map;
...
@@ -367,7 +400,6 @@ struct hns_roce_cmd_context {

 struct hns_roce_cmdq {
 	struct dma_pool		*pool;
-	u8 __iomem		*hcr;
 	struct mutex		hcr_mutex;
 	struct semaphore	poll_sem;
 	/*
...
@@ -429,6 +461,9 @@ struct hns_roce_qp {

 	atomic_t		refcount;
 	struct completion	free;
+
+	struct hns_roce_sge	sge;
+	u32			next_sge;
 };

 struct hns_roce_sqp {
...
@@ -477,16 +512,20 @@ struct hns_roce_caps {
 	u32		max_wqes;	/* 16k */
 	u32		max_sq_desc_sz;	/* 64 */
 	u32		max_rq_desc_sz;	/* 64 */
+	u32		max_srq_desc_sz;
 	int		max_qp_init_rdma;
 	int		max_qp_dest_rdma;
 	int		num_cqs;
 	int		max_cqes;
+	int		min_cqes;
+	u32		min_wqes;
 	int		reserved_cqs;
 	int		num_aeq_vectors;	/* 1 */
 	int		num_comp_vectors;	/* 32 ceq */
 	int		num_other_vectors;
 	int		num_mtpts;
 	u32		num_mtt_segs;
+	u32		num_cqe_segs;
 	int		reserved_mrws;
 	int		reserved_uars;
 	int		num_pds;
...
@@ -499,16 +538,47 @@ struct hns_roce_caps {
 	int		qpc_entry_sz;
 	int		irrl_entry_sz;
 	int		cqc_entry_sz;
+	u32		pbl_ba_pg_sz;
+	u32		pbl_buf_pg_sz;
+	u32		pbl_hop_num;
 	int		aeqe_depth;
 	int		ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
 	enum ib_mtu	max_mtu;
+	u32		qpc_bt_num;
+	u32		srqc_bt_num;
+	u32		cqc_bt_num;
+	u32		mpt_bt_num;
+	u32		qpc_ba_pg_sz;
+	u32		qpc_buf_pg_sz;
+	u32		qpc_hop_num;
+	u32		srqc_ba_pg_sz;
+	u32		srqc_buf_pg_sz;
+	u32		srqc_hop_num;
+	u32		cqc_ba_pg_sz;
+	u32		cqc_buf_pg_sz;
+	u32		cqc_hop_num;
+	u32		mpt_ba_pg_sz;
+	u32		mpt_buf_pg_sz;
+	u32		mpt_hop_num;
+	u32		mtt_ba_pg_sz;
+	u32		mtt_buf_pg_sz;
+	u32		mtt_hop_num;
+	u32		cqe_ba_pg_sz;
+	u32		cqe_buf_pg_sz;
+	u32		cqe_hop_num;
 };

 struct hns_roce_hw {
 	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
-	void (*hw_profile)(struct hns_roce_dev *hr_dev);
+	int (*cmq_init)(struct hns_roce_dev *hr_dev);
+	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
+	int (*hw_profile)(struct hns_roce_dev *hr_dev);
 	int (*hw_init)(struct hns_roce_dev *hr_dev);
 	void (*hw_exit)(struct hns_roce_dev *hr_dev);
+	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
+			 u64 out_param, u32 in_modifier, u8 op_modifier,
+			 u16 op, u16 token, int event);
+	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
 	void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
 			union ib_gid *gid);
 	void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
...
@@ -519,8 +589,11 @@ struct hns_roce_hw {
 	void (*write_cqc)(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
 			  dma_addr_t dma_handle, int nent, u32 vector);
+	int (*set_hem)(struct hns_roce_dev *hr_dev,
+		       struct hns_roce_hem_table *table, int obj, int step_idx);
 	int (*clear_hem)(struct hns_roce_dev *hr_dev,
-			 struct hns_roce_hem_table *table, int obj);
+			 struct hns_roce_hem_table *table, int obj,
+			 int step_idx);
 	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
 	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
...
@@ -535,12 +608,13 @@ struct hns_roce_hw {
 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
 	int (*destroy_cq)(struct ib_cq *ibcq);
-	void	*priv;
 };

 struct hns_roce_dev {
 	struct ib_device	ib_dev;
 	struct platform_device  *pdev;
+	struct pci_dev		*pci_dev;
+	struct device		*dev;
 	struct hns_roce_uar     priv_uar;
 	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
 	spinlock_t		sm_lock;
...
@@ -569,9 +643,12 @@ struct hns_roce_dev {
 	int			cmd_mod;
 	int			loop_idc;
+	u32			sdb_offset;
+	u32			odb_offset;
 	dma_addr_t		tptr_dma_addr; /*only for hw v1*/
 	u32			tptr_size; /*only for hw v1*/
-	struct hns_roce_hw	*hw;
+	const struct hns_roce_hw *hw;
+	void			*priv;
 };

 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
...
@@ -723,6 +800,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 			  struct ib_cq *ib_cq);
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
...
@@ -749,7 +827,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
 int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
-
-extern struct hns_roce_hw hns_roce_hw_v1;
+int hns_roce_init(struct hns_roce_dev *hr_dev);
+void hns_roce_exit(struct hns_roce_dev *hr_dev);

 #endif /* _HNS_ROCE_DEVICE_H */
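Note: struct hns_roce_hw becomes a const ops table that each generation module fills in, and optional hooks are NULL-checked by the core before use. The new set_hem/clear_hem contract passes a step_idx so multi-hop base-address tables can be programmed level by level. A hypothetical sketch (names, and the exact meaning of each step_idx value, are illustrative; the real semantics are defined by the backend):

static int example_set_hem(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, int obj,
			   int step_idx)
{
	/* program the base-address register for the table level named
	 * by step_idx (roughly: lower indices are upper BT levels,
	 * the last index maps the buffer chunk itself) */
	return 0;
}

static int example_clear_hem(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table, int obj,
			     int step_idx)
{
	/* drop the base-address mapping for that table level */
	return 0;
}

static const struct hns_roce_hw example_hw = {
	.set_hem   = example_set_hem,
	.clear_hem = example_clear_hem,
	/* reset, cmq_init, hw_init, set_mtu, ... may stay NULL */
};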

drivers/infiniband/hw/hns/hns_roce_hem.c
(This diff is collapsed.)

drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -47,6 +47,7 @@ enum {
 	/* UNMAP HEM */
 	HEM_TYPE_MTT,
+	HEM_TYPE_CQE,
 	HEM_TYPE_IRRL,
 };
...
@@ -54,6 +55,18 @@ enum {
 	 ((256 - sizeof(struct list_head) - 2 * sizeof(int)) /	\
 	 (sizeof(struct scatterlist)))

+#define check_whether_bt_num_3(type, hop_num) \
+	(type < HEM_TYPE_MTT && hop_num == 2)
+
+#define check_whether_bt_num_2(type, hop_num) \
+	((type < HEM_TYPE_MTT && hop_num == 1) || \
+	(type >= HEM_TYPE_MTT && hop_num == 2))
+
+#define check_whether_bt_num_1(type, hop_num) \
+	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
+	(type >= HEM_TYPE_MTT && hop_num == 1) || \
+	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+
 enum {
 	 HNS_ROCE_HEM_PAGE_SHIFT = 12,
 	 HNS_ROCE_HEM_PAGE_SIZE  = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
...
@@ -77,12 +90,23 @@ struct hns_roce_hem_iter {
 	int			 page_idx;
 };

+struct hns_roce_hem_mhop {
+	u32	hop_num;
+	u32	buf_chunk_size;
+	u32	bt_chunk_size;
+	u32	ba_l0_num;
+	u32	l0_idx;	/* level 0 base address table index */
+	u32	l1_idx;	/* level 1 base address table index */
+	u32	l2_idx;	/* level 2 base address table index */
+};
+
 void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, unsigned long obj);
 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 			struct hns_roce_hem_table *table, unsigned long obj);
-void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+			  struct hns_roce_hem_table *table, unsigned long obj,
 			  dma_addr_t *dma_handle);
 int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_hem_table *table,
...
@@ -97,6 +121,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 				struct hns_roce_hem_table *table);
 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+			   struct hns_roce_hem_table *table, unsigned long *obj,
+			   struct hns_roce_hem_mhop *mhop);
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);

 static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
 				      struct hns_roce_hem_iter *iter)
...
@@ -105,7 +133,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
 	iter->chunk = list_empty(&hem->chunk_list) ? NULL :
 				 list_entry(hem->chunk_list.next,
 					    struct hns_roce_hem_chunk, list);
-	 iter->page_idx = 0;
+	iter->page_idx = 0;
 }

 static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
...
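Note: the check_whether_bt_num_* macros classify how many base-address-table (BT) levels sit above a HEM buffer. For context-type tables (type < HEM_TYPE_MTT) hop_num counts the BT levels directly; for MTT-type tables one level is folded into the buffer, hence the shifted comparisons, and HNS_ROCE_HOP_NUM_0 means no BT at all. A hedged sketch of how a caller might use them (interpretation in the comments is approximate):

static int example_bt_levels(u32 type, u32 hop_num)
{
	if (check_whether_bt_num_3(type, hop_num))
		return 3;	/* L0 -> L1 -> L2 -> buffer */
	if (check_whether_bt_num_2(type, hop_num))
		return 2;	/* L0 -> L1 -> buffer */
	if (check_whether_bt_num_1(type, hop_num))
		return 1;	/* single BT level, or direct buffer */
	return -EINVAL;		/* unsupported hop configuration */
}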

drivers/infiniband/hw/hns/hns_roce_hw_v1.c
(This diff is collapsed.)

drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -948,6 +948,11 @@ struct hns_roce_qp_context {
 #define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M   \
 	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)

+#define STATUS_MASK		0xff
+#define GO_BIT_TIMEOUT_MSECS	10000
+#define HCR_STATUS_OFFSET	0x18
+#define HCR_GO_BIT		15
+
 struct hns_roce_rq_db {
 	u32    u32_4;
 	u32    u32_8;
...

drivers/infiniband/hw/hns/hns_roce_hw_v2.c (new file, mode 100644)
(This diff is collapsed.)

drivers/infiniband/hw/hns/hns_roce_hw_v2.h (new file, mode 100644)
(This diff is collapsed.)

drivers/infiniband/hw/hns/hns_roce_main.c
@@ -57,6 +57,7 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
 {
 	return gid_index * hr_dev->caps.num_ports + port;
 }
+EXPORT_SYMBOL_GPL(hns_get_gid_index);

 static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 {
...
@@ -116,7 +117,7 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
 static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 			   unsigned long event)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct net_device *netdev;

 	netdev = hr_dev->iboe.netdevs[port];
...
@@ -174,8 +175,9 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
 	u8 i;

 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
-		hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
-				    hr_dev->caps.max_mtu);
+		if (hr_dev->hw->set_mtu)
+			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+					    hr_dev->caps.max_mtu);
 		hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
 	}
...
@@ -200,7 +202,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 	props->max_qp_wr = hr_dev->caps.max_wqes;
 	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
 				  IB_DEVICE_RC_RNR_NAK_GEN;
-	props->max_sge = hr_dev->caps.max_sq_sg;
+	props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
 	props->max_sge_rd = 1;
 	props->max_cq = hr_dev->caps.num_cqs;
 	props->max_cqe = hr_dev->caps.max_cqes;
...
@@ -238,7 +240,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
 			       struct ib_port_attr *props)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct net_device *net_dev;
 	unsigned long flags;
 	enum ib_mtu mtu;
...
@@ -379,7 +381,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
 				       to_hr_ucontext(context)->uar.pfn,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-	} else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+	} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
+		   hr_dev->tptr_size) {
 		/* vm_pgoff: 1 -- TPTR */
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       hr_dev->tptr_dma_addr >> PAGE_SHIFT,
...
@@ -426,7 +429,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	int ret;
 	struct hns_roce_ib_iboe *iboe = NULL;
 	struct ib_device *ib_dev = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

 	iboe = &hr_dev->iboe;
 	spin_lock_init(&iboe->lock);
...
@@ -531,173 +534,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	return ret;
 }

-static const struct of_device_id hns_roce_of_match[] = {
-	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
-	{},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
-	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
-	{},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static int hns_roce_node_match(struct device *dev, void *fwnode)
-{
-	return dev->fwnode == fwnode;
-}
-
-static struct platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
-	struct device *dev;
-
-	/* get the 'device'corresponding to matching 'fwnode' */
-	dev = bus_find_device(&platform_bus_type, NULL,
-			      fwnode, hns_roce_node_match);
-	/* get the platform device */
-	return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
-	int i;
-	int ret;
-	u8 phy_port;
-	int port_cnt = 0;
-	struct device *dev = &hr_dev->pdev->dev;
-	struct device_node *net_node;
-	struct net_device *netdev = NULL;
-	struct platform_device *pdev = NULL;
-	struct resource *res;
-
-	/* check if we are compatible with the underlying SoC */
-	if (dev_of_node(dev)) {
-		const struct of_device_id *of_id;
-
-		of_id = of_match_node(hns_roce_of_match, dev->of_node);
-		if (!of_id) {
-			dev_err(dev, "device is not compatible!\n");
-			return -ENXIO;
-		}
-		hr_dev->hw = (struct hns_roce_hw *)of_id->data;
-		if (!hr_dev->hw) {
-			dev_err(dev, "couldn't get H/W specific DT data!\n");
-			return -ENXIO;
-		}
-	} else if (is_acpi_device_node(dev->fwnode)) {
-		const struct acpi_device_id *acpi_id;
-
-		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
-		if (!acpi_id) {
-			dev_err(dev, "device is not compatible!\n");
-			return -ENXIO;
-		}
-		hr_dev->hw = (struct hns_roce_hw *)acpi_id->driver_data;
-		if (!hr_dev->hw) {
-			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
-			return -ENXIO;
-		}
-	} else {
-		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
-		return -ENXIO;
-	}
-
-	/* get the mapped register base address */
-	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "memory resource not found!\n");
-		return -EINVAL;
-	}
-	hr_dev->reg_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(hr_dev->reg_base))
-		return PTR_ERR(hr_dev->reg_base);
-
-	/* read the node_guid of IB device from the DT or ACPI */
-	ret = device_property_read_u8_array(dev, "node-guid",
-					    (u8 *)&hr_dev->ib_dev.node_guid,
-					    GUID_LEN);
-	if (ret) {
-		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
-		return ret;
-	}
-
-	/* get the RoCE associated ethernet ports or netdevices */
-	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
-		if (dev_of_node(dev)) {
-			net_node = of_parse_phandle(dev->of_node, "eth-handle",
-						    i);
-			if (!net_node)
-				continue;
-			pdev = of_find_device_by_node(net_node);
-		} else if (is_acpi_device_node(dev->fwnode)) {
-			struct acpi_reference_args args;
-			struct fwnode_handle *fwnode;
-
-			ret = acpi_node_get_property_reference(dev->fwnode,
-							       "eth-handle",
-							       i, &args);
-			if (ret)
-				continue;
-			fwnode = acpi_fwnode_handle(args.adev);
-			pdev = hns_roce_find_pdev(fwnode);
-		} else {
-			dev_err(dev, "cannot read data from DT or ACPI\n");
-			return -ENXIO;
-		}
-
-		if (pdev) {
-			netdev = platform_get_drvdata(pdev);
-			phy_port = (u8)i;
-			if (netdev) {
-				hr_dev->iboe.netdevs[port_cnt] = netdev;
-				hr_dev->iboe.phy_port[port_cnt] = phy_port;
-			} else {
-				dev_err(dev, "no netdev found with pdev %s\n",
-					pdev->name);
-				return -ENODEV;
-			}
-			port_cnt++;
-		}
-	}
-
-	if (port_cnt == 0) {
-		dev_err(dev, "unable to get eth-handle for available ports!\n");
-		return -EINVAL;
-	}
-
-	hr_dev->caps.num_ports = port_cnt;
-
-	/* cmd issue mode: 0 is poll, 1 is event */
-	hr_dev->cmd_mod = 1;
-	hr_dev->loop_idc = 0;
-
-	/* read the interrupt names from the DT or ACPI */
-	ret = device_property_read_string_array(dev, "interrupt-names",
-						hr_dev->irq_names,
-						HNS_ROCE_MAX_IRQ_NUM);
-	if (ret < 0) {
-		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
-		return ret;
-	}
-
-	/* fetch the interrupt numbers */
-	for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
-		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
-		if (hr_dev->irq[i] <= 0) {
-			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
 				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
...
@@ -707,6 +547,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 		return ret;
 	}

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_init_hem_table(hr_dev,
+				      &hr_dev->mr_table.mtt_cqe_table,
+				      HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
+				      hr_dev->caps.num_cqe_segs, 1);
+		if (ret) {
+			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+			goto err_unmap_cqe;
+		}
+	}
+
 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
 				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
 				      hr_dev->caps.num_mtpts, 1);
...
@@ -754,6 +605,12 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 err_unmap_mtt:
-	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
+
+err_unmap_cqe:
+	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);

 	return ret;
 }
...
@@ -766,7 +623,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

 	spin_lock_init(&hr_dev->sm_lock);
 	spin_lock_init(&hr_dev->bt_cmd_lock);
...
@@ -826,56 +683,45 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 	return ret;
 }

-/**
- * hns_roce_probe - RoCE driver entrance
- * @pdev: pointer to platform device
- * Return : int
- *
- */
-static int hns_roce_probe(struct platform_device *pdev)
+int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct hns_roce_dev *hr_dev;
-	struct device *dev = &pdev->dev;
-
-	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
-	if (!hr_dev)
-		return -ENOMEM;
-
-	hr_dev->pdev = pdev;
-	platform_set_drvdata(pdev, hr_dev);
+	struct device *dev = hr_dev->dev;

-	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
-	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
-		dev_err(dev, "Not usable DMA addressing mode\n");
-		ret = -EIO;
-		goto error_failed_get_cfg;
+	if (hr_dev->hw->reset) {
+		ret = hr_dev->hw->reset(hr_dev, true);
+		if (ret) {
+			dev_err(dev, "Reset RoCE engine failed!\n");
+			return ret;
+		}
 	}

-	ret = hns_roce_get_cfg(hr_dev);
-	if (ret) {
-		dev_err(dev, "Get Configuration failed!\n");
-		goto error_failed_get_cfg;
+	if (hr_dev->hw->cmq_init) {
+		ret = hr_dev->hw->cmq_init(hr_dev);
+		if (ret) {
+			dev_err(dev, "Init RoCE Command Queue failed!\n");
+			goto error_failed_cmq_init;
+		}
 	}

-	ret = hr_dev->hw->reset(hr_dev, true);
+	ret = hr_dev->hw->hw_profile(hr_dev);
 	if (ret) {
-		dev_err(dev, "Reset RoCE engine failed!\n");
-		goto error_failed_get_cfg;
+		dev_err(dev, "Get RoCE engine profile failed!\n");
+		goto error_failed_cmd_init;
 	}

-	hr_dev->hw->hw_profile(hr_dev);
-
 	ret = hns_roce_cmd_init(hr_dev);
 	if (ret) {
 		dev_err(dev, "cmd init failed!\n");
 		goto error_failed_cmd_init;
 	}

-	ret = hns_roce_init_eq_table(hr_dev);
-	if (ret) {
-		dev_err(dev, "eq init failed!\n");
-		goto error_failed_eq_table;
+	if (hr_dev->cmd_mod) {
+		ret = hns_roce_init_eq_table(hr_dev);
+		if (ret) {
+			dev_err(dev, "eq init failed!\n");
+			goto error_failed_eq_table;
+		}
 	}

 	if (hr_dev->cmd_mod) {
...
@@ -898,10 +744,12 @@ static int hns_roce_probe(struct platform_device *pdev)
 		goto error_failed_setup_hca;
 	}

-	ret = hr_dev->hw->hw_init(hr_dev);
-	if (ret) {
-		dev_err(dev, "hw_init failed!\n");
-		goto error_failed_engine_init;
+	if (hr_dev->hw->hw_init) {
+		ret = hr_dev->hw->hw_init(hr_dev);
+		if (ret) {
+			dev_err(dev, "hw_init failed!\n");
+			goto error_failed_engine_init;
+		}
 	}

 	ret = hns_roce_register_device(hr_dev);
...
@@ -911,7 +759,8 @@ static int hns_roce_probe(struct platform_device *pdev)
 	return 0;

 error_failed_register_device:
-	hr_dev->hw->hw_exit(hr_dev);
+	if (hr_dev->hw->hw_exit)
+		hr_dev->hw->hw_exit(hr_dev);

 error_failed_engine_init:
 	hns_roce_cleanup_bitmap(hr_dev);
...
@@ -924,58 +773,47 @@ static int hns_roce_probe(struct platform_device *pdev)
 	hns_roce_cmd_use_polling(hr_dev);

 error_failed_use_event:
-	hns_roce_cleanup_eq_table(hr_dev);
+	if (hr_dev->cmd_mod)
+		hns_roce_cleanup_eq_table(hr_dev);

 error_failed_eq_table:
 	hns_roce_cmd_cleanup(hr_dev);

 error_failed_cmd_init:
-	ret = hr_dev->hw->reset(hr_dev, false);
-	if (ret)
-		dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
+	if (hr_dev->hw->cmq_exit)
+		hr_dev->hw->cmq_exit(hr_dev);

-error_failed_get_cfg:
-	ib_dealloc_device(&hr_dev->ib_dev);
+error_failed_cmq_init:
+	if (hr_dev->hw->reset) {
+		ret = hr_dev->hw->reset(hr_dev, false);
+		if (ret)
+			dev_err(dev, "Dereset RoCE engine failed!\n");
+	}

 	return ret;
 }
+EXPORT_SYMBOL_GPL(hns_roce_init);

-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
+void hns_roce_exit(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
 	hns_roce_unregister_device(hr_dev);
-	hr_dev->hw->hw_exit(hr_dev);
+	if (hr_dev->hw->hw_exit)
+		hr_dev->hw->hw_exit(hr_dev);
 	hns_roce_cleanup_bitmap(hr_dev);
 	hns_roce_cleanup_hem(hr_dev);

 	if (hr_dev->cmd_mod)
 		hns_roce_cmd_use_polling(hr_dev);

-	hns_roce_cleanup_eq_table(hr_dev);
+	if (hr_dev->cmd_mod)
+		hns_roce_cleanup_eq_table(hr_dev);
 	hns_roce_cmd_cleanup(hr_dev);
-	hr_dev->hw->reset(hr_dev, false);
-	ib_dealloc_device(&hr_dev->ib_dev);
-
-	return 0;
+	if (hr_dev->hw->cmq_exit)
+		hr_dev->hw->cmq_exit(hr_dev);
+	if (hr_dev->hw->reset)
+		hr_dev->hw->reset(hr_dev, false);
 }
-
-static struct platform_driver hns_roce_driver = {
-	.probe = hns_roce_probe,
-	.remove = hns_roce_remove,
-	.driver = {
-		.name = DRV_NAME,
-		.of_match_table = hns_roce_of_match,
-		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
-	},
-};
-
-module_platform_driver(hns_roce_driver);
+EXPORT_SYMBOL_GPL(hns_roce_exit);

 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
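Note: this is the structural pivot of the series. The core module stops being a platform driver; DT/ACPI matching, register mapping and port discovery move into the hw-v1 module, while the PCI-based hw-v2 module does its own PCI probing. Each generation module then calls the exported hns_roce_init()/hns_roce_exit(). A minimal sketch of the resulting probe wiring (illustrative, not the real hns-roce-hw-v1 code; example_hw_ops is a placeholder for the generation's ops table):

static const struct hns_roce_hw example_hw_ops;	/* ops, elided here */

static int example_probe(struct platform_device *pdev)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	/* the hw module now owns device discovery and fills these in */
	hr_dev->pdev = pdev;
	hr_dev->dev = &pdev->dev;
	hr_dev->hw = &example_hw_ops;

	ret = hns_roce_init(hr_dev);	/* common bring-up in the core */
	if (ret)
		ib_dealloc_device(&hr_dev->ib_dev);
	return ret;
}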
...

drivers/infiniband/hw/hns/hns_roce_mr.c
(This diff is collapsed.)

drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -31,6 +31,7 @@
  */
 #include <linux/platform_device.h>
+#include <linux/pci.h>
 #include "hns_roce_device.h"

 static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
...
@@ -60,7 +61,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
 			   struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_pd *pd;
 	int ret;
...
@@ -86,6 +87,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
 	return &pd->ibpd;
 }
+EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);

 int hns_roce_dealloc_pd(struct ib_pd *pd)
 {
...
@@ -94,6 +96,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)
 	return 0;
 }
+EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);

 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
...
@@ -109,12 +112,17 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 	uar->index = (uar->index - 1) % (hr_dev->caps.phy_num_uars - 1) + 1;

-	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
-		return -EINVAL;
+	if (!dev_is_pci(hr_dev->dev)) {
+		res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+			return -EINVAL;
+		}
+		uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
+	} else {
+		uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
+			   >> PAGE_SHIFT);
 	}
-	uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;

 	return 0;
 }
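Note: the UAR page-frame source now depends on the bus. A sketch of the rule (error handling trimmed; on the platform path a NULL resource check is still required as in the diff):

static unsigned long example_uar_pfn(struct hns_roce_dev *hr_dev,
				     struct hns_roce_uar *uar)
{
	/* PCI devices (hw v2) take doorbell pages from BAR 2 */
	if (dev_is_pci(hr_dev->dev))
		return pci_resource_start(hr_dev->pci_dev, 2) >> PAGE_SHIFT;

	/* platform devices (hw v1) use the first memory resource,
	 * offset by the per-context UAR index */
	return (platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0)->start
		>> PAGE_SHIFT) + uar->index;
}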
...
...

drivers/infiniband/hw/hns/hns_roce_qp.c
(This diff is collapsed.)

drivers/infiniband/hw/i40iw/Kconfig

 config INFINIBAND_I40IW
 	tristate "Intel(R) Ethernet X722 iWARP Driver"
 	depends on INET && I40E
+	depends on PCI
 	select GENERIC_ALLOCATOR
 	---help---
 	Intel(R) Ethernet X722 iWARP Driver
...

drivers/infiniband/hw/mlx5/mr.c
@@ -1229,13 +1229,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
-			mlx5_ib_dbg(dev, "cache empty for order %d", order);
+			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
-			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
 		use_umr = false;
...

drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1093,7 +1093,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
 	rsp = &mqe->u.rsp;

 	if (cqe_status || ext_status) {
-		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
 		       __func__, cqe_status, ext_status);
 		if (rsp) {
 			/* This is for embedded cmds. */
...

drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -658,7 +658,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
 	if (reset) {
 		status = ocrdma_mbx_rdma_stats(dev, true);
 		if (status) {
-			pr_err("Failed to reset stats = %d", status);
+			pr_err("Failed to reset stats = %d\n", status);
 			goto err;
 		}
 	}
...

drivers/infiniband/hw/qedr/Kconfig

 config INFINIBAND_QEDR
 	tristate "QLogic RoCE driver"
 	depends on 64BIT && QEDE
+	depends on PCI
 	select QED_LL2
 	select QED_RDMA
 	---help---
...

drivers/infiniband/hw/qib/Kconfig

 config INFINIBAND_QIB
 	tristate "Intel PCIe HCA support"
 	depends on 64BIT && INFINIBAND_RDMAVT
+	depends on PCI
 	---help---
 	  This is a low-level driver for Intel PCIe QLE InfiniBand host
 	  channel adapters. This driver does not support the Intel
...

drivers/infiniband/sw/rdmavt/Kconfig

 config INFINIBAND_RDMAVT
 	tristate "RDMA verbs transport library"
 	depends on 64BIT
+	depends on PCI
 	select DMA_VIRT_OPS
 	---help---
 	  This is a common software verbs provider for RDMA networks.

drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -51,7 +51,6 @@
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
#include <linux/pci.h>
#define DRV_VERSION "1.0.0"
...
...
@@ -2312,7 +2311,8 @@ static void ipoib_add_one(struct ib_device *device)
}
if
(
!
count
)
{
kfree
(
dev_list
);
pr_err
(
"Failed to init port, removing it
\n
"
);
ipoib_remove_one
(
device
,
dev_list
);
return
;
}
...

drivers/staging/lustre/lnet/Kconfig

@@ -34,7 +34,7 @@ config LNET_SELFTEST
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an
...