openeuler / Kernel
Commit 86ef0bea — Merge branch 'mlx' into merge-test
Author: Doug Ledford
Date: Dec 14, 2016
Parents: 253f8b22, 7ceb740c

Showing 40 changed files with 875 additions and 336 deletions (+875, -336).
drivers/infiniband/core/core_priv.h                      +0    -3
drivers/infiniband/core/uverbs.h                         +1    -0
drivers/infiniband/core/uverbs_cmd.c                     +155  -74
drivers/infiniband/core/uverbs_main.c                    +1    -0
drivers/infiniband/core/verbs.c                          +57   -51
drivers/infiniband/hw/cxgb3/iwch_provider.c              +2    -1
drivers/infiniband/hw/cxgb4/provider.c                   +3    -1
drivers/infiniband/hw/hns/hns_roce_ah.c                  +2    -1
drivers/infiniband/hw/hns/hns_roce_device.h              +2    -1
drivers/infiniband/hw/i40iw/i40iw_verbs.c                +3    -1
drivers/infiniband/hw/mlx4/ah.c                          +7    -3
drivers/infiniband/hw/mlx4/mad.c                         +42   -7
drivers/infiniband/hw/mlx4/main.c                        +19   -11
drivers/infiniband/hw/mlx4/mlx4_ib.h                     +2    -1
drivers/infiniband/hw/mlx4/qp.c                          +8    -5
drivers/infiniband/hw/mlx5/ah.c                          +24   -1
drivers/infiniband/hw/mlx5/cq.c                          +31   -3
drivers/infiniband/hw/mlx5/main.c                        +180  -88
drivers/infiniband/hw/mlx5/mem.c                         +6    -1
drivers/infiniband/hw/mlx5/mlx5_ib.h                     +9    -3
drivers/infiniband/hw/mlx5/mr.c                          +52   -19
drivers/infiniband/hw/mlx5/qp.c                          +106  -25
drivers/infiniband/hw/mlx5/srq.c                         +2    -2
drivers/infiniband/hw/mthca/mthca_av.c                   +2    -4
drivers/infiniband/hw/mthca/mthca_provider.c             +3    -1
drivers/infiniband/hw/nes/nes_verbs.c                    +2    -1
drivers/infiniband/hw/ocrdma/ocrdma_ah.c                 +2    -1
drivers/infiniband/hw/ocrdma/ocrdma_ah.h                 +3    -1
drivers/infiniband/hw/qedr/verbs.c                       +2    -1
drivers/infiniband/hw/qedr/verbs.h                       +2    -1
drivers/infiniband/hw/usnic/usnic_ib_verbs.c             +3    -1
drivers/infiniband/hw/usnic/usnic_ib_verbs.h             +3    -1
drivers/infiniband/sw/rxe/rxe_param.h                    +1    -1
drivers/infiniband/sw/rxe/rxe_verbs.c                    +3    -1
drivers/infiniband/ulp/ipoib/ipoib_cm.c                  +0    -2
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c    +3    -2
include/linux/mlx5/mlx5_ifc.h                            +1    -1
include/rdma/ib_verbs.h                                  +56   -14
include/uapi/rdma/ib_user_verbs.h                        +38   -0
include/uapi/rdma/mlx5-abi.h                             +37   -1
drivers/infiniband/core/core_priv.h  (+0, -3)

@@ -72,9 +72,6 @@ void ib_device_unregister_sysfs(struct ib_device *device);
 void ib_cache_setup(void);
 void ib_cache_cleanup(void);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-			struct ib_qp_attr *qp_attr, int *qp_attr_mask);
-
 typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
				      struct net_device *idev, void *cookie);
drivers/infiniband/core/uverbs.h  (+1, -0)

@@ -289,5 +289,6 @@ IB_UVERBS_DECLARE_EX_CMD(modify_wq);
 IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
 IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
 IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(modify_qp);
 
 #endif /* UVERBS_H */
drivers/infiniband/core/uverbs_cmd.c  (+155, -74)

@@ -2328,94 +2328,88 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
 	}
 }
 
-ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
-			    struct ib_device *ib_dev,
-			    const char __user *buf, int in_len,
-			    int out_len)
+static int modify_qp(struct ib_uverbs_file *file,
+		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
 {
-	struct ib_uverbs_modify_qp cmd;
-	struct ib_udata            udata;
-	struct ib_qp              *qp;
-	struct ib_qp_attr         *attr;
-	int                        ret;
-
-	if (copy_from_user(&cmd, buf, sizeof cmd))
-		return -EFAULT;
-
-	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
-		   out_len);
+	struct ib_qp_attr *attr;
+	struct ib_qp *qp;
+	int ret;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr)
 		return -ENOMEM;
 
-	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+	qp = idr_read_qp(cmd->base.qp_handle, file->ucontext);
 	if (!qp) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	attr->qp_state            = cmd.qp_state;
-	attr->cur_qp_state        = cmd.cur_qp_state;
-	attr->path_mtu            = cmd.path_mtu;
-	attr->path_mig_state      = cmd.path_mig_state;
-	attr->qkey                = cmd.qkey;
-	attr->rq_psn              = cmd.rq_psn;
-	attr->sq_psn              = cmd.sq_psn;
-	attr->dest_qp_num         = cmd.dest_qp_num;
-	attr->qp_access_flags     = cmd.qp_access_flags;
-	attr->pkey_index          = cmd.pkey_index;
-	attr->alt_pkey_index      = cmd.alt_pkey_index;
-	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
-	attr->max_rd_atomic       = cmd.max_rd_atomic;
-	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
-	attr->min_rnr_timer       = cmd.min_rnr_timer;
-	attr->port_num            = cmd.port_num;
-	attr->timeout             = cmd.timeout;
-	attr->retry_cnt           = cmd.retry_cnt;
-	attr->rnr_retry           = cmd.rnr_retry;
-	attr->alt_port_num        = cmd.alt_port_num;
-	attr->alt_timeout         = cmd.alt_timeout;
-
-	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
-	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
-	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
-	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
-	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
-	attr->ah_attr.dlid                  = cmd.dest.dlid;
-	attr->ah_attr.sl                    = cmd.dest.sl;
-	attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
-	attr->ah_attr.static_rate           = cmd.dest.static_rate;
-	attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
-	attr->ah_attr.port_num              = cmd.dest.port_num;
-
-	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
-	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
-	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
-	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
-	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
-	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
-	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
-	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
-	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
-	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
-	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;
+	attr->qp_state            = cmd->base.qp_state;
+	attr->cur_qp_state        = cmd->base.cur_qp_state;
+	attr->path_mtu            = cmd->base.path_mtu;
+	attr->path_mig_state      = cmd->base.path_mig_state;
+	attr->qkey                = cmd->base.qkey;
+	attr->rq_psn              = cmd->base.rq_psn;
+	attr->sq_psn              = cmd->base.sq_psn;
+	attr->dest_qp_num         = cmd->base.dest_qp_num;
+	attr->qp_access_flags     = cmd->base.qp_access_flags;
+	attr->pkey_index          = cmd->base.pkey_index;
+	attr->alt_pkey_index      = cmd->base.alt_pkey_index;
+	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+	attr->max_rd_atomic       = cmd->base.max_rd_atomic;
+	attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
+	attr->min_rnr_timer       = cmd->base.min_rnr_timer;
+	attr->port_num            = cmd->base.port_num;
+	attr->timeout             = cmd->base.timeout;
+	attr->retry_cnt           = cmd->base.retry_cnt;
+	attr->rnr_retry           = cmd->base.rnr_retry;
+	attr->alt_port_num        = cmd->base.alt_port_num;
+	attr->alt_timeout         = cmd->base.alt_timeout;
+	attr->rate_limit          = cmd->rate_limit;
+
+	memcpy(attr->ah_attr.grh.dgid.raw, cmd->base.dest.dgid, 16);
+	attr->ah_attr.grh.flow_label        = cmd->base.dest.flow_label;
+	attr->ah_attr.grh.sgid_index        = cmd->base.dest.sgid_index;
+	attr->ah_attr.grh.hop_limit         = cmd->base.dest.hop_limit;
+	attr->ah_attr.grh.traffic_class     = cmd->base.dest.traffic_class;
+	attr->ah_attr.dlid                  = cmd->base.dest.dlid;
+	attr->ah_attr.sl                    = cmd->base.dest.sl;
+	attr->ah_attr.src_path_bits         = cmd->base.dest.src_path_bits;
+	attr->ah_attr.static_rate           = cmd->base.dest.static_rate;
+	attr->ah_attr.ah_flags              = cmd->base.dest.is_global ?
+					      IB_AH_GRH : 0;
+	attr->ah_attr.port_num              = cmd->base.dest.port_num;
+
+	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd->base.alt_dest.dgid, 16);
+	attr->alt_ah_attr.grh.flow_label    = cmd->base.alt_dest.flow_label;
+	attr->alt_ah_attr.grh.sgid_index    = cmd->base.alt_dest.sgid_index;
+	attr->alt_ah_attr.grh.hop_limit     = cmd->base.alt_dest.hop_limit;
+	attr->alt_ah_attr.grh.traffic_class = cmd->base.alt_dest.traffic_class;
+	attr->alt_ah_attr.dlid              = cmd->base.alt_dest.dlid;
+	attr->alt_ah_attr.sl                = cmd->base.alt_dest.sl;
+	attr->alt_ah_attr.src_path_bits     = cmd->base.alt_dest.src_path_bits;
+	attr->alt_ah_attr.static_rate       = cmd->base.alt_dest.static_rate;
+	attr->alt_ah_attr.ah_flags          = cmd->base.alt_dest.is_global ?
+					      IB_AH_GRH : 0;
+	attr->alt_ah_attr.port_num          = cmd->base.alt_dest.port_num;
 
 	if (qp->real_qp == qp) {
-		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
-		if (ret)
-			goto release_qp;
+		if (cmd->base.attr_mask & IB_QP_AV) {
+			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+			if (ret)
+				goto release_qp;
+		}
 		ret = qp->device->modify_qp(qp, attr,
-			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+			modify_qp_mask(qp->qp_type, cmd->base.attr_mask),
+			udata);
 	} else {
-		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+		ret = ib_modify_qp(qp, attr,
+				   modify_qp_mask(qp->qp_type,
						  cmd->base.attr_mask));
 	}
 
-	if (ret)
-		goto release_qp;
-
-	ret = in_len;
-
 release_qp:
 	put_qp_read(qp);

@@ -2425,6 +2419,68 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	return ret;
 }
 
+ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+			    struct ib_device *ib_dev,
+			    const char __user *buf, int in_len,
+			    int out_len)
+{
+	struct ib_uverbs_ex_modify_qp cmd = {};
+	struct ib_udata udata;
+	int ret;
+
+	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
+		return -EFAULT;
+
+	if (cmd.base.attr_mask &
+	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
+		return -EOPNOTSUPP;
+
+	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+		   in_len - sizeof(cmd.base), out_len);
+
+	ret = modify_qp(file, &cmd, &udata);
+	if (ret)
+		return ret;
+
+	return in_len;
+}
+
+int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
+			   struct ib_device *ib_dev,
+			   struct ib_udata *ucore,
+			   struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_modify_qp cmd = {};
+	int ret;
+
+	/*
+	 * Last bit is reserved for extending the attr_mask by
+	 * using another field.
+	 */
+	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
+
+	if (ucore->inlen < sizeof(cmd.base))
+		return -EINVAL;
+
+	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+	if (ret)
+		return ret;
+
+	if (cmd.base.attr_mask &
+	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+		return -EOPNOTSUPP;
+
+	if (ucore->inlen > sizeof(cmd)) {
+		if (ib_is_udata_cleared(ucore, sizeof(cmd),
+					ucore->inlen - sizeof(cmd)))
+			return -EOPNOTSUPP;
+	}
+
+	ret = modify_qp(file, &cmd, uhw);
+
+	return ret;
+}
+
 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,

@@ -2875,6 +2931,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	struct ib_ah			*ah;
 	struct ib_ah_attr		attr;
 	int ret;
+	struct ib_udata			udata;
 
 	if (out_len < sizeof resp)
 		return -ENOSPC;

@@ -2882,6 +2939,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	INIT_UDATA(&udata, buf + sizeof(cmd),
+		   (unsigned long)cmd.response + sizeof(resp),
+		   in_len - sizeof(cmd), out_len - sizeof(resp));
+
 	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
 	if (!uobj)
 		return -ENOMEM;

@@ -2908,12 +2969,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	memset(&attr.dmac, 0, sizeof(attr.dmac));
 	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
 
-	ah = ib_create_ah(pd, &attr);
+	ah = pd->device->create_ah(pd, &attr, &udata);
+
 	if (IS_ERR(ah)) {
 		ret = PTR_ERR(ah);
 		goto err_put;
 	}
 
+	ah->device  = pd->device;
+	ah->pd      = pd;
+	atomic_inc(&pd->usecnt);
 	ah->uobject  = uobj;
 	uobj->object = ah;

@@ -3124,8 +3189,10 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
 	kern_spec_mask = kern_spec_val + kern_filter_sz;
+	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
+		return -EINVAL;
 
-	switch (ib_spec->type) {
+	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
 	case IB_FLOW_SPEC_ETH:
 		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
 		actual_filter_sz = spec_filter_size(kern_spec_mask,

@@ -3175,6 +3242,21 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
 		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
 		break;
+	case IB_FLOW_SPEC_VXLAN_TUNNEL:
+		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
+			return -EINVAL;
+		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
+		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
+
+		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
+		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}

@@ -3745,7 +3827,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		err = PTR_ERR(flow_id);
 		goto err_free;
 	}
-	flow_id->qp = qp;
 	flow_id->uobject = uobj;
 	uobj->object = flow_id;
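The two attr_mask guards above are the only place where the legacy and extended paths differ in validation. A minimal userspace sketch of the same arithmetic, using hypothetical stand-in values for the last-supported mask bits (the real IB_USER_LEGACY_LAST_QP_ATTR_MASK / IB_USER_LAST_QP_ATTR_MASK live in the uapi headers), shows why `attr_mask & ~((last << 1) - 1)` exposes exactly the bits above the last supported one:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's last-supported mask bits,
 * used only to exercise the bound check. */
#define LAST_LEGACY_MASK_BIT (1u << 20)
#define LAST_EX_MASK_BIT     (1u << 25)

/* ((last << 1) - 1) is a mask of every bit up to and including `last`,
 * so &-ing attr_mask with its complement leaves only stray high bits. */
static int mask_in_bounds(uint32_t attr_mask, uint32_t last)
{
	return !(attr_mask & ~((last << 1) - 1));
}

int main(void)
{
	uint32_t rate_limit_bit = 1u << 25;

	/* A legacy caller setting the newer rate-limit bit is rejected... */
	printf("legacy path accepts rate limit? %d\n",
	       mask_in_bounds(rate_limit_bit, LAST_LEGACY_MASK_BIT));
	/* ...while the extended command accepts it. */
	printf("extended path accepts rate limit? %d\n",
	       mask_in_bounds(rate_limit_bit, LAST_EX_MASK_BIT));
	return 0;
}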
drivers/infiniband/core/uverbs_main.c  (+1, -0)

@@ -137,6 +137,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
 	[IB_USER_VERBS_EX_CMD_DESTROY_WQ]       = ib_uverbs_ex_destroy_wq,
 	[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
 	[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
+	[IB_USER_VERBS_EX_CMD_MODIFY_QP]        = ib_uverbs_ex_modify_qp,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
drivers/infiniband/core/verbs.c  (+57, -51)

@@ -315,7 +315,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
 	struct ib_ah *ah;
 
-	ah = pd->device->create_ah(pd, ah_attr);
+	ah = pd->device->create_ah(pd, ah_attr, NULL);
 
 	if (!IS_ERR(ah)) {
 		ah->device  = pd->device;

@@ -328,7 +328,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 }
 EXPORT_SYMBOL(ib_create_ah);
 
-static int ib_get_header_version(const union rdma_network_hdr *hdr)
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 {
 	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
 	struct iphdr ip4h_checked;

@@ -359,6 +359,7 @@ static int ib_get_header_version(const union rdma_network_hdr *hdr)
 		return 4;
 	return 6;
 }
+EXPORT_SYMBOL(ib_get_rdma_header_version);
 
 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						      u8 port_num,

@@ -369,7 +370,7 @@ static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
 	if (rdma_protocol_ib(device, port_num))
 		return RDMA_NETWORK_IB;
 
-	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);
+	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
 
 	if (grh_version == 4)
 		return RDMA_NETWORK_IPV4;

@@ -415,9 +416,9 @@ static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				     &context, gid_index);
 }
 
-static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
-				  enum rdma_network_type net_type,
-				  union ib_gid *sgid, union ib_gid *dgid)
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+			      enum rdma_network_type net_type,
+			      union ib_gid *sgid, union ib_gid *dgid)
 {
 	struct sockaddr_in  src_in;
 	struct sockaddr_in  dst_in;

@@ -447,6 +448,7 @@ static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
 		return -EINVAL;
 	}
 }
+EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 
 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,

@@ -469,8 +471,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		net_type = ib_get_net_type_by_grh(device, port_num, grh);
 		gid_type = ib_network_to_gid_type(net_type);
 	}
-	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
-				     &sgid, &dgid);
+	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+					&sgid, &dgid);
 	if (ret)
 		return ret;

@@ -1014,6 +1016,7 @@ static const struct {
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
+				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},

@@ -1047,6 +1050,7 @@ static const struct {
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
+				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		},
		[IB_QPS_SQD]   = {

@@ -1196,66 +1200,66 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 }
 EXPORT_SYMBOL(ib_modify_qp_is_ok);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+int ib_resolve_eth_dmac(struct ib_device *device,
+			struct ib_ah_attr *ah_attr)
 {
 	int           ret = 0;
 
-	if (*qp_attr_mask & IB_QP_AV) {
-		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
-		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
-			return -EINVAL;
-
-		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
-			return 0;
-
-		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
-			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
-					qp_attr->ah_attr.dmac);
-		} else {
-			union ib_gid		sgid;
-			struct ib_gid_attr	sgid_attr;
-			int			ifindex;
-			int			hop_limit;
-
-			ret = ib_query_gid(qp->device,
-					   qp_attr->ah_attr.port_num,
-					   qp_attr->ah_attr.grh.sgid_index,
-					   &sgid, &sgid_attr);
-
-			if (ret || !sgid_attr.ndev) {
-				if (!ret)
-					ret = -ENXIO;
-				goto out;
-			}
-
-			ifindex = sgid_attr.ndev->ifindex;
-
-			ret = rdma_addr_find_l2_eth_by_grh(&sgid,
-							   &qp_attr->ah_attr.grh.dgid,
-							   qp_attr->ah_attr.dmac,
-							   NULL, &ifindex, &hop_limit);
-
-			dev_put(sgid_attr.ndev);
-
-			qp_attr->ah_attr.grh.hop_limit = hop_limit;
-		}
+	if (ah_attr->port_num < rdma_start_port(device) ||
+	    ah_attr->port_num > rdma_end_port(device))
+		return -EINVAL;
+
+	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
+		return 0;
+
+	if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+		rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw,
+				ah_attr->dmac);
+	} else {
+		union ib_gid		sgid;
+		struct ib_gid_attr	sgid_attr;
+		int			ifindex;
+		int			hop_limit;
+
+		ret = ib_query_gid(device,
+				   ah_attr->port_num,
+				   ah_attr->grh.sgid_index,
+				   &sgid, &sgid_attr);
+
+		if (ret || !sgid_attr.ndev) {
+			if (!ret)
+				ret = -ENXIO;
+			goto out;
+		}
+
+		ifindex = sgid_attr.ndev->ifindex;
+
+		ret = rdma_addr_find_l2_eth_by_grh(&sgid,
+						   &ah_attr->grh.dgid,
+						   ah_attr->dmac,
+						   NULL, &ifindex, &hop_limit);
+
+		dev_put(sgid_attr.ndev);
+
+		ah_attr->grh.hop_limit = hop_limit;
 	}
 out:
 	return ret;
 }
+EXPORT_SYMBOL(ib_resolve_eth_dmac);
 
 int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
 {
-	int ret;
-
-	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
-	if (ret)
-		return ret;
+	if (qp_attr_mask & IB_QP_AV) {
+		int ret;
+
+		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+		if (ret)
+			return ret;
+	}
 
 	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }

@@ -1734,8 +1738,10 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
 		return ERR_PTR(-ENOSYS);
 
 	flow_id = qp->device->create_flow(qp, flow_attr, domain);
-	if (!IS_ERR(flow_id))
+	if (!IS_ERR(flow_id)) {
 		atomic_inc(&qp->usecnt);
+		flow_id->qp = qp;
+	}
 	return flow_id;
 }
 EXPORT_SYMBOL(ib_create_flow);
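The renamed ib_get_rdma_header_version() is now exported so drivers such as mlx4 can classify a received RoCE GRH as carrying an IPv4 or IPv6 header (it returns 4 or 6). A hedged, self-contained sketch of the core idea — the version rides in the top nibble of the first header byte; the kernel function additionally validates the IPv4 header checksum before trusting that nibble, which this sketch skips:

#include <stdint.h>
#include <stdio.h>

/* Return the IP version nibble from the first byte of a header. */
static int sniff_ip_version(const uint8_t *hdr)
{
	return hdr[0] >> 4;
}

int main(void)
{
	uint8_t v4_start[] = { 0x45 };	/* version 4, IHL 5 */
	uint8_t v6_start[] = { 0x60 };	/* version 6 */

	printf("%d %d\n", sniff_ip_version(v4_start),
	       sniff_ip_version(v6_start));
	return 0;
}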
drivers/infiniband/hw/cxgb3/iwch_provider.c  (+2, -1)

@@ -62,7 +62,8 @@
 #include "common.h"
 
 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
-				    struct ib_ah_attr *ah_attr)
+				    struct ib_ah_attr *ah_attr,
+				    struct ib_udata *udata)
 {
 	return ERR_PTR(-ENOSYS);
 }
drivers/infiniband/hw/cxgb4/provider.c  (+3, -1)

@@ -59,7 +59,9 @@ module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
 static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
-				    struct ib_ah_attr *ah_attr)
+				    struct ib_ah_attr *ah_attr,
+				    struct ib_udata *udata)
+
 {
 	return ERR_PTR(-ENOSYS);
 }
drivers/infiniband/hw/hns/hns_roce_ah.c  (+2, -1)

@@ -39,7 +39,8 @@
 #define HNS_ROCE_VLAN_SL_BIT_MASK	7
 #define HNS_ROCE_VLAN_SL_SHIFT	13
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
 	struct device *dev = &hr_dev->pdev->dev;
drivers/infiniband/hw/hns/hns_roce_device.h  (+2, -1)

@@ -687,7 +687,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr);
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata);
 int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int hns_roce_destroy_ah(struct ib_ah *ah);
drivers/infiniband/hw/i40iw/i40iw_verbs.c  (+3, -1)

@@ -2704,7 +2704,9 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
 * @ah_attr: address handle attributes
 */
 static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
-				     struct ib_ah_attr *attr)
+				     struct ib_ah_attr *attr,
+				     struct ib_udata *udata)
+
 {
 	return ERR_PTR(-ENOSYS);
 }
drivers/infiniband/hw/mlx4/ah.c  (+7, -3)

@@ -111,7 +111,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}
+	ah->av.eth.sl_tclass_flowlabel |=
+			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+				    ah_attr->grh.flow_label);
 	/*
	 * HW requires multicast LID so we just choose one.
	 */

@@ -119,12 +121,14 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
	ah->av.ib.dlid = cpu_to_be16(0xc000);
 
	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
-	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
+	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
 
	return &ah->ibah;
 }
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata)
 {
	struct mlx4_ib_ah *ah;
	struct ib_ah *ret;
drivers/infiniband/hw/mlx4/mad.c  (+42, -7)

@@ -39,6 +39,8 @@
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
 #include <rdma/ib_pma.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
 
 #include <linux/mlx4/driver.h>
 #include "mlx4_ib.h"

@@ -480,6 +482,23 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
	return -EINVAL;
 }
 
+static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
+				union ib_gid *dgid)
+{
+	int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
+	enum rdma_network_type net_type;
+
+	if (version == 4)
+		net_type = RDMA_NETWORK_IPV4;
+	else if (version == 6)
+		net_type = RDMA_NETWORK_IPV6;
+	else
+		return -EINVAL;
+
+	return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+					 sgid, dgid);
+}
+
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)

@@ -538,7 +557,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	if (is_eth) {
-		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+		union ib_gid sgid;
+
+		if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+			return -EINVAL;
		attr.ah_flags = IB_AH_GRH;
	}
	ah = ib_create_ah(tun_ctx->pd, &attr);

@@ -651,6 +673,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
		is_eth = 1;
 
	if (is_eth) {
+		union ib_gid dgid;
+		union ib_gid sgid;
+
+		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+			return -EINVAL;
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;

@@ -659,10 +686,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
-		err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+		err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
		if (err && mlx4_is_mf_bonded(dev->dev)) {
			other_port = (port == 1) ? 2 : 1;
-			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
			if (!err) {
				port = other_port;
				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",

@@ -702,10 +729,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
-		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
-		if (slave < 0) {
-			mlx4_ib_warn(ibdev, "failed matching grh\n");
-			return -ENOENT;
+		if (grh->dgid.global.interface_id ==
			cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
+		    grh->dgid.global.subnet_prefix == cpu_to_be64(
			atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
+			slave = 0;
+		} else {
+			slave = mlx4_ib_find_real_gid(ibdev, port,
+						      grh->dgid.global.interface_id);
+			if (slave < 0) {
+				mlx4_ib_warn(ibdev, "failed matching grh\n");
+				return -ENOENT;
+			}
		}
	}
	/* Class-specific handling */
drivers/infiniband/hw/mlx4/main.c  (+19, -11)

@@ -547,6 +547,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+	props->max_ah = INT_MAX;
 
	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

@@ -697,9 +698,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
	if (err)
		goto out;
 
-	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
-					IB_WIDTH_4X : IB_WIDTH_1X;
-	props->active_speed	= IB_SPEED_QDR;
+	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
+				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+					IB_WIDTH_4X : IB_WIDTH_1X;
+	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+					IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;

@@ -2817,14 +2820,19 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
		if (!ibdev->ib_uc_qpns_bitmap)
			goto err_steer_qp_release;
 
-		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
-
-		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
-				dev, ibdev->steer_qpn_base,
-				ibdev->steer_qpn_base +
-				ibdev->steer_qpn_count - 1);
-		if (err)
-			goto err_steer_free_bitmap;
+		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
+			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
+				    ibdev->steer_qpn_count);
+			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+					dev, ibdev->steer_qpn_base,
+					ibdev->steer_qpn_base +
+					ibdev->steer_qpn_count - 1);
+			if (err)
+				goto err_steer_free_bitmap;
+		} else {
+			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
+				    ibdev->steer_qpn_count);
+		}
	}
 
	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
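The eth_link_query_port() change above starts reporting 56Gb links (mailbox byte 5 == 0x20) as 4X width at FDR speed instead of defaulting to QDR. A small sketch of the decode, with illustrative enum values rather than the kernel's IB_WIDTH_*/IB_SPEED_* encodings:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: width in lanes, per-lane speed in Gb/s. */
enum width { WIDTH_1X = 1, WIDTH_4X = 4 };
enum speed { SPEED_QDR = 10, SPEED_FDR = 14 };

/* Mirrors the decode in the hunk: 0x40 -> 4X/QDR, 0x20 -> 4X/FDR (56Gb),
 * anything else -> 1X/QDR. */
static void decode_eth_link(uint8_t b5, enum width *w, enum speed *s)
{
	*w = (b5 == 0x40 || b5 == 0x20 /* 56Gb */) ? WIDTH_4X : WIDTH_1X;
	*s = (b5 == 0x20 /* 56Gb */) ? SPEED_FDR : SPEED_QDR;
}

int main(void)
{
	enum width w;
	enum speed s;

	decode_eth_link(0x20, &w, &s);
	printf("56Gb link: %dX * %d Gb/s = %d Gb/s\n", w, s, w * s);
	return 0;
}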
drivers/infiniband/hw/mlx4/mlx4_ib.h  (+2, -1)

@@ -742,7 +742,8 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int mlx4_ib_destroy_ah(struct ib_ah *ah);
drivers/infiniband/hw/mlx4/qp.c  (+8, -5)

@@ -644,7 +644,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
	int qpn;
	int err;
	struct ib_qp_cap backup_cap;
-	struct mlx4_ib_sqp *sqp;
+	struct mlx4_ib_sqp *sqp = NULL;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
	struct mlx4_ib_cq *mcq;

@@ -933,7 +933,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
		mlx4_db_free(dev->dev, &qp->db);
 
 err:
-	if (!*caller_qp)
+	if (sqp)
+		kfree(sqp);
+	else if (!*caller_qp)
		kfree(qp);
	return err;
 }

@@ -1280,7 +1282,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
-	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
+	    dev->qp1_proxy[mqp->port - 1] == mqp) {
		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
		dev->qp1_proxy[mqp->port - 1] = NULL;
		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);

@@ -1764,14 +1767,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		union ib_gid gid;
-		struct ib_gid_attr gid_attr;
+		struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
		u16 vlan = 0xffff;
		u8 smac[ETH_ALEN];
		int status = 0;
		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
			attr->ah_attr.ah_flags & IB_AH_GRH;
 
-		if (is_eth) {
+		if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
			int index = attr->ah_attr.grh.sgid_index;
 
			status = ib_get_cached_gid(ibqp->device, port_num,
drivers/infiniband/hw/mlx5/ah.c  (+24, -1)

@@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
	return &ah->ibah;
 }
 
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata)
 {
	struct mlx5_ib_ah *ah;
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

@@ -75,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
	if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
		return ERR_PTR(-EINVAL);
 
+	if (ll == IB_LINK_LAYER_ETHERNET && udata) {
+		int err;
+		struct mlx5_ib_create_ah_resp resp = {};
+		u32 min_resp_len = offsetof(typeof(resp), dmac) +
+				   sizeof(resp.dmac);
+
+		if (udata->outlen < min_resp_len)
+			return ERR_PTR(-EINVAL);
+
+		resp.response_length = min_resp_len;
+
+		err = ib_resolve_eth_dmac(pd->device, ah_attr);
+		if (err)
+			return ERR_PTR(err);
+
+		memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
+		err = ib_copy_to_udata(udata, &resp, resp.response_length);
+		if (err)
+			return ERR_PTR(err);
+	}
+
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);
drivers/infiniband/hw/mlx5/cq.c  (+31, -3)

@@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
 {
-	struct mlx5_ib_create_cq ucmd;
+	struct mlx5_ib_create_cq ucmd = {};
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;

@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
	if (err)
		goto err_umem;
 
-	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

@@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 
	*index = to_mucontext(context)->uuari.uars[0].index;
 
+	if (ucmd.cqe_comp_en == 1) {
+		if (unlikely((*cqe_size != 64) ||
+			     !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+			err = -EOPNOTSUPP;
+			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+				     *cqe_size);
+			goto err_cqb;
+		}
+
+		if (unlikely(!ucmd.cqe_comp_res_format ||
+			     !(ucmd.cqe_comp_res_format <
+			       MLX5_IB_CQE_RES_RESERVED) ||
+			     (ucmd.cqe_comp_res_format &
+			      (ucmd.cqe_comp_res_format - 1)))) {
+			err = -EOPNOTSUPP;
+			mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
+				     ucmd.cqe_comp_res_format);
+			goto err_cqb;
+		}
+
+		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+		MLX5_SET(cqc, cqc, mini_cqe_res_format,
+			 ilog2(ucmd.cqe_comp_res_format));
+	}
+
	return 0;
 
+err_cqb:
+	kfree(cqb);
+
 err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

@@ -1125,7 +1153,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		return err;
	}
 
-	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
			   npas, NULL);
 
	cq->resize_umem = umem;
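The new CQE-compression validation in create_cq_user() accepts cqe_comp_res_format only when it is a single set bit below MLX5_IB_CQE_RES_RESERVED, which is what makes the later ilog2() well defined. A standalone sketch of that predicate, with the bound value assumed here only for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bound; stands in for MLX5_IB_CQE_RES_RESERVED above. */
#define RES_FORMAT_RESERVED (1u << 2)

/* Accept a format only when it is nonzero, below the reserved bound,
 * and a power of two: (x & (x - 1)) clears the lowest set bit, so it
 * is zero exactly when one bit is set. */
static int res_format_valid(uint32_t fmt)
{
	return fmt && (fmt < RES_FORMAT_RESERVED) && !(fmt & (fmt - 1));
}

int main(void)
{
	for (uint32_t fmt = 0; fmt <= 4; fmt++)
		printf("format %u -> %s\n", fmt,
		       res_format_valid(fmt) ? "ok" : "rejected");
	return 0;
}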
drivers/infiniband/hw/mlx5/main.c  (+180, -88)

[This diff was collapsed on the original page and is not shown.]
drivers/infiniband/hw/mlx5/mem.c  (+6, -1)

@@ -37,12 +37,15 @@
 /* @umem: umem object to scan
 * @addr: ib virtual address requested by the user
+ * @max_page_shift: high limit for page_shift - 0 means no limit
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compund pages
 * @order: log2 of the number of compound pages
 */
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+			unsigned long max_page_shift,
+			int *count, int *shift,
			int *ncont, int *order)
 {
	unsigned long tmp;

@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
		addr = addr >> page_shift;
		tmp = (unsigned long)addr;
		m = find_first_bit(&tmp, BITS_PER_LONG);
+		if (max_page_shift)
+			m = min_t(unsigned long, max_page_shift - page_shift, m);
		skip = 1 << m;
		mask = skip - 1;
		i = 0;
drivers/infiniband/hw/mlx5/mlx5_ib.h  (+9, -3)

@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
 #define MLX5_IB_DEFAULT_UIDX 0xffffff
 #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
 
+#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
+
 enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,

@@ -387,6 +389,7 @@ struct mlx5_ib_qp {
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
+	u32			rate_limit;
 };
 
 struct mlx5_ib_cq_buf {

@@ -418,7 +421,7 @@ struct mlx5_umr_wr {
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
-	u32				length;
+	u64				length;
	int				access_flags;
	u32				mkey;
 };

@@ -737,7 +740,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata);
 int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int mlx5_ib_destroy_ah(struct ib_ah *ah);
 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,

@@ -823,7 +827,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
 int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
 void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+			unsigned long max_page_shift,
+			int *count, int *shift,
			int *ncont, int *order);
 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
drivers/infiniband/hw/mlx5/mr.c  (+52, -19)

@@ -627,7 +627,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
		ent->order = i + 2;
		ent->dev = dev;
 
-		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+		    (mlx5_core_is_pf(dev->mdev)))
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

@@ -645,6 +646,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
	return 0;
 }
 
+static void wait_for_async_commands(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_cache_ent *ent;
+	int total = 0;
+	int i;
+	int j;
+
+	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+		ent = &cache->ent[i];
+		for (j = 0; j < 1000; j++) {
+			if (!ent->pending)
+				break;
+			msleep(50);
+		}
+	}
+	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+		ent = &cache->ent[i];
+		total += ent->pending;
+	}
+
+	if (total)
+		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
+	else
+		mlx5_ib_warn(dev, "done with all pending requests\n");
+}
+
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 {
	int i;

@@ -658,6 +686,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
		clean_keys(dev, i);
 
	destroy_workqueue(dev->cache.wq);
+	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);
 
	return 0;

@@ -815,29 +844,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
	umrwr->mkey = key;
 }
 
-static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
-				   int access_flags, int *npages,
-				   int *page_shift, int *ncont, int *order)
+static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+		       int access_flags, struct ib_umem **umem,
+		       int *npages, int *page_shift, int *ncont,
+		       int *order)
 {
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
-					   access_flags, 0);
-	if (IS_ERR(umem)) {
+	int err;
+
+	*umem = ib_umem_get(pd->uobject->context, start, length,
+			    access_flags, 0);
+	err = PTR_ERR_OR_ZERO(*umem);
+	if (err < 0) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
-		return (void *)umem;
+		return err;
	}
 
-	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
-		ib_umem_release(umem);
-		return ERR_PTR(-EINVAL);
+		ib_umem_release(*umem);
+		return -EINVAL;
	}
 
	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);
 
-	return umem;
+	return 0;
 }
 
 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)

@@ -1163,11 +1197,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
-	umem = mr_umem_get(pd, start, length, access_flags, &npages,
-			   &page_shift, &ncont, &order);
+	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
+			  &page_shift, &ncont, &order);
 
-	if (IS_ERR(umem))
-		return (void *)umem;
+	if (err < 0)
+		return ERR_PTR(err);
 
	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,

@@ -1341,10 +1375,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
-		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
-				       &page_shift, &ncont, &order);
-		if (IS_ERR(mr->umem)) {
-			err = PTR_ERR(mr->umem);
+		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
+				  &npages, &page_shift, &ncont, &order);
+		if (err < 0) {
			mr->umem = NULL;
			return err;
		}
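mr_umem_get() above switches from returning an ERR_PTR-encoded pointer to returning an int with the umem handed back through an out-parameter, using PTR_ERR_OR_ZERO() to translate. A userspace model of that convention — ERR_PTR/IS_ERR/PTR_ERR are reimplemented here only for illustration; in the kernel they come from <linux/err.h>:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified model: errors ride in the top 4K of the address space,
 * so one pointer can carry either a valid object or a negative errno. */
#define MAX_ERRNO 4095
#define ERR_PTR(e)	((void *)(long)(e))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(p))
#define PTR_ERR_OR_ZERO(p) (IS_ERR(p) ? PTR_ERR(p) : 0)

static void *get_obj(int fail)
{
	return fail ? ERR_PTR(-EINVAL) : malloc(16);
}

/* New-style wrapper, like the reworked mr_umem_get(): object via
 * out-parameter, status via plain int return. */
static int get_obj_out(int fail, void **obj)
{
	*obj = get_obj(fail);
	return PTR_ERR_OR_ZERO(*obj);
}

int main(void)
{
	void *obj;
	int err = get_obj_out(0, &obj);

	printf("ok case: err=%d\n", err);
	free(obj);
	err = get_obj_out(1, &obj);
	printf("fail case: err=%d (expected %d)\n", err, -EINVAL);
	return 0;
}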
drivers/infiniband/hw/mlx5/qp.c  (+106, -25)

@@ -78,12 +78,14 @@ struct mlx5_wqe_eth_pad {
 enum raw_qp_set_mask_map {
	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID	= 1UL << 0,
+	MLX5_RAW_QP_RATE_LIMIT		= 1UL << 1,
 };
 
 struct mlx5_modify_raw_qp_param {
	u16 operation;
 
	u32 set_mask; /* raw_qp_set_mask_map */
+	u32 rate_limit;
	u8 rq_q_ctr_id;
 };

@@ -352,6 +354,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
	return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
 }
 
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+	int max_sge;
+
+	if (attr->qp_type == IB_QPT_RC)
+		max_sge = (min_t(int, wqe_size, 512) -
+			   sizeof(struct mlx5_wqe_ctrl_seg) -
+			   sizeof(struct mlx5_wqe_raddr_seg)) /
+			sizeof(struct mlx5_wqe_data_seg);
+	else if (attr->qp_type == IB_QPT_XRC_INI)
+		max_sge = (min_t(int, wqe_size, 512) -
+			   sizeof(struct mlx5_wqe_ctrl_seg) -
+			   sizeof(struct mlx5_wqe_xrc_seg) -
+			   sizeof(struct mlx5_wqe_raddr_seg)) /
+			sizeof(struct mlx5_wqe_data_seg);
+	else
+		max_sge = (wqe_size - sq_overhead(attr)) /
+			sizeof(struct mlx5_wqe_data_seg);
+
+	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
+		     sizeof(struct mlx5_wqe_data_seg));
+}
+
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
 {

@@ -382,13 +407,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
-		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
+			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
-	qp->sq.max_gs = attr->cap.max_send_sge;
+	qp->sq.max_gs = get_send_sge(attr, wqe_size);
+	if (qp->sq.max_gs < attr->cap.max_send_sge)
+		return -ENOMEM;
+
+	attr->cap.max_send_sge = qp->sq.max_gs;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

@@ -648,7 +678,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
		return PTR_ERR(*umem);
	}
 
-	mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
+	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
 
	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {

@@ -701,7 +731,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
		return err;
	}
 
-	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
				     &rwq->rq_page_offset);

@@ -2443,8 +2473,14 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 }
 
 static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
-				   struct mlx5_ib_sq *sq, int new_state)
+				   struct mlx5_ib_sq *sq,
+				   int new_state,
+				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
 {
+	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+	u32 old_rate = ibqp->rate_limit;
+	u32 new_rate = old_rate;
+	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;

@@ -2460,10 +2496,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);
 
+	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+		if (new_state != MLX5_SQC_STATE_RDY)
+			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+				__func__);
+		else
+			new_rate = raw_qp_param->rate_limit;
+	}
+
+	if (old_rate != new_rate) {
+		if (new_rate) {
+			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
+			if (err) {
+				pr_err("Failed configuring rate %u: %d\n",
+				       new_rate, err);
+				goto out;
+			}
+		}
+
+		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+	}
+
	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
-	if (err)
+	if (err) {
+		/* Remove new rate from table if failed */
+		if (new_rate &&
+		    old_rate != new_rate)
+			mlx5_rl_remove_rate(dev, new_rate);
		goto out;
+	}
+
+	/* Only remove the old rate after new rate was set */
+	if ((old_rate &&
+	    (old_rate != new_rate)) ||
+	    (new_state != MLX5_SQC_STATE_RDY))
+		mlx5_rl_remove_rate(dev, old_rate);
 
+	ibqp->rate_limit = new_rate;
	sq->state = new_state;
 
 out:

@@ -2478,6 +2548,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+	int modify_rq = !!qp->rq.wqe_cnt;
+	int modify_sq = !!qp->sq.wqe_cnt;
	int rq_state;
	int sq_state;
	int err;

@@ -2495,10 +2567,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
-	case MLX5_CMD_OP_INIT2INIT_QP:
-	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
+		if (raw_qp_param->set_mask == MLX5_RAW_QP_RATE_LIMIT) {
+			modify_rq = 0;
+			sq_state = sq->state;
+		} else {
+			return raw_qp_param->set_mask ? -EINVAL : 0;
+		}
+		break;
+	case MLX5_CMD_OP_INIT2INIT_QP:
+	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			return -EINVAL;
		else

@@ -2508,13 +2588,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
		return -EINVAL;
	}
 
-	if (qp->rq.wqe_cnt) {
-		err =  modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+	if (modify_rq) {
+		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
		if (err)
			return err;
	}
 
-	if (qp->sq.wqe_cnt) {
+	if (modify_sq) {
		if (tx_affinity) {
			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
							    tx_affinity);

@@ -2522,7 +2602,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				return err;
		}
 
-		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
	}
 
	return 0;

@@ -2578,7 +2658,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
	struct mlx5_ib_port *mibport = NULL;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
-	int sqd_event;
	int mlx5_st;
	int err;
	u16 op;

@@ -2725,12 +2804,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
-	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
-	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
-		sqd_event = 1;
-	else
-		sqd_event = 0;
-
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
			       qp->port) - 1;

@@ -2777,6 +2850,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
		}
+
+		if (attr_mask & IB_QP_RATE_LIMIT) {
+			raw_qp_param.rate_limit = attr->rate_limit;
+			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+		}
+
		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
	} else {
		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,

@@ -3068,10 +3147,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
 {
	memset(umr, 0, sizeof(*umr));
	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-	umr->flags = 1 << 7;
+	umr->flags = MLX5_UMR_INLINE;
 }
 
-static __be64 get_umr_reg_mr_mask(void)
+static __be64 get_umr_reg_mr_mask(int atomic)
 {
	u64 result;

@@ -3084,9 +3163,11 @@ static __be64 get_umr_reg_mr_mask(void)
		 MLX5_MKEY_MASK_KEY		|
		 MLX5_MKEY_MASK_RR		|
		 MLX5_MKEY_MASK_RW		|
-		 MLX5_MKEY_MASK_A		|
		 MLX5_MKEY_MASK_FREE;
 
+	if (atomic)
+		result |= MLX5_MKEY_MASK_A;
+
	return cpu_to_be64(result);
 }

@@ -3147,7 +3228,7 @@ static __be64 get_umr_update_pd_mask(void)
 }
 
 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				struct ib_send_wr *wr)
+				struct ib_send_wr *wr, int atomic)
 {
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

@@ -3172,7 +3253,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
		if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
			umr->mkey_mask |= get_umr_update_pd_mask();
		if (!umr->mkey_mask)
-			umr->mkey_mask = get_umr_reg_mr_mask();
+			umr->mkey_mask = get_umr_reg_mr_mask(atomic);
	} else {
		umr->mkey_mask = get_umr_unreg_mr_mask();
	}

@@ -4025,7 +4106,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
-			set_reg_umr_segment(seg, wr);
+			set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
drivers/infiniband/hw/mlx5/srq.c  (+2, -2)

@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
		return err;
	}
 
-	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
+	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);

@@ -280,6 +280,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);
+	in.type = init_attr->srq_type;
 
	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size);

@@ -292,7 +293,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
		goto err_srq;
	}
 
-	in.type = init_attr->srq_type;
	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
drivers/infiniband/hw/mthca/mthca_av.c  (+2, -4)

@@ -186,8 +186,8 @@ int mthca_create_ah(struct mthca_dev *dev,
 on_hca_fail:
	if (ah->type == MTHCA_AH_PCI_POOL) {
-		ah->av = pci_pool_alloc(dev->av_table.pool,
-					GFP_ATOMIC, &ah->avdma);
+		ah->av = pci_pool_zalloc(dev->av_table.pool,
+					 GFP_ATOMIC, &ah->avdma);
		if (!ah->av)
			return -ENOMEM;

@@ -196,8 +196,6 @@ int mthca_create_ah(struct mthca_dev *dev,
	ah->key = pd->ntmr.ibmr.lkey;
 
-	memset(av, 0, MTHCA_AV_SIZE);
-
	av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
	av->g_slid  = ah_attr->src_path_bits;
	av->dlid    = cpu_to_be16(ah_attr->dlid);
drivers/infiniband/hw/mthca/mthca_provider.c  (+3, -1)

@@ -410,7 +410,9 @@ static int mthca_dealloc_pd(struct ib_pd *pd)
 }
 
 static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
-				     struct ib_ah_attr *ah_attr)
+				     struct ib_ah_attr *ah_attr,
+				     struct ib_udata *udata)
+
 {
	int err;
	struct mthca_ah *ah;
drivers/infiniband/hw/nes/nes_verbs.c  (+2, -1)

@@ -771,7 +771,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
 /**
 * nes_create_ah
 */
-static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				   struct ib_udata *udata)
 {
	return ERR_PTR(-ENOSYS);
 }
drivers/infiniband/hw/ocrdma/ocrdma_ah.c  (+2, -1)

@@ -154,7 +154,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
	return status;
 }
 
-struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+			       struct ib_udata *udata)
 {
	u32 *ahid_addr;
	int status;
drivers/infiniband/hw/ocrdma/ocrdma_ah.h  (+3, -1)

@@ -50,7 +50,9 @@ enum {
	OCRDMA_AH_L3_TYPE_MASK		= 0x03,
	OCRDMA_AH_L3_TYPE_SHIFT		= 0x1D /* 29 bits */
 };
-struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
+
+struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *,
+			       struct ib_udata *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
 int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);
drivers/infiniband/hw/qedr/verbs.c  (+2, -1)

@@ -2094,7 +2094,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
	return rc;
 }
 
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+			     struct ib_udata *udata)
 {
	struct qedr_ah *ah;
drivers/infiniband/hw/qedr/verbs.h  (+2, -1)

@@ -70,7 +70,8 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *);
 int qedr_destroy_qp(struct ib_qp *ibqp);
 
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+			     struct ib_udata *udata);
 int qedr_destroy_ah(struct ib_ah *ibah);
 
 int qedr_dereg_mr(struct ib_mr *);
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -738,7 +738,9 @@ int usnic_ib_mmap(struct ib_ucontext *context,
 /* In ib callbacks section - Start of stub funcs */
 struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
-				 struct ib_ah_attr *ah_attr)
+				 struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata)
 {
 	usnic_dbg("\n");
 	return ERR_PTR(-EPERM);
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -75,7 +75,9 @@ int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
 int usnic_ib_mmap(struct ib_ucontext *context,
 			struct vm_area_struct *vma);
 struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
-					struct ib_ah_attr *ah_attr);
+					struct ib_ah_attr *ah_attr,
+					struct ib_udata *udata);
 int usnic_ib_destroy_ah(struct ib_ah *ah);
 int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			struct ib_send_wr **bad_wr);
drivers/infiniband/sw/rxe/rxe_param.h
@@ -82,7 +82,7 @@ enum rxe_device_param {
 	RXE_MAX_SGE			= 32,
 	RXE_MAX_SGE_RD			= 32,
 	RXE_MAX_CQ			= 16384,
-	RXE_MAX_LOG_CQE			= 13,
+	RXE_MAX_LOG_CQE			= 15,
 	RXE_MAX_MR			= 2 * 1024,
 	RXE_MAX_PD			= 0x7ffc,
 	RXE_MAX_QP_RD_ATOM		= 128,
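RXE_MAX_LOG_CQE is a log2 limit, so bumping it from 13 to 15 quadruples the maximum depth of a single rxe completion queue:

	1 << 13 = 8192 CQEs   ->   1 << 15 = 32768 CQEs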
drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -316,7 +316,9 @@ static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
 	return err;
 }
 
-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
+				   struct ib_ah_attr *attr,
+				   struct ib_udata *udata)
 {
 	int err;
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1050,8 +1050,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 	tx_qp = ib_create_qp(priv->pd, &attr);
 	if (PTR_ERR(tx_qp) == -EINVAL) {
-		ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
-			   priv->ca->name);
 		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
 		tx_qp = ib_create_qp(priv->pd, &attr);
 	}
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
 			r->com.from_state = r->com.state;
 			r->com.to_state = state;
 			r->com.state = RES_EQ_BUSY;
-			if (eq)
-				*eq = r;
 		}
 	}
 
 	spin_unlock_irq(mlx4_tlock(dev));
 
+	if (!err && eq)
+		*eq = r;
+
 	return err;
 }
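The resource_tracker.c hunk changes when the output pointer is published: *eq was written inside the table scan while the lock was held, and is now written once, after spin_unlock_irq(), and only if the move to the busy state succeeded. The resulting shape, sketched with hypothetical lock/lookup names:

	spin_lock_irq(lock);
	/* look the resource r up and try to move it to the busy state,
	 * setting err on any failure */
	spin_unlock_irq(lock);

	/* publish the result to the caller only on the success path */
	if (!err && eq)
		*eq = r;
	return err;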
include/linux/mlx5/mlx5_ifc.h
@@ -576,7 +576,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         self_lb_en_modifiable[0x1];
 	u8         reserved_at_9[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_at_10[0x2];
+	u8         multi_pkt_send_wqe[0x2];
 	u8         wqe_inline_mode[0x2];
 	u8         rss_ind_tbl_cap[0x4];
 	u8         reg_umr_sq[0x1];
include/rdma/ib_verbs.h
@@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask {
 	IB_QP_RESERVED2			= (1<<22),
 	IB_QP_RESERVED3			= (1<<23),
 	IB_QP_RESERVED4			= (1<<24),
+	IB_QP_RATE_LIMIT		= (1<<25),
 };
 
 enum ib_qp_state {
@@ -1151,6 +1152,7 @@ struct ib_qp_attr {
 	u8			rnr_retry;
 	u8			alt_port_num;
 	u8			alt_timeout;
+	u32			rate_limit;
 };
 
 enum ib_wr_opcode {
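The two hunks above, the IB_QP_RATE_LIMIT mask bit plus the new rate_limit attribute field, are the core plumbing for QP packet pacing. A minimal sketch of a consumer setting a limit, assuming the device supports pacing (the 100000 value is illustrative; mlx5 advertises the valid range in mlx5_packet_pacing_caps further down):

	struct ib_qp_attr attr = {};
	int err;

	attr.rate_limit = 100000;	/* kbps */
	err = ib_modify_qp(qp, &attr, IB_QP_RATE_LIMIT);
	if (err)
		pr_warn("rate limit rejected: %d\n", err);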
@@ -1592,17 +1594,19 @@ enum ib_flow_attr_type {
 /* Supported steering header types */
 enum ib_flow_spec_type {
 	/* L2 headers*/
-	IB_FLOW_SPEC_ETH	= 0x20,
-	IB_FLOW_SPEC_IB		= 0x22,
+	IB_FLOW_SPEC_ETH		= 0x20,
+	IB_FLOW_SPEC_IB			= 0x22,
 	/* L3 header*/
-	IB_FLOW_SPEC_IPV4	= 0x30,
-	IB_FLOW_SPEC_IPV6	= 0x31,
+	IB_FLOW_SPEC_IPV4		= 0x30,
+	IB_FLOW_SPEC_IPV6		= 0x31,
 	/* L4 headers*/
-	IB_FLOW_SPEC_TCP	= 0x40,
-	IB_FLOW_SPEC_UDP	= 0x41
+	IB_FLOW_SPEC_TCP		= 0x40,
+	IB_FLOW_SPEC_UDP		= 0x41,
+	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
+	IB_FLOW_SPEC_INNER		= 0x100,
 };
 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
-#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
+#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
 
 /* Flow steering rule priority is set according to it's domain.
  * Lower domain value means higher priority.
@@ -1630,7 +1634,7 @@ struct ib_flow_eth_filter {
 };
 
 struct ib_flow_spec_eth {
-	enum ib_flow_spec_type	  type;
+	u32			  type;
 	u16			  size;
 	struct ib_flow_eth_filter val;
 	struct ib_flow_eth_filter mask;
@@ -1644,7 +1648,7 @@ struct ib_flow_ib_filter {
 };
 
 struct ib_flow_spec_ib {
-	enum ib_flow_spec_type	 type;
+	u32			 type;
 	u16			 size;
 	struct ib_flow_ib_filter val;
 	struct ib_flow_ib_filter mask;
@@ -1669,7 +1673,7 @@ struct ib_flow_ipv4_filter {
 };
 
 struct ib_flow_spec_ipv4 {
-	enum ib_flow_spec_type	   type;
+	u32			   type;
 	u16			   size;
 	struct ib_flow_ipv4_filter val;
 	struct ib_flow_ipv4_filter mask;
@@ -1687,7 +1691,7 @@ struct ib_flow_ipv6_filter {
 };
 
 struct ib_flow_spec_ipv6 {
-	enum ib_flow_spec_type	   type;
+	u32			   type;
 	u16			   size;
 	struct ib_flow_ipv6_filter val;
 	struct ib_flow_ipv6_filter mask;
@@ -1701,15 +1705,30 @@ struct ib_flow_tcp_udp_filter {
 };
 
 struct ib_flow_spec_tcp_udp {
-	enum ib_flow_spec_type	      type;
+	u32			      type;
 	u16			      size;
 	struct ib_flow_tcp_udp_filter val;
 	struct ib_flow_tcp_udp_filter mask;
 };
 
+struct ib_flow_tunnel_filter {
+	__be32	tunnel_id;
+	u8	real_sz[0];
+};
+
+/* ib_flow_spec_tunnel describes the Vxlan tunnel
+ * the tunnel_id from val has the vni value
+ */
+struct ib_flow_spec_tunnel {
+	u32			      type;
+	u16			      size;
+	struct ib_flow_tunnel_filter  val;
+	struct ib_flow_tunnel_filter  mask;
+};
+
 union ib_flow_spec {
 	struct {
-		enum ib_flow_spec_type	type;
+		u32			type;
 		u16			size;
 	};
 	struct ib_flow_spec_eth		eth;
@@ -1717,6 +1736,7 @@ union ib_flow_spec {
 	struct ib_flow_spec_ipv4	ipv4;
 	struct ib_flow_spec_tcp_udp	tcp_udp;
 	struct ib_flow_spec_ipv6	ipv6;
+	struct ib_flow_spec_tunnel	tunnel;
 };
 
 struct ib_flow_attr {
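With IB_FLOW_SPEC_VXLAN_TUNNEL, IB_FLOW_SPEC_INNER, and the new tunnel member of union ib_flow_spec in place, a steering rule can match on the VXLAN VNI and on the encapsulated headers. A rough sketch of filling one tunnel spec (the VNI value and mask are illustrative):

	union ib_flow_spec spec = {};

	spec.tunnel.type = IB_FLOW_SPEC_VXLAN_TUNNEL;
	spec.tunnel.size = sizeof(spec.tunnel);
	spec.tunnel.val.tunnel_id  = cpu_to_be32(42);	/* VNI to match */
	spec.tunnel.mask.tunnel_id = cpu_to_be32(0xffffff);

	/* Specs describing the inner (encapsulated) packet OR the new flag
	 * into their type, e.g. IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_ETH. */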
@@ -1933,7 +1953,8 @@ struct ib_device {
 						      struct ib_udata *udata);
 	int                        (*dealloc_pd)(struct ib_pd *pd);
 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
-						struct ib_ah_attr *ah_attr);
+						struct ib_ah_attr *ah_attr,
+						struct ib_udata *udata);
 	int                        (*modify_ah)(struct ib_ah *ah,
 						struct ib_ah_attr *ah_attr);
 	int                        (*query_ah)(struct ib_ah *ah,
@@ -2580,6 +2601,24 @@ void ib_dealloc_pd(struct ib_pd *pd);
  */
 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
 
+/**
+ * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
+ * work completion.
+ * @hdr: the L3 header to parse
+ * @net_type: type of header to parse
+ * @sgid: place to store source gid
+ * @dgid: place to store destination gid
+ */
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+			      enum rdma_network_type net_type,
+			      union ib_gid *sgid, union ib_gid *dgid);
+
+/**
+ * ib_get_rdma_header_version - Get the header version
+ * @hdr: the L3 header to parse
+ */
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
+
 /**
  * ib_init_ah_from_wc - Initializes address handle attributes from a
  * work completion.
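ib_get_gids_from_rdma_hdr parses the L3 header of a received packet into source and destination GIDs; for RoCEv2 over IPv4 the addresses should come back as IPv4-mapped GIDs. A hedged usage sketch, assuming hdr points at a received union rdma_network_hdr whose network type is already known:

	union ib_gid sgid, dgid;
	int err;

	err = ib_get_gids_from_rdma_hdr(hdr, RDMA_NETWORK_IPV4, &sgid, &dgid);
	if (!err)
		pr_debug("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);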
@@ -3357,4 +3396,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
 void ib_drain_rq(struct ib_qp *qp);
 void ib_drain_sq(struct ib_qp *qp);
 void ib_drain_qp(struct ib_qp *qp);
+
+int ib_resolve_eth_dmac(struct ib_device *device,
+			struct ib_ah_attr *ah_attr);
 #endif /* IB_VERBS_H */
include/uapi/rdma/ib_user_verbs.h
@@ -37,6 +37,7 @@
 #define IB_USER_VERBS_H
 
 #include <linux/types.h>
+#include <rdma/ib_verbs.h>
 
 /*
  * Increment this value if any changes that break userspace ABI
@@ -93,6 +94,7 @@ enum {
 	IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
 	IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
 	IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
+	IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
 	IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
 	IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 	IB_USER_VERBS_EX_CMD_CREATE_WQ,
@@ -545,6 +547,14 @@ enum {
 	IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
 };
 
+enum {
+	IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+};
+
+enum {
+	IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+};
+
 struct ib_uverbs_ex_create_qp {
 	__u64 user_handle;
 	__u32 pd_handle;
@@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp {
 	__u64 driver_data[0];
 };
 
+struct ib_uverbs_ex_modify_qp {
+	struct ib_uverbs_modify_qp base;
+	__u32	rate_limit;
+	__u32	reserved;
+};
+
 struct ib_uverbs_modify_qp_resp {
 };
 
+struct ib_uverbs_ex_modify_qp_resp {
+	__u32  comp_mask;
+	__u32  response_length;
+};
+
 struct ib_uverbs_destroy_qp {
 	__u64 response;
 	__u32 qp_handle;
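ib_uverbs_ex_modify_qp simply appends the rate limit to the legacy request, and the two *_LAST_QP_ATTR_MASK enums from the earlier hunk let the kernel distinguish legacy commands (masks up to IB_QP_DEST_QPN) from extended ones. A sketch of how userspace plumbing might populate the extended command (qp_handle is a placeholder from an earlier create-QP):

	struct ib_uverbs_ex_modify_qp cmd = {};

	cmd.base.qp_handle = qp_handle;
	cmd.base.attr_mask = IB_QP_RATE_LIMIT;
	cmd.rate_limit     = 100000;	/* kbps */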
@@ -908,6 +929,23 @@ struct ib_uverbs_flow_spec_ipv6 {
 	struct ib_uverbs_flow_ipv6_filter mask;
 };
 
+struct ib_uverbs_flow_tunnel_filter {
+	__be32 tunnel_id;
+};
+
+struct ib_uverbs_flow_spec_tunnel {
+	union {
+		struct ib_uverbs_flow_spec_hdr hdr;
+		struct {
+			__u32 type;
+			__u16 size;
+			__u16 reserved;
+		};
+	};
+	struct ib_uverbs_flow_tunnel_filter val;
+	struct ib_uverbs_flow_tunnel_filter mask;
+};
+
 struct ib_uverbs_flow_attr {
 	__u32 type;
 	__u16 size;
include/uapi/rdma/mlx5-abi.h
@@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
 
 enum mlx5_user_cmds_supp_uhw {
 	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
 };
 
 struct mlx5_ib_alloc_ucontext_resp {
@@ -124,18 +125,47 @@ struct mlx5_ib_rss_caps {
 	__u8 reserved[7];
 };
 
+enum mlx5_ib_cqe_comp_res_format {
+	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
+	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
+	MLX5_IB_CQE_RES_RESERVED	= 1 << 2,
+};
+
+struct mlx5_ib_cqe_comp_caps {
+	__u32 max_num;
+	__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
+};
+
+struct mlx5_packet_pacing_caps {
+	__u32 qp_rate_limit_min;
+	__u32 qp_rate_limit_max; /* In kpbs */
+
+	/* Corresponding bit will be set if qp type from
+	 * 'enum ib_qp_type' is supported, e.g.
+	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+	 */
+	__u32 supported_qpts;
+	__u32 reserved;
+};
+
 struct mlx5_ib_query_device_resp {
 	__u32	comp_mask;
 	__u32	response_length;
 	struct	mlx5_ib_tso_caps tso_caps;
 	struct	mlx5_ib_rss_caps rss_caps;
+	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
+	struct	mlx5_packet_pacing_caps packet_pacing_caps;
+	__u32	mlx5_ib_support_multi_pkt_send_wqes;
+	__u32	reserved;
 };
 
 struct mlx5_ib_create_cq {
 	__u64 buf_addr;
 	__u64 db_addr;
 	__u32 cqe_size;
-	__u32 reserved; /* explicit padding (optional on i386) */
+	__u8 cqe_comp_en;
+	__u8 cqe_comp_res_format;
+	__u16 reserved; /* explicit padding (optional on i386) */
 };
 
 struct mlx5_ib_create_cq_resp {
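The new capability blocks let userspace discover CQE compression and packet pacing support from a single extended query-device response. A sketch of checking the pacing caps before requesting a limit (resp is assumed to be an already-filled mlx5_ib_query_device_resp; request_rate_limit() is a hypothetical helper):

	if ((resp.packet_pacing_caps.supported_qpts & (1 << IB_QPT_RAW_PACKET)) &&
	    rate >= resp.packet_pacing_caps.qp_rate_limit_min &&
	    rate <= resp.packet_pacing_caps.qp_rate_limit_max)
		request_rate_limit(qp, rate);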
@@ -232,6 +262,12 @@ struct mlx5_ib_create_wq {
 	__u32 reserved;
 };
 
+struct mlx5_ib_create_ah_resp {
+	__u32	response_length;
+	__u8	dmac[ETH_ALEN];
+	__u8	reserved[6];
+};
+
 struct mlx5_ib_create_wq_resp {
 	__u32	response_length;
 	__u32	reserved;