openanolis / cloud-kernel
Commit a1139697
Authored on Aug 24, 2017 by Doug Ledford

Merge branch 'mellanox' into k.o/for-next

Signed-off-by: Doug Ledford <dledford@redhat.com>
Parents: accbef5c, 050da902

Showing 35 changed files with 330 additions and 285 deletions (+330 −285).
drivers/infiniband/core/cache.c                    +8   -15
drivers/infiniband/core/cma.c                      +2   -2
drivers/infiniband/core/device.c                   +2   -6
drivers/infiniband/core/sa_query.c                 +1   -2
drivers/infiniband/core/uverbs_cmd.c               +51  -76
drivers/infiniband/core/uverbs_main.c              +1   -12
drivers/infiniband/core/verbs.c                    +12  -22
drivers/infiniband/hw/mlx4/alias_GUID.c            +1   -1
drivers/infiniband/hw/mlx4/cq.c                    +1   -1
drivers/infiniband/hw/mlx4/mcg.c                   +4   -5
drivers/infiniband/hw/mlx4/qp.c                    +11  -11
drivers/infiniband/hw/mlx5/cq.c                    +1   -1
drivers/infiniband/hw/mlx5/mad.c                   +1   -1
drivers/infiniband/hw/mlx5/main.c                  +75  -11
drivers/infiniband/hw/mlx5/mlx5_ib.h               +1   -1
drivers/infiniband/hw/mlx5/mr.c                    +66  -55
drivers/infiniband/hw/mlx5/qp.c                    +5   -0
drivers/infiniband/hw/mthca/mthca_cmd.c            +1   -1
drivers/infiniband/hw/nes/nes_verbs.c              +0   -5
drivers/infiniband/hw/usnic/usnic_fwd.c            +2   -10
drivers/infiniband/hw/usnic/usnic_fwd.h            +1   -1
drivers/infiniband/hw/usnic/usnic_ib_main.c        +6   -4
drivers/infiniband/sw/rxe/rxe_hw_counters.c        +1   -1
drivers/infiniband/ulp/ipoib/ipoib.h               +1   -0
drivers/infiniband/ulp/ipoib/ipoib_cm.c            +7   -1
drivers/infiniband/ulp/ipoib/ipoib_main.c          +6   -9
drivers/infiniband/ulp/ipoib/ipoib_vlan.c          +18  -4
drivers/infiniband/ulp/iser/iser_verbs.c           +2   -4
drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c    +1   -6
drivers/infiniband/ulp/srpt/ib_srpt.c              +2   -3
include/linux/mlx5/driver.h                        +1   -1
include/linux/mlx5/mlx5_ifc.h                      +8   -4
include/rdma/ib_verbs.h                            +5   -5
include/uapi/rdma/mlx4-abi.h                       +2   -4
include/uapi/rdma/mlx5-abi.h                       +23  -0
drivers/infiniband/core/cache.c

@@ -1199,30 +1199,23 @@ int ib_cache_setup_one(struct ib_device *device)
 	device->cache.ports =
 		kzalloc(sizeof(*device->cache.ports) *
 			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-	if (!device->cache.ports) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!device->cache.ports)
+		return -ENOMEM;
 
 	err = gid_table_setup_one(device);
-	if (err)
-		goto out;
+	if (err) {
+		kfree(device->cache.ports);
+		device->cache.ports = NULL;
+		return err;
+	}
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device), true);
 
 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
 			      device, ib_cache_event);
-	err = ib_register_event_handler(&device->cache.event_handler);
-	if (err)
-		goto err;
+	ib_register_event_handler(&device->cache.event_handler);
 
 	return 0;
-
-err:
-	gid_table_cleanup_one(device);
-out:
-	return err;
 }
 
 void ib_cache_release_one(struct ib_device *device)
drivers/infiniband/core/cma.c

@@ -72,7 +72,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18
-#define CMA_PREFERRED_ROCE_GID_TYPE (1 << IB_GID_TYPE_ROCE_UDP_ENCAP)
+#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
 
 static const char * const cma_events[] = {
 	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
@@ -4282,7 +4282,7 @@ static void cma_add_one(struct ib_device *device)
 	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
 		supported_gids = roce_gid_type_mask_support(device, i);
 		WARN_ON(!supported_gids);
-		if (supported_gids & CMA_PREFERRED_ROCE_GID_TYPE)
+		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
 			cma_dev->default_gid_type[i - rdma_start_port(device)] =
 				CMA_PREFERRED_ROCE_GID_TYPE;
 		else
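Note: the macro above changes meaning from a ready-made bitmask to a GID type index, so the test site must now build the mask itself. A minimal stand-alone sketch of the new form (enum value copied from the kernel of that era; everything else is a stub, not kernel code):

/* gcc -o gid_mask gid_mask.c && ./gid_mask */
#include <assert.h>

enum ib_gid_type { IB_GID_TYPE_ROCE_UDP_ENCAP = 1 };	/* stub */

#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP

int main(void)
{
	/* roce_gid_type_mask_support() returns one bit per GID type. */
	unsigned long supported_gids = 1 << IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* New form: the macro is a type index, so shift it into a mask. */
	assert(supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE));
	return 0;
}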
drivers/infiniband/core/device.c

@@ -747,7 +747,7 @@ EXPORT_SYMBOL(ib_set_client_data);
  * chapter 11 of the InfiniBand Architecture Specification). This
  * callback may occur in interrupt context.
  */
-int ib_register_event_handler(struct ib_event_handler *event_handler)
+void ib_register_event_handler(struct ib_event_handler *event_handler)
 {
 	unsigned long flags;
@@ -755,8 +755,6 @@ int ib_register_event_handler (struct ib_event_handler *event_handler)
 	list_add_tail(&event_handler->list,
 		      &event_handler->device->event_handler_list);
 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
-	return 0;
 }
 EXPORT_SYMBOL(ib_register_event_handler);
@@ -767,15 +765,13 @@ EXPORT_SYMBOL(ib_register_event_handler);
  * Unregister an event handler registered with
  * ib_register_event_handler().
  */
-int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+void ib_unregister_event_handler(struct ib_event_handler *event_handler)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
 	list_del(&event_handler->list);
 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
-	return 0;
 }
 EXPORT_SYMBOL(ib_unregister_event_handler);
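Note: registration is now a pure list insertion under a spinlock and cannot fail, which is why every caller in this merge drops its error path. An illustrative user-space mock of the new contract (stub types, not the real kernel API):

/* gcc -o reg_demo reg_demo.c && ./reg_demo */
#include <stdio.h>

struct ib_event_handler {
	struct ib_event_handler *next;	/* stand-in for list_head */
};

static struct ib_event_handler *handler_list;

/* void return: list insertion into preallocated storage cannot fail */
static void ib_register_event_handler(struct ib_event_handler *h)
{
	h->next = handler_list;
	handler_list = h;
}

int main(void)
{
	struct ib_event_handler h;

	ib_register_event_handler(&h);	/* no return value to check */
	printf("registered\n");
	return 0;
}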
drivers/infiniband/core/sa_query.c

@@ -2417,8 +2417,7 @@ static void ib_sa_add_one(struct ib_device *device)
 	 */
 	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
-	if (ib_register_event_handler(&sa_dev->event_handler))
-		goto err;
+	ib_register_event_handler(&sa_dev->event_handler);
 
 	for (i = 0; i <= e - s; ++i) {
 		if (rdma_cap_ib_sa(device, i + 1))
drivers/infiniband/core/uverbs_cmd.c

@@ -1820,6 +1820,28 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	return ret;
 }
 
+static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
+				   struct rdma_ah_attr *rdma_attr)
+{
+	const struct ib_global_route   *grh;
+
+	uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
+	uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
+	uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
+	uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
+	uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
+					   IB_AH_GRH);
+	if (uverb_attr->is_global) {
+		grh = rdma_ah_read_grh(rdma_attr);
+		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
+		uverb_attr->flow_label        = grh->flow_label;
+		uverb_attr->sgid_index        = grh->sgid_index;
+		uverb_attr->hop_limit         = grh->hop_limit;
+		uverb_attr->traffic_class     = grh->traffic_class;
+	}
+	uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
+}
+
 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 			   struct ib_device *ib_dev,
 			   const char __user *buf, int in_len,
@@ -1830,7 +1852,6 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	struct ib_qp                   *qp;
 	struct ib_qp_attr              *attr;
 	struct ib_qp_init_attr         *init_attr;
-	const struct ib_global_route   *grh;
 	int                            ret;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1880,39 +1901,8 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	resp.alt_port_num           = attr->alt_port_num;
 	resp.alt_timeout            = attr->alt_timeout;
 
-	resp.dest.dlid              = rdma_ah_get_dlid(&attr->ah_attr);
-	resp.dest.sl                = rdma_ah_get_sl(&attr->ah_attr);
-	resp.dest.src_path_bits    = rdma_ah_get_path_bits(&attr->ah_attr);
-	resp.dest.static_rate      = rdma_ah_get_static_rate(&attr->ah_attr);
-	resp.dest.is_global        = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
-					IB_AH_GRH);
-	if (resp.dest.is_global) {
-		grh = rdma_ah_read_grh(&attr->ah_attr);
-		memcpy(resp.dest.dgid, grh->dgid.raw, 16);
-		resp.dest.flow_label        = grh->flow_label;
-		resp.dest.sgid_index        = grh->sgid_index;
-		resp.dest.hop_limit         = grh->hop_limit;
-		resp.dest.traffic_class     = grh->traffic_class;
-	}
-	resp.dest.port_num          = rdma_ah_get_port_num(&attr->ah_attr);
-
-	resp.alt_dest.dlid          = rdma_ah_get_dlid(&attr->alt_ah_attr);
-	resp.alt_dest.sl            = rdma_ah_get_sl(&attr->alt_ah_attr);
-	resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
-	resp.alt_dest.static_rate   = rdma_ah_get_static_rate(&attr->alt_ah_attr);
-	resp.alt_dest.is_global     = !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
-					 IB_AH_GRH);
-	if (resp.alt_dest.is_global) {
-		grh = rdma_ah_read_grh(&attr->alt_ah_attr);
-		memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
-		resp.alt_dest.flow_label    = grh->flow_label;
-		resp.alt_dest.sgid_index    = grh->sgid_index;
-		resp.alt_dest.hop_limit     = grh->hop_limit;
-		resp.alt_dest.traffic_class = grh->traffic_class;
-	}
-	resp.alt_dest.port_num      = rdma_ah_get_port_num(&attr->alt_ah_attr);
+	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
+	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
 
 	resp.max_send_wr            = init_attr->cap.max_send_wr;
 	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
@@ -1946,6 +1936,29 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
 	}
 }
 
+static void copy_ah_attr_from_uverbs(struct ib_device *dev,
+				     struct rdma_ah_attr *rdma_attr,
+				     struct ib_uverbs_qp_dest *uverb_attr)
+{
+	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
+	if (uverb_attr->is_global) {
+		rdma_ah_set_grh(rdma_attr, NULL,
+				uverb_attr->flow_label,
+				uverb_attr->sgid_index,
+				uverb_attr->hop_limit,
+				uverb_attr->traffic_class);
+		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
+	} else {
+		rdma_ah_set_ah_flags(rdma_attr, 0);
+	}
+	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
+	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
+	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
+	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
+	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
+	rdma_ah_set_make_grd(rdma_attr, false);
+}
+
 static int modify_qp(struct ib_uverbs_file *file,
 		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
 {
@@ -1993,50 +2006,12 @@ static int modify_qp(struct ib_uverbs_file *file,
 	attr->rate_limit	  = cmd->rate_limit;
 
-	if (cmd->base.attr_mask & IB_QP_AV) {
-		attr->ah_attr.type = rdma_ah_find_type(qp->device,
-						       cmd->base.dest.port_num);
-		if (cmd->base.dest.is_global) {
-			rdma_ah_set_grh(&attr->ah_attr, NULL,
-					cmd->base.dest.flow_label,
-					cmd->base.dest.sgid_index,
-					cmd->base.dest.hop_limit,
-					cmd->base.dest.traffic_class);
-			rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
-		} else {
-			rdma_ah_set_ah_flags(&attr->ah_attr, 0);
-		}
-		rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
-		rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
-		rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
-		rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
-		rdma_ah_set_port_num(&attr->ah_attr, cmd->base.dest.port_num);
-		rdma_ah_set_make_grd(&attr->ah_attr, false);
-	}
+	if (cmd->base.attr_mask & IB_QP_AV)
+		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
+					 &cmd->base.dest);
 
-	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
-		attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
-							   cmd->base.dest.port_num);
-		if (cmd->base.alt_dest.is_global) {
-			rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
-					cmd->base.alt_dest.flow_label,
-					cmd->base.alt_dest.sgid_index,
-					cmd->base.alt_dest.hop_limit,
-					cmd->base.alt_dest.traffic_class);
-			rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
-					     cmd->base.alt_dest.dgid);
-		} else {
-			rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
-		}
-		rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
-		rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
-		rdma_ah_set_path_bits(&attr->alt_ah_attr,
-				      cmd->base.alt_dest.src_path_bits);
-		rdma_ah_set_static_rate(&attr->alt_ah_attr,
-					cmd->base.alt_dest.static_rate);
-		rdma_ah_set_port_num(&attr->alt_ah_attr,
-				     cmd->base.alt_dest.port_num);
-		rdma_ah_set_make_grd(&attr->alt_ah_attr, false);
-	}
+	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
+					 &cmd->base.alt_dest);
 
 	ret = ib_modify_qp_with_udata(qp, attr,
 				      modify_qp_mask(qp->qp_type,
drivers/infiniband/core/uverbs_main.c

@@ -595,7 +595,6 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
 {
 	struct ib_uverbs_async_event_file *ev_file;
 	struct file *filp;
-	int ret;
 
 	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
 	if (!ev_file)
@@ -621,21 +620,11 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
 	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
 			      ib_dev,
 			      ib_uverbs_event_handler);
-	ret = ib_register_event_handler(&uverbs_file->event_handler);
-	if (ret)
-		goto err_put_file;
+	ib_register_event_handler(&uverbs_file->event_handler);
 
 	/* At that point async file stuff was fully set */
 
 	return filp;
 
-err_put_file:
-	fput(filp);
-	kref_put(&uverbs_file->async_file->ref,
-		 ib_uverbs_release_async_event_file);
-	uverbs_file->async_file = NULL;
-	return ERR_PTR(ret);
-
 err_put_refs:
 	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
 	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
drivers/infiniband/core/verbs.c

@@ -180,39 +180,29 @@ EXPORT_SYMBOL(ib_rate_to_mbps);
 __attribute_const__ enum rdma_transport_type
 rdma_node_get_transport(enum rdma_node_type node_type)
 {
-	switch (node_type) {
-	case RDMA_NODE_IB_CA:
-	case RDMA_NODE_IB_SWITCH:
-	case RDMA_NODE_IB_ROUTER:
-		return RDMA_TRANSPORT_IB;
-	case RDMA_NODE_RNIC:
-		return RDMA_TRANSPORT_IWARP;
-	case RDMA_NODE_USNIC:
+
+	if (node_type == RDMA_NODE_USNIC)
 		return RDMA_TRANSPORT_USNIC;
-	case RDMA_NODE_USNIC_UDP:
+	if (node_type == RDMA_NODE_USNIC_UDP)
 		return RDMA_TRANSPORT_USNIC_UDP;
-	default:
-		BUG();
-		return 0;
-	}
+	if (node_type == RDMA_NODE_RNIC)
+		return RDMA_TRANSPORT_IWARP;
+
+	return RDMA_TRANSPORT_IB;
 }
 EXPORT_SYMBOL(rdma_node_get_transport);
 
 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
 {
+	enum rdma_transport_type lt;
 	if (device->get_link_layer)
 		return device->get_link_layer(device, port_num);
 
-	switch (rdma_node_get_transport(device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	lt = rdma_node_get_transport(device->node_type);
+	if (lt == RDMA_TRANSPORT_IB)
 		return IB_LINK_LAYER_INFINIBAND;
-	case RDMA_TRANSPORT_IWARP:
-	case RDMA_TRANSPORT_USNIC:
-	case RDMA_TRANSPORT_USNIC_UDP:
-		return IB_LINK_LAYER_ETHERNET;
-	default:
-		return IB_LINK_LAYER_UNSPECIFIED;
-	}
+
+	return IB_LINK_LAYER_ETHERNET;
 }
 EXPORT_SYMBOL(rdma_port_get_link_layer);
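Note: the switch becomes an if-chain whose fall-through default is RDMA_TRANSPORT_IB, removing the BUG() on unknown node types. A standalone check (enums stubbed, not kernel code) that the new chain preserves the old mapping for every defined node type:

/* gcc -o transport transport.c && ./transport */
#include <assert.h>

enum rdma_node_type { RDMA_NODE_IB_CA, RDMA_NODE_IB_SWITCH,
		      RDMA_NODE_IB_ROUTER, RDMA_NODE_RNIC,
		      RDMA_NODE_USNIC, RDMA_NODE_USNIC_UDP };
enum rdma_transport_type { RDMA_TRANSPORT_IB, RDMA_TRANSPORT_IWARP,
			   RDMA_TRANSPORT_USNIC, RDMA_TRANSPORT_USNIC_UDP };

static enum rdma_transport_type transport(enum rdma_node_type t)
{
	if (t == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (t == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (t == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;
	return RDMA_TRANSPORT_IB;	/* CA, switch, router */
}

int main(void)
{
	assert(transport(RDMA_NODE_IB_CA) == RDMA_TRANSPORT_IB);
	assert(transport(RDMA_NODE_IB_SWITCH) == RDMA_TRANSPORT_IB);
	assert(transport(RDMA_NODE_IB_ROUTER) == RDMA_TRANSPORT_IB);
	assert(transport(RDMA_NODE_RNIC) == RDMA_TRANSPORT_IWARP);
	assert(transport(RDMA_NODE_USNIC) == RDMA_TRANSPORT_USNIC);
	assert(transport(RDMA_NODE_USNIC_UDP) == RDMA_TRANSPORT_USNIC_UDP);
	return 0;
}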
drivers/infiniband/hw/mlx4/alias_GUID.c

@@ -781,7 +781,7 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
-		/* If there is pending one should cancell then run, otherwise
+		/* If there is pending one should cancel then run, otherwise
 		 * won't run till previous one is ended as same work
 		 * struct is used.
 		 */
drivers/infiniband/hw/mlx4/cq.c

@@ -637,7 +637,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
 	struct mlx4_ib_qp *qp;
 
 	*npolled = 0;
-	/* Find uncompleted WQEs belonging to that cq and retrun
+	/* Find uncompleted WQEs belonging to that cq and return
 	 * simulated FLUSH_ERR completions
 	 */
 	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
drivers/infiniband/hw/mlx4/mcg.c

@@ -808,8 +808,7 @@ static ssize_t sysfs_show_group(struct device *dev,
 		struct device_attribute *attr, char *buf);
 
 static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
-					 union ib_gid *mgid, int create,
-					 gfp_t gfp_mask)
+					 union ib_gid *mgid, int create)
 {
 	struct mcast_group *group, *cur_group;
 	int is_mgid0;
@@ -825,7 +824,7 @@ static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
 	if (!create)
 		return ERR_PTR(-ENOENT);
 
-	group = kzalloc(sizeof *group, gfp_mask);
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	if (!group)
 		return ERR_PTR(-ENOMEM);
@@ -892,7 +891,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
 	case IB_MGMT_METHOD_GET_RESP:
 	case IB_SA_METHOD_DELETE_RESP:
 		mutex_lock(&ctx->mcg_table_lock);
-		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
+		group = acquire_group(ctx, &rec->mgid, 0);
 		mutex_unlock(&ctx->mcg_table_lock);
 		if (IS_ERR(group)) {
 			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
@@ -954,7 +953,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
 	req->sa_mad = *sa_mad;
 
 	mutex_lock(&ctx->mcg_table_lock);
-	group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
+	group = acquire_group(ctx, &rec->mgid, may_create);
 	mutex_unlock(&ctx->mcg_table_lock);
 	if (IS_ERR(group)) {
 		kfree(req);
drivers/infiniband/hw/mlx4/qp.c

@@ -748,7 +748,7 @@ static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
 	INIT_LIST_HEAD(&qp->gid_list);
 	INIT_LIST_HEAD(&qp->steering_rules);
 
-	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_ETHERTYPE;
+	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
 	qp->state = IB_QPS_RESET;
 
 	/* Set dummy send resources to be compatible with HV and PRM */
@@ -812,6 +812,9 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
 		return ERR_PTR(-EFAULT);
 	}
 
+	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	if (ucmd.comp_mask || ucmd.reserved1)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -1046,9 +1049,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 
 		if (src == MLX4_IB_RWQ_SRC) {
-			if (ucmd.wq.comp_mask || ucmd.wq.reserved1 ||
-			    ucmd.wq.reserved[0] || ucmd.wq.reserved[1] ||
-			    ucmd.wq.reserved[2]) {
+			if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
+			    ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
 				pr_debug("user command isn't supported\n");
 				err = -EOPNOTSUPP;
 				goto err;
@@ -2027,8 +2029,8 @@ static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
  */
 static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
 {
-	int err = 0;
 	int i;
+	int err;
 
 	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
 		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
@@ -2723,19 +2725,17 @@ enum {
 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			      int attr_mask, struct ib_udata *udata)
 {
+	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-	int ll;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ll = IB_LINK_LAYER_UNSPECIFIED;
-	} else {
+	if (cur_state != new_state || cur_state != IB_QPS_RESET) {
 		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
 	}
@@ -4146,8 +4146,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 	if (!(udata && pd->uobject))
 		return ERR_PTR(-EINVAL);
 
-	required_cmd_sz = offsetof(typeof(ucmd), reserved) +
-			  sizeof(ucmd.reserved);
+	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
+			  sizeof(ucmd.comp_mask);
 	if (udata->inlen < required_cmd_sz) {
 		pr_debug("invalid inlen\n");
 		return ERR_PTR(-EINVAL);
drivers/infiniband/hw/mlx5/cq.c

@@ -499,7 +499,7 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
 	struct mlx5_ib_qp *qp;
 
 	*npolled = 0;
-	/* Find uncompleted WQEs belonging to that cq and retrun mmics ones */
+	/* Find uncompleted WQEs belonging to that cq and return mmics ones */
 	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
 		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
 		if (*npolled >= num_entries)
drivers/infiniband/hw/mlx5/mad.c

@@ -204,7 +204,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
 	int err;
 	void *out_cnt;
 
-	/* Decalring support of extended counters */
+	/* Declaring support of extended counters */
 	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
 		struct ib_class_port_info cpi = {};
drivers/infiniband/hw/mlx5/main.c

@@ -802,8 +802,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
 			uhw->outlen)) {
-		resp.mlx5_ib_support_multi_pkt_send_wqes =
-			MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
+			resp.mlx5_ib_support_multi_pkt_send_wqes =
+				MLX5_IB_ALLOW_MPW;
+
+		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+			resp.mlx5_ib_support_multi_pkt_send_wqes |=
+				MLX5_IB_SUPPORT_EMPW;
+
 		resp.response_length +=
 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
 	}
@@ -811,6 +817,27 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	if (field_avail(typeof(resp), reserved, uhw->outlen))
 		resp.response_length += sizeof(resp.reserved);
 
+	if (field_avail(typeof(resp), sw_parsing_caps,
+			uhw->outlen)) {
+		resp.response_length += sizeof(resp.sw_parsing_caps);
+		if (MLX5_CAP_ETH(mdev, swp)) {
+			resp.sw_parsing_caps.sw_parsing_offloads |=
+				MLX5_IB_SW_PARSING;
+
+			if (MLX5_CAP_ETH(mdev, swp_csum))
+				resp.sw_parsing_caps.sw_parsing_offloads |=
+					MLX5_IB_SW_PARSING_CSUM;
+
+			if (MLX5_CAP_ETH(mdev, swp_lso))
+				resp.sw_parsing_caps.sw_parsing_offloads |=
+					MLX5_IB_SW_PARSING_LSO;
+
+			if (resp.sw_parsing_caps.sw_parsing_offloads)
+				resp.sw_parsing_caps.supported_qpts =
+					BIT(IB_QPT_RAW_PACKET);
+		}
+	}
+
 	if (uhw->outlen) {
 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -2104,7 +2131,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
  * it won't fall into the multicast flow steering table and this rule
  * could steal other multicast packets.
  */
-static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
+static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
 {
 	union ib_flow_spec *flow_spec;
@@ -2316,10 +2343,31 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 	return err ? ERR_PTR(err) : prio;
 }
 
-static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
-						     struct mlx5_ib_flow_prio *ft_prio,
-						     const struct ib_flow_attr *flow_attr,
-						     struct mlx5_flow_destination *dst)
+static void set_underlay_qp(struct mlx5_ib_dev *dev,
+			    struct mlx5_flow_spec *spec,
+			    u32 underlay_qpn)
+{
+	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
+					   spec->match_criteria,
+					   misc_parameters);
+	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+					   misc_parameters);
+
+	if (underlay_qpn &&
+	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+				      ft_field_support.bth_dst_qp)) {
+		MLX5_SET(fte_match_set_misc,
+			 misc_params_v, bth_dst_qp, underlay_qpn);
+		MLX5_SET(fte_match_set_misc,
+			 misc_params_c, bth_dst_qp, 0xffffff);
+	}
+}
+
+static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
+						      struct mlx5_ib_flow_prio *ft_prio,
+						      const struct ib_flow_attr *flow_attr,
+						      struct mlx5_flow_destination *dst,
+						      u32 underlay_qpn)
 {
 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
 	struct mlx5_ib_flow_handler *handler;
@@ -2355,6 +2403,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
 	}
 
+	if (!flow_is_multicast_only(flow_attr))
+		set_underlay_qp(dev, spec, underlay_qpn);
+
 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
 	if (is_drop) {
 		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
@@ -2394,6 +2445,14 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	return err ? ERR_PTR(err) : handler;
 }
 
+static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
+						     struct mlx5_ib_flow_prio *ft_prio,
+						     const struct ib_flow_attr *flow_attr,
+						     struct mlx5_flow_destination *dst)
+{
+	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
+}
+
 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
 							   struct mlx5_ib_flow_prio *ft_prio,
 							   struct ib_flow_attr *flow_attr,
@@ -2530,6 +2589,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
 	struct mlx5_ib_flow_prio *ft_prio;
 	int err;
+	int underlay_qpn;
 
 	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
 		return ERR_PTR(-ENOMEM);
@@ -2570,8 +2630,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 			handler = create_dont_trap_rule(dev, ft_prio,
 							flow_attr, dst);
 		} else {
-			handler = create_flow_rule(dev, ft_prio, flow_attr,
-						   dst);
+			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
+					mqp->underlay_qpn : 0;
+			handler = _create_flow_rule(dev, ft_prio, flow_attr,
+						    dst, underlay_qpn);
 		}
 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
@@ -3793,6 +3855,8 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
 	if (!dbg->timeout_debugfs)
 		goto out_debugfs;
 
+	dev->delay_drop.dbg = dbg;
+
 	return 0;
 
 out_debugfs:
@@ -3817,8 +3881,8 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
 }
 
-const struct cpumask *mlx5_ib_get_vector_affinity(struct ib_device *ibdev,
-						  int comp_vector)
+static const struct cpumask *
+mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -503,7 +503,7 @@ struct mlx5_ib_mr {
 	struct mlx5_shared_mr_info	*smr_info;
 	struct list_head	list;
 	int			order;
-	int			umred;
+	bool			allocated_from_cache;
 	int			npages;
 	struct mlx5_ib_dev     *dev;
 	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
drivers/infiniband/hw/mlx5/mr.c

@@ -48,8 +48,7 @@ enum {
 #define MLX5_UMR_ALIGN 2048
 
 static int clean_mr(struct mlx5_ib_mr *mr);
-static int max_umr_order(struct mlx5_ib_dev *dev);
-static int use_umr(struct mlx5_ib_dev *dev, int order);
+static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -184,7 +183,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 			break;
 		}
 		mr->order = ent->order;
-		mr->umred = 1;
+		mr->allocated_from_cache = 1;
 		mr->dev = dev;
 		MLX5_SET(mkc, mkc, free, 1);
@@ -497,7 +496,7 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 	int i;
 
 	c = order2idx(dev, order);
-	last_umr_cache_entry = order2idx(dev, max_umr_order(dev));
+	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
 	if (c < 0 || c > last_umr_cache_entry) {
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
 		return NULL;
@@ -677,12 +676,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 		queue_work(cache->wq, &ent->work);
 
-		if (i > MAX_UMR_CACHE_ENTRY) {
+		if (i > MR_CACHE_LAST_STD_ENTRY) {
 			mlx5_odp_init_mr_cache_entry(ent);
 			continue;
 		}
 
-		if (!use_umr(dev, ent->order))
+		if (ent->order > mr_cache_max_order(dev))
 			continue;
 
 		ent->page = PAGE_SHIFT;
@@ -809,28 +808,24 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 	return ERR_PTR(err);
 }
 
-static int get_octo_len(u64 addr, u64 len, int page_size)
+static int get_octo_len(u64 addr, u64 len, int page_shift)
 {
+	u64 page_size = 1ULL << page_shift;
 	u64 offset;
 	int npages;
 
 	offset = addr & (page_size - 1);
-	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
+	npages = ALIGN(len + offset, page_size) >> page_shift;
 	return (npages + 1) / 2;
 }
 
-static int max_umr_order(struct mlx5_ib_dev *dev)
+static int mr_cache_max_order(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return MAX_UMR_CACHE_ENTRY + 2;
+		return MR_CACHE_LAST_STD_ENTRY + 2;
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static int use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= max_umr_order(dev);
-}
-
 static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 		       int access_flags, struct ib_umem **umem,
 		       int *npages, int *page_shift, int *ncont,
@@ -904,7 +899,8 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
 	return err;
 }
 
-static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
+static struct mlx5_ib_mr *alloc_mr_from_cache(
+				  struct ib_pd *pd, struct ib_umem *umem,
 				  u64 virt_addr, u64 len, int npages,
 				  int page_shift, int order, int access_flags)
 {
@@ -936,16 +932,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	mr->mmkey.size = len;
 	mr->mmkey.pd = to_mpd(pd)->pdn;
 
-	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
-				 MLX5_IB_UPD_XLT_ENABLE);
-
-	if (err) {
-		mlx5_mr_cache_free(dev, mr);
-		return ERR_PTR(err);
-	}
-
-	mr->live = 1;
-
 	return mr;
 }
@@ -1111,7 +1097,8 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 				     u64 virt_addr, u64 length,
 				     struct ib_umem *umem, int npages,
-				     int page_shift, int access_flags)
+				     int page_shift, int access_flags,
+				     bool populate)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr;
@@ -1126,15 +1113,19 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
-		sizeof(*pas) * ((npages + 1) / 2) * 2;
+	mr->ibmr.pd = pd;
+	mr->access_flags = access_flags;
+
+	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	if (populate)
+		inlen += sizeof(*pas) * roundup(npages, 2);
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_1;
 	}
 	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-	if (!(access_flags & IB_ACCESS_ON_DEMAND))
+	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
 		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
 				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
@@ -1143,23 +1134,27 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
 
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, free, !populate);
 	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
 	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
 	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
 	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, umr_en, 1);
 
 	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
 	MLX5_SET64(mkc, mkc, len, length);
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
 	MLX5_SET(mkc, mkc, translations_octword_size,
-		 get_octo_len(virt_addr, length, 1 << page_shift));
+		 get_octo_len(virt_addr, length, page_shift));
 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
-		 get_octo_len(virt_addr, length, 1 << page_shift));
+	if (populate) {
+		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+			 get_octo_len(virt_addr, length, page_shift));
+	}
 
 	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err) {
@@ -1168,9 +1163,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
 	mr->desc_size = sizeof(struct mlx5_mtt);
-	mr->umem = umem;
 	mr->dev = dev;
-	mr->live = 1;
 	kvfree(in);
 
 	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1210,6 +1203,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	int ncont;
 	int order;
 	int err;
+	bool use_umr = true;
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
@@ -1228,27 +1222,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
 			  &page_shift, &ncont, &order);
 
-	if (err < 0)
+	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
-		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
-			     order, access_flags);
+	if (order <= mr_cache_max_order(dev)) {
+		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d", order);
 			mr = NULL;
 		}
-	} else if (access_flags & IB_ACCESS_ON_DEMAND &&
-		   !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
-		err = -EINVAL;
-		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
-		goto error;
+	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+		if (access_flags & IB_ACCESS_ON_DEMAND) {
+			err = -EINVAL;
+			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+			goto error;
+		}
+		use_umr = false;
 	}
 
 	if (!mr) {
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
@@ -1266,8 +1262,22 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	return &mr->ibmr;
+	if (use_umr) {
+		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+		if (access_flags & IB_ACCESS_ON_DEMAND)
+			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
 
+		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
+					 update_xlt_flags);
+		if (err) {
+			mlx5_ib_dereg_mr(&mr->ibmr);
+			return ERR_PTR(err);
+		}
+	}
+
+	mr->live = 1;
+	return &mr->ibmr;
 error:
 	ib_umem_release(umem);
 	return ERR_PTR(err);
@@ -1355,7 +1365,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
-		if (mr->umred) {
+		if (mr->allocated_from_cache) {
 			err = unreg_umr(dev, mr);
 			if (err)
 				mlx5_ib_warn(dev, "Failed to unregister MR\n");
@@ -1368,12 +1378,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			return err;
 
 		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
-				page_shift, access_flags);
+				page_shift, access_flags, true);
 
 		if (IS_ERR(mr))
 			return PTR_ERR(mr);
 
-		mr->umred = 0;
+		mr->allocated_from_cache = 0;
+		mr->live = 1;
 	} else {
 		/*
 		 * Send a UMR WQE
@@ -1461,7 +1472,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 static int clean_mr(struct mlx5_ib_mr *mr)
 {
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-	int umred = mr->umred;
+	int allocated_from_cache = mr->allocated_from_cache;
 	int err;
 
 	if (mr->sig) {
@@ -1479,20 +1490,20 @@ static int clean_mr(struct mlx5_ib_mr *mr)
 		mlx5_free_priv_descs(mr);
 
-	if (!umred) {
+	if (!allocated_from_cache) {
+		u32 key = mr->mmkey.key;
+
 		err = destroy_mkey(dev, mr);
+		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
-				     mr->mmkey.key, err);
+				     key, err);
 			return err;
 		}
 	} else {
 		mlx5_mr_cache_free(dev, mr);
 	}
 
-	if (!umred)
-		kfree(mr);
-
 	return 0;
 }
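Note: get_octo_len() now takes the page shift rather than a precomputed page size, deriving the size as 1ULL << page_shift inside the function. A standalone copy of the new arithmetic with the kernel macro stubbed, to show what the octoword count works out to:

/* gcc -o octo octo.c && ./octo */
#include <assert.h>
#include <stdint.h>

/* stub of the kernel's ALIGN() for 64-bit values */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static int get_octo_len(uint64_t addr, uint64_t len, int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t offset = addr & (page_size - 1);
	int npages = ALIGN(len + offset, page_size) >> page_shift;

	return (npages + 1) / 2;	/* two MTT entries per octoword */
}

int main(void)
{
	/* 8 pages of 4 KiB -> 8 MTT entries -> 4 octowords */
	assert(get_octo_len(0, 8 * 4096, 12) == 4);
	/* an unaligned start spills into a 9th page -> 5 octowords */
	assert(get_octo_len(100, 8 * 4096, 12) == 5);
	return 0;
}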
drivers/infiniband/hw/mlx5/qp.c

@@ -1083,11 +1083,16 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
+		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
 	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
 	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
 	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
 	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
+	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+	    MLX5_CAP_ETH(dev->mdev, swp))
+		MLX5_SET(sqc, sqc, allow_swp, 1);
 
 	wq = MLX5_ADDR_OF(sqc, sqc, wq);
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
drivers/infiniband/hw/mthca/mthca_cmd.c

@@ -698,7 +698,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 	for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
 		if (virt != -1) {
 			pages[nent * 2] = cpu_to_be64(virt);
-			virt += 1 << lg;
+			virt += 1ULL << lg;
 		}
 
 		pages[nent * 2 + 1] =
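Note: the one-character fix above matters because "1 << lg" is evaluated as a 32-bit int; once lg reaches 31 the shifted value goes negative and sign-extends when added to the 64-bit virt. A small standalone demonstration of the difference (the cast chain mimics what the old int-typed term did on common ABIs):

/* gcc -o shift shift.c && ./shift */
#include <stdio.h>

int main(void)
{
	int lg = 31;
	/* old behavior: a 32-bit result, sign-extended to 64 bits */
	unsigned long long old_term = (int)(1U << lg);
	/* new behavior: a true 64-bit shift */
	unsigned long long new_term = 1ULL << lg;

	printf("old term: 0x%llx\n", old_term);	/* 0xffffffff80000000 */
	printf("new term: 0x%llx\n", new_term);	/* 0x80000000 */
	return 0;
}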
drivers/infiniband/hw/nes/nes_verbs.c

@@ -481,21 +481,16 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
 	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 	props->lid = 1;
-	props->lmc = 0;
-	props->sm_lid = 0;
-	props->sm_sl = 0;
 	if (netif_queue_stopped(netdev))
 		props->state = IB_PORT_DOWN;
 	else if (nesvnic->linkup)
 		props->state = IB_PORT_ACTIVE;
 	else
 		props->state = IB_PORT_DOWN;
-	props->phys_state = 0;
 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
 		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
-	props->qkey_viol_cntr = 0;
 	props->active_width = IB_WIDTH_4X;
 	props->active_speed = IB_SPEED_SDR;
 	props->max_msg_sz = 0x80000000;
drivers/infiniband/hw/usnic/usnic_fwd.c

@@ -110,20 +110,12 @@ void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
 	spin_unlock(&ufdev->lock);
 }
 
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
 {
-	int status;
-
 	spin_lock(&ufdev->lock);
-	if (ufdev->inaddr == 0) {
+	if (!ufdev->inaddr)
 		ufdev->inaddr = inaddr;
-		status = 0;
-	} else {
-		status = -EFAULT;
-	}
 	spin_unlock(&ufdev->lock);
-
-	return status;
 }
 
 void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
drivers/infiniband/hw/usnic/usnic_fwd.h

@@ -75,7 +75,7 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
 void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
 
 void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
 void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
 void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
 void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
drivers/infiniband/hw/usnic/usnic_ib_main.c

@@ -351,7 +351,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 {
 	struct usnic_ib_dev *us_ibdev;
 	union ib_gid gid;
-	struct in_ifaddr *in;
+	struct in_device *ind;
 	struct net_device *netdev;
 
 	usnic_dbg("\n");
@@ -441,9 +441,11 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 	if (netif_carrier_ok(us_ibdev->netdev))
 		usnic_fwd_carrier_up(us_ibdev->ufdev);
 
-	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
-	if (in != NULL)
-		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+	ind = in_dev_get(netdev);
+	if (ind->ifa_list)
+		usnic_fwd_add_ipaddr(us_ibdev->ufdev,
+				     ind->ifa_list->ifa_address);
+	in_dev_put(ind);
 
 	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
 			    us_ibdev->ufdev->inaddr, &gid.raw[0]);
drivers/infiniband/sw/rxe/rxe_hw_counters.c

@@ -33,7 +33,7 @@
 #include "rxe.h"
 #include "rxe_hw_counters.h"
 
-const char * const rxe_counter_name[] = {
+static const char * const rxe_counter_name[] = {
 	[RXE_CNT_SENT_PKTS]           =  "sent_pkts",
 	[RXE_CNT_RCVD_PKTS]           =  "rcvd_pkts",
 	[RXE_CNT_DUP_REQ]             =  "duplicate_request",
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -337,6 +337,7 @@ struct ipoib_dev_priv {
 	struct rw_semaphore vlan_rwsem;
 	struct mutex mcast_mutex;
+	struct mutex sysfs_mutex;
 
 	struct rb_root  path_tree;
 	struct list_head path_list;
drivers/infiniband/ulp/ipoib/ipoib_cm.c

@@ -1506,9 +1506,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
 		return -EPERM;
 
-	if (!rtnl_trylock())
+	if (!mutex_trylock(&priv->sysfs_mutex))
 		return restart_syscall();
 
+	if (!rtnl_trylock()) {
+		mutex_unlock(&priv->sysfs_mutex);
+		return restart_syscall();
+	}
+
 	ret = ipoib_set_mode(dev, buf);
 
 	/* The assumption is that the function ipoib_set_mode returned
@@ -1517,6 +1522,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 	 */
 	if (ret != -EBUSY)
 		rtnl_unlock();
+	mutex_unlock(&priv->sysfs_mutex);
 
 	return (!ret || ret == -EBUSY) ? count : ret;
 }
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -1893,6 +1893,7 @@ static void ipoib_build_priv(struct net_device *dev)
 	spin_lock_init(&priv->lock);
 	init_rwsem(&priv->vlan_rwsem);
 	mutex_init(&priv->mcast_mutex);
+	mutex_init(&priv->sysfs_mutex);
 
 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
@@ -2242,13 +2243,7 @@ static struct net_device *ipoib_add_port(const char *format,
 	INIT_IB_EVENT_HANDLER(&priv->event_handler,
 			      priv->ca, ipoib_event);
-	result = ib_register_event_handler(&priv->event_handler);
-	if (result < 0) {
-		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
-		       "port %d (ret = %d)\n",
-		       hca->name, port, result);
-		goto event_failed;
-	}
+	ib_register_event_handler(&priv->event_handler);
 
 	result = register_netdev(priv->dev);
 	if (result) {
@@ -2281,8 +2276,6 @@ static struct net_device *ipoib_add_port(const char *format,
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	cancel_delayed_work(&priv->neigh_reap_task);
 	flush_workqueue(priv->wq);
-
-event_failed:
 	ipoib_dev_cleanup(priv->dev);
 
 device_init_failed:
@@ -2352,7 +2345,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 		cancel_delayed_work(&priv->neigh_reap_task);
 		flush_workqueue(priv->wq);
 
+		/* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
+		mutex_lock(&priv->sysfs_mutex);
 		unregister_netdev(priv->dev);
+		mutex_unlock(&priv->sysfs_mutex);
+
 		rn->free_rdma_netdev(priv->dev);
 
 		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
drivers/infiniband/ulp/ipoib/ipoib_vlan.c

@@ -133,12 +133,20 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);
 
-	if (!rtnl_trylock())
+	if (!mutex_trylock(&ppriv->sysfs_mutex))
 		return restart_syscall();
 
+	if (!rtnl_trylock()) {
+		mutex_unlock(&ppriv->sysfs_mutex);
+		return restart_syscall();
+	}
+
 	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
-	if (!priv)
+	if (!priv) {
+		rtnl_unlock();
+		mutex_unlock(&ppriv->sysfs_mutex);
 		return -ENOMEM;
+	}
 
 	down_write(&ppriv->vlan_rwsem);
@@ -164,8 +172,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 out:
 	up_write(&ppriv->vlan_rwsem);
-
 	rtnl_unlock();
+	mutex_unlock(&ppriv->sysfs_mutex);
 
 	if (result) {
 		free_netdev(priv->dev);
@@ -188,8 +196,13 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
 		return -EPERM;
 
-	if (!rtnl_trylock())
+	if (!mutex_trylock(&ppriv->sysfs_mutex))
 		return restart_syscall();
 
+	if (!rtnl_trylock()) {
+		mutex_unlock(&ppriv->sysfs_mutex);
+		return restart_syscall();
+	}
+
 	down_write(&ppriv->vlan_rwsem);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
@@ -208,6 +221,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 	}
 
 	rtnl_unlock();
+	mutex_unlock(&ppriv->sysfs_mutex);
 
 	if (dev) {
 		free_netdev(dev);
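Note: the ipoib changes above all follow one pattern: take the new sysfs_mutex first with trylock, then trylock the rtnl lock, and back out of the outer mutex if the inner lock is busy, letting the syscall restart instead of deadlocking against a path that holds rtnl while waiting on sysfs teardown. A user-space sketch of that ordering discipline with pthreads (names are illustrative, not the kernel locks):

/* gcc -o trylock trylock.c -lpthread && ./trylock */
#include <pthread.h>
#include <errno.h>

static pthread_mutex_t sysfs_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rtnl_mutex  = PTHREAD_MUTEX_INITIALIZER;

static int vlan_op(void)
{
	if (pthread_mutex_trylock(&sysfs_mutex))
		return -EAGAIN;		/* caller restarts the operation */

	if (pthread_mutex_trylock(&rtnl_mutex)) {
		/* inner lock busy: release the outer one before retrying */
		pthread_mutex_unlock(&sysfs_mutex);
		return -EAGAIN;
	}

	/* ... work done under both locks ... */

	pthread_mutex_unlock(&rtnl_mutex);
	pthread_mutex_unlock(&sysfs_mutex);
	return 0;
}

int main(void)
{
	return vlan_op() ? 1 : 0;
}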
drivers/infiniband/ulp/iser/iser_verbs.c

@@ -106,9 +106,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
 			      iser_event_handler);
-	if (ib_register_event_handler(&device->event_handler))
-		goto cq_err;
+	ib_register_event_handler(&device->event_handler);
 
 	return 0;
 
cq_err:
@@ -141,7 +139,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
 		comp->cq = NULL;
 	}
 
-	(void)ib_unregister_event_handler(&device->event_handler);
+	ib_unregister_event_handler(&device->event_handler);
 	ib_dealloc_pd(device->pd);
 
 	kfree(device->comps);
drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c

@@ -954,12 +954,7 @@ static int vema_register(struct opa_vnic_ctrl_port *cport)
 		INIT_IB_EVENT_HANDLER(&port->event_handler,
 				      cport->ibdev, opa_vnic_event);
-		ret = ib_register_event_handler(&port->event_handler);
-		if (ret) {
-			c_err("port %d: event handler register failed\n", i);
-			vema_unregister(cport);
-			return ret;
-		}
+		ib_register_event_handler(&port->event_handler);
 
 		idr_init(&port->vport_idr);
 		mutex_init(&port->lock);
drivers/infiniband/ulp/srpt/ib_srpt.c

@@ -2238,7 +2238,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
 				cqe, first_wr);
 		cqe = NULL;
 	}
-
+
 	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
 	if (ret) {
 		pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
@@ -2530,8 +2530,7 @@ static void srpt_add_one(struct ib_device *device)
 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
 			      srpt_event_handler);
-	if (ib_register_event_handler(&sdev->event_handler))
-		goto err_cm;
+	ib_register_event_handler(&sdev->event_handler);
 
 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
 		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
include/linux/mlx5/driver.h

@@ -1093,7 +1093,7 @@ enum {
 };
 
 enum {
-	MAX_UMR_CACHE_ENTRY = 20,
+	MR_CACHE_LAST_STD_ENTRY = 20,
 	MLX5_IMR_MTT_CACHE_ENTRY,
 	MLX5_IMR_KSM_CACHE_ENTRY,
 	MAX_MR_CACHE_ENTRIES
include/linux/mlx5/mlx5_ifc.h

@@ -295,8 +295,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
 	u8         inner_tcp_dport[0x1];
 	u8         inner_tcp_flags[0x1];
 	u8         reserved_at_37[0x9];
 
-	u8         reserved_at_40[0x40];
+	u8         reserved_at_40[0x1a];
+	u8         bth_dst_qp[0x1];
+	u8         reserved_at_5b[0x25];
 };
 
 struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -432,7 +434,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
 	u8         reserved_at_100[0xc];
 	u8         inner_ipv6_flow_label[0x14];
 
-	u8         reserved_at_120[0xe0];
+	u8         reserved_at_120[0x28];
+	u8         bth_dst_qp[0x18];
+
+	u8         reserved_at_160[0xa0];
 };
 
 struct mlx5_ifc_cmd_pas_bits {
@@ -600,7 +604,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         rss_ind_tbl_cap[0x4];
 	u8         reg_umr_sq[0x1];
 	u8         scatter_fcs[0x1];
-	u8         reserved_at_1a[0x1];
+	u8         enhanced_multi_pkt_send_wqe[0x1];
 	u8         tunnel_lso_const_out_ip_id[0x1];
 	u8         reserved_at_1c[0x2];
 	u8         tunnel_statless_gre[0x1];
@@ -2441,7 +2445,7 @@ struct mlx5_ifc_sqc_bits {
 	u8         cd_master[0x1];
 	u8         fre[0x1];
 	u8         flush_in_error_en[0x1];
-	u8         reserved_at_4[0x1];
+	u8         allow_multi_pkt_send_wqe[0x1];
 	u8         min_wqe_inline_mode[0x3];
 	u8         state[0x4];
 	u8         reg_umr[0x1];
include/rdma/ib_verbs.h

@@ -170,7 +170,7 @@ enum ib_device_cap_flags {
 	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
 	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
 	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
-	IB_DEVICE_INIT_TYPE			= (1 << 9),
+	/* Not in use, former INIT_TYPE		= (1 << 9),*/
 	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
 	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
 	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
@@ -185,7 +185,7 @@ enum ib_device_cap_flags {
 	 * which will always contain a usable lkey.
 	 */
 	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
-	IB_DEVICE_RESERVED /* old SEND_W_INV */	= (1 << 16),
+	/* Reserved, old SEND_W_INV		= (1 << 16),*/
 	IB_DEVICE_MEM_WINDOW			= (1 << 17),
 	/*
 	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -220,7 +220,7 @@ enum ib_device_cap_flags {
 	 * of I/O operations with single completion queue managed
 	 * by hardware.
	 */
-	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
+	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
 	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
 	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
 	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
@@ -2413,8 +2413,8 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
 		       enum rdma_link_layer ll);
 
-int ib_register_event_handler(struct ib_event_handler *event_handler);
-int ib_unregister_event_handler(struct ib_event_handler *event_handler);
+void ib_register_event_handler(struct ib_event_handler *event_handler);
+void ib_unregister_event_handler(struct ib_event_handler *event_handler);
 void ib_dispatch_event(struct ib_event *event);
 
 int ib_query_port(struct ib_device *device,
include/uapi/rdma/mlx4-abi.h

@@ -98,8 +98,7 @@ struct mlx4_ib_create_srq_resp {
 struct mlx4_ib_create_qp_rss {
 	__u64   rx_hash_fields_mask;
 	__u8    rx_hash_function;
-	__u8    rx_key_len;
-	__u8    reserved[6];
+	__u8    reserved[7];
 	__u8    rx_hash_key[40];
 	__u32   comp_mask;
 	__u32   reserved1;
@@ -111,8 +110,8 @@ struct mlx4_ib_create_qp {
 	__u8	log_sq_bb_count;
 	__u8	log_sq_stride;
 	__u8	sq_no_prefetch;
-	__u32	inl_recv_sz;
+	__u8	reserved;
+	__u32	inl_recv_sz;
 };
 
 struct mlx4_ib_create_wq {
@@ -121,7 +120,6 @@ struct mlx4_ib_create_wq {
 	__u8	log_range_size;
 	__u8	reserved[3];
 	__u32   comp_mask;
-	__u32   reserved1;
 };
 
 struct mlx4_ib_modify_wq {
include/uapi/rdma/mlx5-abi.h

@@ -168,6 +168,28 @@ struct mlx5_packet_pacing_caps {
 	__u32 reserved;
 };
 
+enum mlx5_ib_mpw_caps {
+	MPW_RESERVED		= 1 << 0,
+	MLX5_IB_ALLOW_MPW	= 1 << 1,
+	MLX5_IB_SUPPORT_EMPW	= 1 << 2,
+};
+
+enum mlx5_ib_sw_parsing_offloads {
+	MLX5_IB_SW_PARSING = 1 << 0,
+	MLX5_IB_SW_PARSING_CSUM = 1 << 1,
+	MLX5_IB_SW_PARSING_LSO = 1 << 2,
+};
+
+struct mlx5_ib_sw_parsing_caps {
+	__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */
+
+	/* Corresponding bit will be set if qp type from
+	 * 'enum ib_qp_type' is supported, e.g.
+	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+	 */
+	__u32 supported_qpts;
+};
+
 struct mlx5_ib_query_device_resp {
 	__u32	comp_mask;
 	__u32	response_length;
@@ -177,6 +199,7 @@ struct mlx5_ib_query_device_resp {
 	struct mlx5_packet_pacing_caps	packet_pacing_caps;
 	__u32	mlx5_ib_support_multi_pkt_send_wqes;
 	__u32	reserved;
+	struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
 };
 
 struct mlx5_ib_create_cq {
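Note: the new ABI words are plain bitmasks, so a userspace consumer decodes them with the flag values above. A small stand-alone sketch (the flag values are copied from the header; the structs and sample data are trimmed stand-ins for illustration, not the full query-device response):

/* gcc -o caps caps.c && ./caps */
#include <stdio.h>
#include <stdint.h>

enum { MLX5_IB_ALLOW_MPW = 1 << 1, MLX5_IB_SUPPORT_EMPW = 1 << 2 };
enum { MLX5_IB_SW_PARSING = 1 << 0, MLX5_IB_SW_PARSING_CSUM = 1 << 1,
       MLX5_IB_SW_PARSING_LSO = 1 << 2 };

struct sw_parsing_caps {
	uint32_t sw_parsing_offloads;
	uint32_t supported_qpts;	/* one bit per enum ib_qp_type */
};

int main(void)
{
	/* sample values a device might report */
	uint32_t mpw = MLX5_IB_ALLOW_MPW | MLX5_IB_SUPPORT_EMPW;
	struct sw_parsing_caps caps = {
		.sw_parsing_offloads = MLX5_IB_SW_PARSING |
				       MLX5_IB_SW_PARSING_CSUM,
		.supported_qpts = 1 << 8,	/* e.g. IB_QPT_RAW_PACKET */
	};

	if (mpw & MLX5_IB_SUPPORT_EMPW)
		printf("enhanced multi-packet send WQEs supported\n");
	if (caps.sw_parsing_offloads & MLX5_IB_SW_PARSING_CSUM)
		printf("SW parsing with checksum offload supported\n");
	return 0;
}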