openanolis / cloud-kernel
Commit 175e8efe
Authored May 20, 2015 by Doug Ledford
Merge branches 'bart-srp', 'generic-errors', 'ira-cleanups' and 'mwang-v8' into k.o/for-4.2
Parents: 985aa495 3c88f3dc f766c58f 5d9fb044
Showing 48 changed files with 1008 additions and 644 deletions (+1008, -644)
drivers/infiniband/core/agent.c                  +1    -1
drivers/infiniband/core/cache.c                  +25   -36
drivers/infiniband/core/cm.c                     +17   -3
drivers/infiniband/core/cma.c                    +128  -157
drivers/infiniband/core/device.c                 +31   -50
drivers/infiniband/core/mad.c                    +51   -51
drivers/infiniband/core/mad_priv.h               +2    -2
drivers/infiniband/core/multicast.c              +3    -9
drivers/infiniband/core/sa_query.c               +17   -13
drivers/infiniband/core/smi.c                    +2    -2
drivers/infiniband/core/sysfs.c                  +1    -0
drivers/infiniband/core/ucm.c                    +1    -2
drivers/infiniband/core/ucma.c                   +6    -19
drivers/infiniband/core/user_mad.c               +24   -20
drivers/infiniband/core/verbs.c                  +67   -4
drivers/infiniband/hw/amso1100/c2_provider.c     +18   -0
drivers/infiniband/hw/cxgb3/iwch_provider.c      +18   -0
drivers/infiniband/hw/cxgb4/provider.c           +18   -0
drivers/infiniband/hw/ehca/ehca_iverbs.h         +3    -0
drivers/infiniband/hw/ehca/ehca_main.c           +18   -0
drivers/infiniband/hw/ipath/ipath_verbs.c        +18   -0
drivers/infiniband/hw/mlx4/main.c                +22   -0
drivers/infiniband/hw/mlx5/main.c                +18   -0
drivers/infiniband/hw/mthca/mthca_provider.c     +18   -0
drivers/infiniband/hw/nes/nes_verbs.c            +17   -1
drivers/infiniband/hw/ocrdma/ocrdma_main.c       +18   -0
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h      +3    -0
drivers/infiniband/hw/qib/qib_verbs.c            +18   -0
drivers/infiniband/hw/usnic/usnic_ib_main.c      +17   -0
drivers/infiniband/hw/usnic/usnic_ib_verbs.h     +2    -0
drivers/infiniband/ulp/ipoib/ipoib_main.c        +8    -7
drivers/infiniband/ulp/iser/iser_verbs.c         +18   -10
drivers/infiniband/ulp/isert/ib_isert.c          +13   -6
drivers/infiniband/ulp/srp/ib_srp.c              +9    -7
include/rdma/ib_mad.h                            +2    -2
include/rdma/ib_verbs.h                          +299  -2
include/rdma/rdma_cm.h                           +2    -0
net/rds/af_rds.c                                 +0    -9
net/rds/ib.h                                     +0    -1
net/rds/ib_cm.c                                  +3    -33
net/rds/ib_recv.c                                +2    -2
net/rds/ib_send.c                                +2    -36
net/rds/rdma_transport.c                         +3    -31
net/rds/rds.h                                    +0    -1
net/sunrpc/xprtrdma/frwr_ops.c                   +2    -2
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c          +2    -2
net/sunrpc/xprtrdma/svc_rdma_transport.c         +36   -38
net/sunrpc/xprtrdma/verbs.c                      +5    -85
drivers/infiniband/core/agent.c

@@ -156,7 +156,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error1;
 	}

-	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+	if (rdma_cap_ib_smi(device, port_num)) {
 		/* Obtain send only MAD agent for SMI QP */
 		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
 							    IB_QPT_SMI, NULL, 0,
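The rdma_cap_*() helpers used above are introduced by this merge in include/rdma/ib_verbs.h (+299 -2, diff not shown in this capture). A minimal sketch of the pattern, assuming the per-port immutable data added later in the series and an RDMA_CORE_CAP_IB_SMI flag name:

/* Sketch only: the real helper lives in include/rdma/ib_verbs.h; the flag
 * name and port_immutable layout are assumptions based on this merge. */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	/* True when the port speaks the IB subnet-management interface. */
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_IB_SMI;
}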
drivers/infiniband/core/cache.c

@@ -58,17 +58,6 @@ struct ib_update_work {
 	u8         port_num;
 };

-static inline int start_port(struct ib_device *device)
-{
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
-}
-
-static inline int end_port(struct ib_device *device)
-{
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-		0 : device->phys_port_cnt;
-}
-
 int ib_get_cached_gid(struct ib_device *device,
 		      u8                port_num,
 		      int               index,

@@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device,
 	unsigned long flags;
 	int ret = 0;

-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;

 	read_lock_irqsave(&device->cache.lock, flags);

-	cache = device->cache.gid_cache[port_num - start_port(device)];
+	cache = device->cache.gid_cache[port_num - rdma_start_port(device)];

 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;

@@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device,
 	read_lock_irqsave(&device->cache.lock, flags);

-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		cache = device->cache.gid_cache[p];
 		for (i = 0; i < cache->table_len; ++i) {
 			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
-				*port_num = p + start_port(device);
+				*port_num = p + rdma_start_port(device);
 				if (index)
 					*index = i;
 				ret = 0;

@@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device,
 	unsigned long flags;
 	int ret = 0;

-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;

 	read_lock_irqsave(&device->cache.lock, flags);

-	cache = device->cache.pkey_cache[port_num - start_port(device)];
+	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;

@@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device,
 	int ret = -ENOENT;
 	int partial_ix = -1;

-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;

 	read_lock_irqsave(&device->cache.lock, flags);

-	cache = device->cache.pkey_cache[port_num - start_port(device)];
+	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

 	*index = -1;

@@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 	int i;
 	int ret = -ENOENT;

-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;

 	read_lock_irqsave(&device->cache.lock, flags);

-	cache = device->cache.pkey_cache[port_num - start_port(device)];
+	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

 	*index = -1;

@@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device,
 	unsigned long flags;
 	int ret = 0;

-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;

 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
+	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
 	read_unlock_irqrestore(&device->cache.lock, flags);

 	return ret;

@@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device,
 	write_lock_irq(&device->cache.lock);

-	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
-	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];
+	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+	old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];

-	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
-	device->cache.gid_cache [port - start_port(device)] = gid_cache;
+	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+	device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;

-	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
+	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

 	write_unlock_irq(&device->cache.lock);

@@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device)
 	device->cache.pkey_cache =
 		kmalloc(sizeof *device->cache.pkey_cache *
-			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
+			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
 	device->cache.gid_cache =
 		kmalloc(sizeof *device->cache.gid_cache *
-			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
+			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
 	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
-					  (end_port(device) -
-					   start_port(device) + 1),
+					  (rdma_end_port(device) -
+					   rdma_start_port(device) + 1),
 					  GFP_KERNEL);

 	if (!device->cache.pkey_cache || !device->cache.gid_cache ||

@@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device)
 		goto err;
 	}

-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		device->cache.pkey_cache[p] = NULL;
 		device->cache.gid_cache [p] = NULL;
-		ib_cache_update(device, p + start_port(device));
+		ib_cache_update(device, p + rdma_start_port(device));
 	}

 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,

@@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device)
 	return;

 err_cache:
-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
 		kfree(device->cache.gid_cache[p]);
 	}

@@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
 	ib_unregister_event_handler(&device->cache.event_handler);
 	flush_workqueue(ib_wq);

-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
 		kfree(device->cache.gid_cache[p]);
 	}
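The per-file start_port()/end_port() copies removed above are replaced by shared rdma_start_port()/rdma_end_port() helpers. A sketch of what those look like, mirroring the removed local versions; the actual definitions are part of the ib_verbs.h changes not shown in this capture:

/* Sketch only: mirrors the removed static helpers, now shared device-wide. */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	/* Switches number their single management port 0; HCAs start at 1. */
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline u8 rdma_end_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}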
drivers/infiniband/core/cm.c

@@ -3759,11 +3759,9 @@ static void cm_add_one(struct ib_device *ib_device)
 	};
 	unsigned long flags;
 	int ret;
+	int count = 0;
 	u8 i;

-	if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
-		return;
-
 	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
 			 ib_device->phys_port_cnt, GFP_KERNEL);
 	if (!cm_dev)

@@ -3782,6 +3780,9 @@ static void cm_add_one(struct ib_device *ib_device)
 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
 	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+		if (!rdma_cap_ib_cm(ib_device, i))
+			continue;
+
 		port = kzalloc(sizeof *port, GFP_KERNEL);
 		if (!port)
 			goto error1;

@@ -3808,7 +3809,13 @@ static void cm_add_one(struct ib_device *ib_device)
 		ret = ib_modify_port(ib_device, i, 0, &port_modify);
 		if (ret)
 			goto error3;
+
+		count++;
 	}
+
+	if (!count)
+		goto free;
+
 	ib_set_client_data(ib_device, &cm_client, cm_dev);

 	write_lock_irqsave(&cm.device_lock, flags);

@@ -3824,11 +3831,15 @@ static void cm_add_one(struct ib_device *ib_device)
 	port_modify.set_port_cap_mask = 0;
 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
 	while (--i) {
+		if (!rdma_cap_ib_cm(ib_device, i))
+			continue;
+
 		port = cm_dev->port[i-1];
 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
 		cm_remove_port_fs(port);
 	}
+free:
 	device_unregister(cm_dev->device);
 	kfree(cm_dev);
 }

@@ -3852,6 +3863,9 @@ static void cm_remove_one(struct ib_device *ib_device)
 	write_unlock_irqrestore(&cm.device_lock, flags);

 	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+		if (!rdma_cap_ib_cm(ib_device, i))
+			continue;
+
 		port = cm_dev->port[i-1];
 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
drivers/infiniband/core/cma.c

@@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18

+static const char * const cma_events[] = {
+	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
+	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
+	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved ",
+	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
+	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
+	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
+	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
+	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
+	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
+	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
+	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
+	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
+	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
+	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
+	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
+	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
+};
+
+const char *rdma_event_msg(enum rdma_cm_event_type event)
+{
+	size_t index = event;
+
+	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
+			cma_events[index] : "unrecognized event";
+}
+EXPORT_SYMBOL(rdma_event_msg);
+
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);

@@ -349,18 +377,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
 	return ret;
 }

+static inline int cma_validate_port(struct ib_device *device, u8 port,
+				    union ib_gid *gid, int dev_type)
+{
+	u8 found_port;
+	int ret = -ENODEV;
+
+	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
+		return ret;
+
+	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
+		return ret;
+
+	ret = ib_find_cached_gid(device, gid, &found_port, NULL);
+	if (port != found_port)
+		return -ENODEV;
+
+	return ret;
+}
+
 static int cma_acquire_dev(struct rdma_id_private *id_priv,
 			   struct rdma_id_private *listen_id_priv)
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	struct cma_device *cma_dev;
-	union ib_gid gid, iboe_gid;
+	union ib_gid gid, iboe_gid, *gidp;
 	int ret = -ENODEV;
-	u8 port, found_port;
-	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
-		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
-
-	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+	u8 port;
+
+	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 	    id_priv->id.ps == RDMA_PS_IPOIB)
 		return -EINVAL;

@@ -370,41 +415,36 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
-	if (listen_id_priv &&
-	    rdma_port_get_link_layer(listen_id_priv->id.device,
-				     listen_id_priv->id.port_num) == dev_ll) {
+
+	if (listen_id_priv) {
 		cma_dev = listen_id_priv->cma_dev;
 		port = listen_id_priv->id.port_num;
-		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
-		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
-			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
-						 &found_port, NULL);
-		else
-			ret = ib_find_cached_gid(cma_dev->device, &gid,
-						 &found_port, NULL);
-		if (!ret && (port == found_port)) {
-			id_priv->id.port_num = found_port;
+		gidp = rdma_protocol_roce(cma_dev->device, port) ?
+		       &iboe_gid : &gid;
+
+		ret = cma_validate_port(cma_dev->device, port, gidp,
+					dev_addr->dev_type);
+		if (!ret) {
+			id_priv->id.port_num = port;
 			goto out;
 		}
 	}
+
 	list_for_each_entry(cma_dev, &dev_list, list) {
 		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
 			if (listen_id_priv &&
 			    listen_id_priv->cma_dev == cma_dev &&
 			    listen_id_priv->id.port_num == port)
 				continue;
-			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
-				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
-				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
-					ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
-				else
-					ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
-				if (!ret && (port == found_port)) {
-					id_priv->id.port_num = found_port;
-					goto out;
-				}
-			}
+
+			gidp = rdma_protocol_roce(cma_dev->device, port) ?
+			       &iboe_gid : &gid;
+
+			ret = cma_validate_port(cma_dev->device, port, gidp,
+						dev_addr->dev_type);
+			if (!ret) {
+				id_priv->id.port_num = port;
+				goto out;
+			}
 		}
 	}

@@ -435,10 +475,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	pkey = ntohs(addr->sib_pkey);

 	list_for_each_entry(cur_dev, &dev_list, list) {
-		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
-			continue;
-
 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+			if (!rdma_cap_af_ib(cur_dev->device, p))
+				continue;
+
 			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
 				continue;

@@ -633,10 +673,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;

-	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
-	    == RDMA_TRANSPORT_IB &&
-	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
-	    == IB_LINK_LAYER_ETHERNET) {
+	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
+
+	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
 		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);

 		if (ret)

@@ -700,11 +739,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	int ret;
 	u16 pkey;

-	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
-	    IB_LINK_LAYER_INFINIBAND)
-		pkey = ib_addr_get_pkey(dev_addr);
-	else
+	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
 		pkey = 0xffff;
+	else
+		pkey = ib_addr_get_pkey(dev_addr);

 	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
 				  pkey, &qp_attr->pkey_index);

@@ -735,8 +773,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	int ret = 0;

 	id_priv = container_of(id, struct rdma_id_private, id);
-	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else

@@ -745,19 +782,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		if (qp_attr->qp_state == IB_QPS_RTR)
 			qp_attr->rq_psn = id_priv->seq_num;
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		if (!id_priv->cm_id.iw) {
 			qp_attr->qp_access_flags = 0;
 			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
-		break;
-	default:
+	} else
 		ret = -ENOSYS;
-		break;
-	}

 	return ret;
 }

@@ -935,13 +968,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
-	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
-	case IB_LINK_LAYER_INFINIBAND:
+	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
 		if (id_priv->query)
 			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
-		break;
-	default:
-		break;
 	}
 }

@@ -1013,17 +1042,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 		mc = container_of(id_priv->mc_list.next,
 				  struct cma_multicast, list);
 		list_del(&mc->list);
-		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
+		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
+				      id_priv->id.port_num)) {
 			ib_sa_free_multicast(mc->multicast.ib);
 			kfree(mc);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
+		} else
 			kref_put(&mc->mcref, release_mc);
-			break;
-		default:
-			break;
-		}
 	}
 }

@@ -1044,17 +1068,12 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	mutex_unlock(&id_priv->handler_mutex);

 	if (id_priv->cma_dev) {
-		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-		case RDMA_TRANSPORT_IB:
+		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
 			if (id_priv->cm_id.ib)
 				ib_destroy_cm_id(id_priv->cm_id.ib);
-			break;
-		case RDMA_TRANSPORT_IWARP:
+		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
 			if (id_priv->cm_id.iw)
 				iw_destroy_cm_id(id_priv->cm_id.iw);
-			break;
-		default:
-			break;
 		}
 		cma_leave_mc_groups(id_priv);
 		cma_release_dev(id_priv);

@@ -1632,8 +1651,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct rdma_cm_id *id;
 	int ret;

 	if (cma_family(id_priv) == AF_IB &&
-	    rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
+	    !rdma_cap_ib_cm(cma_dev->device, 1))
 		return;

 	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,

@@ -1974,26 +1992,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 		return -EINVAL;

 	atomic_inc(&id_priv->refcount);
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ret = cma_resolve_ib_route(id_priv, timeout_ms);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			ret = cma_resolve_iboe_route(id_priv);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	if (rdma_cap_ib_sa(id->device, id->port_num))
+		ret = cma_resolve_ib_route(id_priv, timeout_ms);
+	else if (rdma_protocol_roce(id->device, id->port_num))
+		ret = cma_resolve_iboe_route(id_priv);
+	else if (rdma_protocol_iwarp(id->device, id->port_num))
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}

 	if (ret)
 		goto err;

@@ -2035,7 +2042,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	mutex_lock(&lock);
 	list_for_each_entry(cur_dev, &dev_list, list) {
 		if (cma_family(id_priv) == AF_IB &&
-		    rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+		    !rdma_cap_ib_cm(cur_dev->device, 1))
 			continue;

 		if (!cma_dev)

@@ -2067,7 +2074,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 		goto out;

 	id_priv->id.route.addr.dev_addr.dev_type =
-		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
+		(rdma_protocol_ib(cma_dev->device, p)) ?
 		ARPHRD_INFINIBAND : ARPHRD_ETHER;

 	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);

@@ -2544,18 +2551,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	id_priv->backlog = backlog;
 	if (id->device) {
-		switch (rdma_node_get_transport(id->device->node_type)) {
-		case RDMA_TRANSPORT_IB:
+		if (rdma_cap_ib_cm(id->device, 1)) {
 			ret = cma_ib_listen(id_priv);
 			if (ret)
 				goto err;
-			break;
-		case RDMA_TRANSPORT_IWARP:
+		} else if (rdma_cap_iw_cm(id->device, 1)) {
 			ret = cma_iw_listen(id_priv, backlog);
 			if (ret)
 				goto err;
-			break;
-		default:
+		} else {
 			ret = -ENOSYS;
 			goto err;
 		}

@@ -2891,20 +2895,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		id_priv->srq = conn_param->srq;
 	}

-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num))
 		ret = cma_connect_iw(id_priv, conn_param);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}
 	if (ret)
 		goto err;

@@ -3007,8 +3006,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		id_priv->srq = conn_param->srq;
 	}

-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD) {
 			if (conn_param)
 				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,

@@ -3024,14 +3022,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 			else
 				ret = cma_rep_recv(id_priv);
 		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num))
 		ret = cma_accept_iw(id_priv, conn_param);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}

 	if (ret)
 		goto reject;

@@ -3075,8 +3069,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	if (!id_priv->cm_id.ib)
 		return -EINVAL;

-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
 						private_data, private_data_len);

@@ -3084,15 +3077,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 			ret = ib_send_cm_rej(id_priv->cm_id.ib,
 					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
 					     0, private_data, private_data_len);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = iw_cm_reject(id_priv->cm_id.iw,
 				   private_data, private_data_len);
-		break;
-	default:
+	} else
 		ret = -ENOSYS;
-		break;
-	}

 	return ret;
 }
 EXPORT_SYMBOL(rdma_reject);

@@ -3106,22 +3096,18 @@ int rdma_disconnect(struct rdma_cm_id *id)
 	if (!id_priv->cm_id.ib)
 		return -EINVAL;

-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		ret = cma_modify_qp_err(id_priv);
 		if (ret)
 			goto out;
 		/* Initiate or respond to a disconnect. */
 		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
 			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
-		break;
-	default:
+	} else
 		ret = -EINVAL;
-		break;
-	}
 out:
 	return ret;
 }

@@ -3367,24 +3353,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	list_add(&mc->list, &id_priv->mc_list);
 	spin_unlock(&id_priv->lock);

-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ret = cma_join_ib_multicast(id_priv, mc);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			kref_init(&mc->mcref);
-			ret = cma_iboe_join_multicast(id_priv, mc);
-			break;
-		default:
-			ret = -EINVAL;
-		}
-		break;
-	default:
+	if (rdma_protocol_roce(id->device, id->port_num)) {
+		kref_init(&mc->mcref);
+		ret = cma_iboe_join_multicast(id_priv, mc);
+	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
+		ret = cma_join_ib_multicast(id_priv, mc);
+	else
 		ret = -ENOSYS;
-		break;
-	}

 	if (ret) {
 		spin_lock_irq(&id_priv->lock);

@@ -3412,19 +3387,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
 				ib_detach_mcast(id->qp,
 						&mc->multicast.ib->rec.mgid,
 						be16_to_cpu(mc->multicast.ib->rec.mlid));
-			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
-				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-				case IB_LINK_LAYER_INFINIBAND:
-					ib_sa_free_multicast(mc->multicast.ib);
-					kfree(mc);
-					break;
-				case IB_LINK_LAYER_ETHERNET:
-					kref_put(&mc->mcref, release_mc);
-					break;
-				default:
-					break;
-				}
-			}
+
+			BUG_ON(id_priv->cma_dev->device != id->device);
+
+			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
+				ib_sa_free_multicast(mc->multicast.ib);
+				kfree(mc);
+			} else if (rdma_protocol_roce(id->device, id->port_num))
+				kref_put(&mc->mcref, release_mc);
+
 			return;
 		}
 	}
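The recurring change in cma.c is replacing node-level transport switches with per-port capability checks. A sketch of how a consumer dispatches with the new helpers; setup_ib_cm() and setup_iw_cm() here are hypothetical placeholders, not functions from this merge:

/* Sketch only: per-port dispatch instead of
 * switch (rdma_node_get_transport(device->node_type)). */
static int example_setup(struct rdma_cm_id *id)
{
	if (rdma_cap_ib_cm(id->device, id->port_num))
		return setup_ib_cm(id);		/* hypothetical IB CM path */
	else if (rdma_cap_iw_cm(id->device, id->port_num))
		return setup_iw_cm(id);		/* hypothetical iWARP CM path */

	return -ENOSYS;				/* port supports neither CM */
}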
drivers/infiniband/core/device.c

@@ -92,7 +92,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
 		IB_MANDATORY_FUNC(poll_cq),
 		IB_MANDATORY_FUNC(req_notify_cq),
 		IB_MANDATORY_FUNC(get_dma_mr),
-		IB_MANDATORY_FUNC(dereg_mr)
+		IB_MANDATORY_FUNC(dereg_mr),
+		IB_MANDATORY_FUNC(get_port_immutable)
 	};
 	int i;

@@ -151,18 +152,6 @@ static int alloc_name(char *name)
 	return 0;
 }

-static int start_port(struct ib_device *device)
-{
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
-}
-
-static int end_port(struct ib_device *device)
-{
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-		0 : device->phys_port_cnt;
-}
-
 /**
  * ib_alloc_device - allocate an IB device struct
  * @size:size of structure to allocate

@@ -222,42 +211,38 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
 	return 0;
 }

-static int read_port_table_lengths(struct ib_device *device)
+static int read_port_immutable(struct ib_device *device)
 {
-	struct ib_port_attr *tprops = NULL;
-	int num_ports, ret = -ENOMEM;
-	u8 port_index;
-
-	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
-	if (!tprops)
-		goto out;
-
-	num_ports = end_port(device) - start_port(device) + 1;
-
-	device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
-				       GFP_KERNEL);
-	device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
-				      GFP_KERNEL);
-	if (!device->pkey_tbl_len || !device->gid_tbl_len)
+	int ret = -ENOMEM;
+	u8 start_port = rdma_start_port(device);
+	u8 end_port = rdma_end_port(device);
+	u8 port;
+
+	/**
+	 * device->port_immutable is indexed directly by the port number to make
+	 * access to this data as efficient as possible.
+	 *
+	 * Therefore port_immutable is declared as a 1 based array with
+	 * potential empty slots at the beginning.
+	 */
+	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
+					 * (end_port + 1),
+					 GFP_KERNEL);
+	if (!device->port_immutable)
 		goto err;

-	for (port_index = 0; port_index < num_ports; ++port_index) {
-		ret = ib_query_port(device, port_index + start_port(device),
-				    tprops);
+	for (port = start_port; port <= end_port; ++port) {
+		ret = device->get_port_immutable(device, port,
+						 &device->port_immutable[port]);
 		if (ret)
 			goto err;
-		device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
-		device->gid_tbl_len[port_index]  = tprops->gid_tbl_len;
 	}

 	ret = 0;
 	goto out;
 err:
-	kfree(device->gid_tbl_len);
-	kfree(device->pkey_tbl_len);
+	kfree(device->port_immutable);
 out:
-	kfree(tprops);
 	return ret;
 }

@@ -294,9 +279,9 @@ int ib_register_device(struct ib_device *device,
 	spin_lock_init(&device->event_handler_lock);
 	spin_lock_init(&device->client_data_lock);

-	ret = read_port_table_lengths(device);
+	ret = read_port_immutable(device);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
+		printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
 		       device->name);
 		goto out;
 	}

@@ -305,8 +290,7 @@ int ib_register_device(struct ib_device *device,
 	if (ret) {
 		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
 		       device->name);
-		kfree(device->gid_tbl_len);
-		kfree(device->pkey_tbl_len);
+		kfree(device->port_immutable);
 		goto out;
 	}

@@ -348,9 +332,6 @@ void ib_unregister_device(struct ib_device *device)
 	list_del(&device->core_list);

-	kfree(device->gid_tbl_len);
-	kfree(device->pkey_tbl_len);
-
 	mutex_unlock(&device_mutex);

 	ib_device_unregister_sysfs(device);

@@ -575,7 +556,7 @@ int ib_query_port(struct ib_device *device,
 		  u8 port_num,
 		  struct ib_port_attr *port_attr)
 {
-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;

 	return device->query_port(device, port_num, port_attr);

@@ -653,7 +634,7 @@ int ib_modify_port(struct ib_device *device,
 	if (!device->modify_port)
 		return -ENOSYS;

-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;

 	return device->modify_port(device, port_num, port_modify_mask,

@@ -676,8 +657,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
 	union ib_gid tmp_gid;
 	int ret, port, i;

-	for (port = start_port(device); port <= end_port(device); ++port) {
-		for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) {
+	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
+		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
 			ret = ib_query_gid(device, port, i, &tmp_gid);
 			if (ret)
 				return ret;

@@ -709,7 +690,7 @@ int ib_find_pkey(struct ib_device *device,
 	u16 tmp_pkey;
 	int partial_ix = -1;

-	for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
+	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
 		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
 		if (ret)
 			return ret;
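get_port_immutable() becomes a mandatory device callback above, which is why every hw/ driver in the file list gains roughly 18 lines (those per-driver diffs are not shown in this capture). A sketch of what such a callback typically looks like for an IB-only driver; the field and flag names follow the core changes above but are assumptions here:

/* Sketch only: a hypothetical driver's get_port_immutable() callback. */
static int example_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	/* Cache the table lengths once, from a normal port query. */
	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len  = attr.gid_tbl_len;
	/* Declare the port's protocol; flag name assumed from this series. */
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

	return 0;
}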
drivers/infiniband/core/mad.c

@@ -179,12 +179,12 @@ static int is_vendor_method_in_use(
 	return 0;
 }

-int ib_response_mad(struct ib_mad *mad)
+int ib_response_mad(const struct ib_mad_hdr *hdr)
 {
-	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
-		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
-		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
+		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
+		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
 }
 EXPORT_SYMBOL(ib_response_mad);

@@ -791,7 +791,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-		if (ib_response_mad(&mad_priv->mad.mad) &&
+		if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) &&
 		    mad_agent_priv->agent.recv_handler) {
 			local->mad_priv = mad_priv;
 			local->recv_mad_agent = mad_agent_priv;

@@ -910,7 +910,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	return 0;
 }

-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
 {
 	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
 }

@@ -1628,7 +1628,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 	unsigned long flags;

 	spin_lock_irqsave(&port_priv->reg_lock, flags);
-	if (ib_response_mad(mad)) {
+	if (ib_response_mad(&mad->mad_hdr)) {
 		u32 hi_tid;
 		struct ib_mad_agent_private *entry;

@@ -1708,20 +1708,20 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 	return mad_agent;
 }

-static int validate_mad(struct ib_mad *mad, u32 qp_num)
+static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
 {
 	int valid = 0;

 	/* Make sure MAD base version is understood */
-	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
+	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
 		pr_err("MAD received with unsupported base version %d\n",
-			mad->mad_hdr.base_version);
+			mad_hdr->base_version);
 		goto out;
 	}

 	/* Filter SMI packets sent to other than QP0 */
-	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
-	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
+	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
 		if (qp_num == 0)
 			valid = 1;
 	} else {

@@ -1734,8 +1734,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num)
 	return valid;
 }

-static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
-		       struct ib_mad_hdr *mad_hdr)
+static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
+			    const struct ib_mad_hdr *mad_hdr)
 {
 	struct ib_rmpp_mad *rmpp_mad;

@@ -1747,16 +1747,16 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
 }

-static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
-				     struct ib_mad_recv_wc *rwc)
+static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
+				     const struct ib_mad_recv_wc *rwc)
 {
-	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
+	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
 		rwc->recv_buf.mad->mad_hdr.mgmt_class;
 }

-static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
-				   struct ib_mad_send_wr_private *wr,
-				   struct ib_mad_recv_wc *rwc )
+static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
+				   const struct ib_mad_send_wr_private *wr,
+				   const struct ib_mad_recv_wc *rwc )
 {
 	struct ib_ah_attr attr;
 	u8 send_resp, rcv_resp;

@@ -1765,8 +1765,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
 	u8 port_num = mad_agent_priv->agent.port_num;
 	u8 lmc;

-	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
-	rcv_resp = ib_response_mad(rwc->recv_buf.mad);
+	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
+	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

 	if (send_resp == rcv_resp)
 		/* both requests, or both responses. GIDs different */

@@ -1811,8 +1811,8 @@ static inline int is_direct(u8 class)
 }

 struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
-		 struct ib_mad_recv_wc *wc)
+ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
+		 const struct ib_mad_recv_wc *wc)
 {
 	struct ib_mad_send_wr_private *wr;
 	struct ib_mad *mad;

@@ -1836,7 +1836,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
 	 * been notified that the send has completed
 	 */
 	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
-		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
+		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
 		    wr->tid == mad->mad_hdr.tid &&
 		    wr->timeout &&
 		    rcv_has_same_class(wr, wc) &&

@@ -1879,7 +1879,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	}

 	/* Complete corresponding request */
-	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
+	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {

@@ -1979,7 +1979,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

 	/* Validate MAD */
-	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
+	if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
 		goto out;

 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);

@@ -2411,7 +2411,8 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
 	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
 			    agent_list) {
-		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
+		if (is_rmpp_data_mad(mad_agent_priv,
+				     mad_send_wr->send_buf.mad) &&
 		    &mad_send_wr->send_buf == send_buf)
 			return mad_send_wr;
 	}

@@ -2938,7 +2939,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	init_mad_qp(port_priv, &port_priv->qp_info[1]);

 	cq_size = mad_sendq_size + mad_recvq_size;
-	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+	has_smi = rdma_cap_ib_smi(device, port_num);
 	if (has_smi)
 		cq_size *= 2;

@@ -3057,9 +3058,6 @@ static void ib_mad_init_device(struct ib_device *device)
 {
 	int start, end, i;

-	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-		return;
-
 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		start = 0;
 		end   = 0;

@@ -3069,6 +3067,9 @@ static void ib_mad_init_device(struct ib_device *device)
 	}

 	for (i = start; i <= end; i++) {
+		if (!rdma_cap_ib_mad(device, i))
+			continue;
+
 		if (ib_mad_port_open(device, i)) {
 			dev_err(&device->dev, "Couldn't open port %d\n", i);
 			goto error;

@@ -3086,40 +3087,39 @@ static void ib_mad_init_device(struct ib_device *device)
 		dev_err(&device->dev, "Couldn't close port %d\n", i);

 error:
-	i--;
+	while (--i >= start) {
+		if (!rdma_cap_ib_mad(device, i))
+			continue;

-	while (i >= start) {
 		if (ib_agent_port_close(device, i))
 			dev_err(&device->dev,
(
&
device
->
dev
,
"Couldn't close port %d for agents
\n
"
,
i
);
"Couldn't close port %d for agents
\n
"
,
i
);
if
(
ib_mad_port_close
(
device
,
i
))
if
(
ib_mad_port_close
(
device
,
i
))
dev_err
(
&
device
->
dev
,
"Couldn't close port %d
\n
"
,
i
);
dev_err
(
&
device
->
dev
,
"Couldn't close port %d
\n
"
,
i
);
i
--
;
}
}
}
}
static
void
ib_mad_remove_device
(
struct
ib_device
*
device
)
static
void
ib_mad_remove_device
(
struct
ib_device
*
device
)
{
{
int
i
,
num_ports
,
cur_port
;
int
start
,
end
,
i
;
if
(
rdma_node_get_transport
(
device
->
node_type
)
!=
RDMA_TRANSPORT_IB
)
return
;
if
(
device
->
node_type
==
RDMA_NODE_IB_SWITCH
)
{
if
(
device
->
node_type
==
RDMA_NODE_IB_SWITCH
)
{
num_ports
=
1
;
start
=
0
;
cur_port
=
0
;
end
=
0
;
}
else
{
}
else
{
num_ports
=
device
->
phys_port_cnt
;
start
=
1
;
cur_port
=
1
;
end
=
device
->
phys_port_cnt
;
}
}
for
(
i
=
0
;
i
<
num_ports
;
i
++
,
cur_port
++
)
{
if
(
ib_agent_port_close
(
device
,
cur_port
))
for
(
i
=
start
;
i
<=
end
;
i
++
)
{
if
(
!
rdma_cap_ib_mad
(
device
,
i
))
continue
;
if
(
ib_agent_port_close
(
device
,
i
))
dev_err
(
&
device
->
dev
,
dev_err
(
&
device
->
dev
,
"Couldn't close port %d for agents
\n
"
,
"Couldn't close port %d for agents
\n
"
,
i
);
cur_port
);
if
(
ib_mad_port_close
(
device
,
i
))
if
(
ib_mad_port_close
(
device
,
cur_port
))
dev_err
(
&
device
->
dev
,
"Couldn't close port %d
\n
"
,
i
);
dev_err
(
&
device
->
dev
,
"Couldn't close port %d
\n
"
,
cur_port
);
}
}
}
}
...
...
drivers/infiniband/core/mad_priv.h
@@ -213,8 +213,8 @@ struct ib_mad_port_private {
 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);

 struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
-		 struct ib_mad_recv_wc *mad_recv_wc);
+ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
+		 const struct ib_mad_recv_wc *mad_recv_wc);

 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc);
...
drivers/infiniband/core/multicast.c
@@ -780,8 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler,
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
-	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
-	    IB_LINK_LAYER_INFINIBAND)
+	if (WARN_ON(!rdma_cap_ib_mcast(dev->device, event->element.port_num)))
		return;

	index = event->element.port_num - dev->start_port;
...
@@ -808,9 +807,6 @@ static void mcast_add_one(struct ib_device *device)
	int i;
	int count = 0;

-	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-		return;
-
	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
...
@@ -824,8 +820,7 @@ static void mcast_add_one(struct ib_device *device)
	}

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
-		    IB_LINK_LAYER_INFINIBAND)
+		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
			continue;
		port = &dev->port[i];
		port->dev = dev;
...
@@ -863,8 +858,7 @@ static void mcast_remove_one(struct ib_device *device)
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
-		    IB_LINK_LAYER_INFINIBAND) {
+		if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
...
drivers/infiniband/core/sa_query.c
@@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
	struct ib_sa_port *port =
		&sa_dev->port[event->element.port_num - sa_dev->start_port];

-	if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+	if (WARN_ON(!rdma_cap_ib_sa(handler->device, port->port_num)))
		return;

	spin_lock_irqsave(&port->ah_lock, flags);
...
@@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

-	force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+	force_grh = rdma_cap_eth_ah(device, port_num);

	if (rec->hop_limit > 1 || force_grh) {
		ah_attr->ah_flags = IB_AH_GRH;
...
@@ -1153,9 +1153,7 @@ static void ib_sa_add_one(struct ib_device *device)
 {
	struct ib_sa_device *sa_dev;
	int s, e, i;
-
-	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-		return;
+	int count = 0;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
...
@@ -1175,7 +1173,7 @@ static void ib_sa_add_one(struct ib_device *device)
	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
-		if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
...
@@ -1189,8 +1187,13 @@ static void ib_sa_add_one(struct ib_device *device)
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
+
+		count++;
	}

+	if (!count)
+		goto free;
+
	ib_set_client_data(device, &sa_client, sa_dev);

	/*
...
@@ -1204,19 +1207,20 @@ static void ib_sa_add_one(struct ib_device *device)
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

-	for (i = 0; i <= e - s; ++i)
-		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+	for (i = 0; i <= e - s; ++i) {
+		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
+	}

	return;

 err:
-	while (--i >= 0)
-		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+	while (--i >= 0) {
+		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
+	}

+free:
	kfree(sa_dev);
	return;
 }
...
@@ -1233,7 +1237,7 @@ static void ib_sa_remove_one(struct ib_device *device)
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
-		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
...
drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
 /*
  * Fixup a directed route SMP for sending
- * Return 0 if the SMP should be discarded
+ * Return IB_SMI_DISCARD if the SMP should be discarded
  */
 enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
				       u8 node_type, int port_num)
...
@@ -126,7 +126,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 /*
  * Adjust information for a received SMP
- * Return 0 if the SMP should be dropped
+ * Return IB_SMI_DISCARD if the SMP should be dropped
  */
 enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
				       int port_num, int phys_port_cnt)
...
drivers/infiniband/core/sysfs.c
@@ -456,6 +456,7 @@ static void ib_device_release(struct device *device)
 {
	struct ib_device *dev = container_of(device, struct ib_device, dev);

+	kfree(dev->port_immutable);
	kfree(dev);
 }
...
drivers/infiniband/core/ucm.c
@@ -1253,8 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device)
	dev_t base;
	struct ib_ucm_device *ucm_dev;

-	if (!device->alloc_ucontext ||
-	    rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+	if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
		return;

	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
...
drivers/infiniband/core/ucma.c
@@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file,
	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
-	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(ctx->cm_id->device,
-			ctx->cm_id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
-			break;
-		default:
-			break;
-		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+
+	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
+		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
+		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
-		break;
-	default:
-		break;
-	}

 out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
...
drivers/infiniband/core/user_mad.c
@@ -99,7 +99,6 @@ struct ib_umad_port {
 };

 struct ib_umad_device {
-	int                  start_port, end_port;
	struct kobject       kobj;
	struct ib_umad_port  port[0];
 };
...
@@ -426,11 +425,11 @@ static int is_duplicate(struct ib_umad_file *file,
	 * the same TID, reject the second as a duplicate.  This is more
	 * restrictive than required by the spec.
	 */
-	if (!ib_response_mad((struct ib_mad *) hdr)) {
-		if (!ib_response_mad((struct ib_mad *) sent_hdr))
+	if (!ib_response_mad(hdr)) {
+		if (!ib_response_mad(sent_hdr))
			return 1;
		continue;
-	} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+	} else if (!ib_response_mad(sent_hdr))
		continue;

	if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
...
@@ -1273,16 +1272,10 @@ static void ib_umad_add_one(struct ib_device *device)
 {
	struct ib_umad_device *umad_dev;
	int s, e, i;
+	int count = 0;

-	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-		return;
-
-	if (device->node_type == RDMA_NODE_IB_SWITCH)
-		s = e = 0;
-	else {
-		s = 1;
-		e = device->phys_port_cnt;
-	}
+	s = rdma_start_port(device);
+	e = rdma_end_port(device);

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
...
@@ -1292,25 +1285,34 @@ static void ib_umad_add_one(struct ib_device *device)
	kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);

-	umad_dev->start_port = s;
-	umad_dev->end_port   = e;
-
	for (i = s; i <= e; ++i) {
+		if (!rdma_cap_ib_mad(device, i))
+			continue;
+
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, umad_dev,
				      &umad_dev->port[i - s]))
			goto err;
+
+		count++;
	}

+	if (!count)
+		goto free;
+
	ib_set_client_data(device, &umad_client, umad_dev);

	return;

 err:
-	while (--i >= s)
+	while (--i >= s) {
+		if (!rdma_cap_ib_mad(device, i))
+			continue;
+
		ib_umad_kill_port(&umad_dev->port[i - s]);
+	}
+free:
	kobject_put(&umad_dev->kobj);
 }
...
@@ -1322,8 +1324,10 @@ static void ib_umad_remove_one(struct ib_device *device)
	if (!umad_dev)
		return;

-	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
-		ib_umad_kill_port(&umad_dev->port[i]);
+	for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
+		if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
+			ib_umad_kill_port(&umad_dev->port[i]);
+	}

	kobject_put(&umad_dev->kobj);
 }
...
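The ib_umad hunk above switches the port bounds from the removed start_port/end_port fields to rdma_start_port() and rdma_end_port(). Those helpers are not shown in this excerpt; a minimal sketch of what they would look like, assuming they are static inlines next to the other port helpers in include/rdma/ib_verbs.h, is:

/* Sketch only: switches expose a single port 0, HCAs number ports from 1. */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline u8 rdma_end_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

With these, clients iterate "for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++)" instead of open-coding the switch/HCA distinction in every add_one/remove_one callback.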
drivers/infiniband/core/verbs.c
@@ -48,6 +48,71 @@
 #include "core_priv.h"

+static const char * const ib_events[] = {
+	[IB_EVENT_CQ_ERR]		= "CQ error",
+	[IB_EVENT_QP_FATAL]		= "QP fatal error",
+	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
+	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
+	[IB_EVENT_COMM_EST]		= "communication established",
+	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
+	[IB_EVENT_PATH_MIG]		= "path migration successful",
+	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
+	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
+	[IB_EVENT_PORT_ACTIVE]		= "port active",
+	[IB_EVENT_PORT_ERR]		= "port error",
+	[IB_EVENT_LID_CHANGE]		= "LID change",
+	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
+	[IB_EVENT_SM_CHANGE]		= "SM change",
+	[IB_EVENT_SRQ_ERR]		= "SRQ error",
+	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
+	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
+	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
+	[IB_EVENT_GID_CHANGE]		= "GID changed",
+};
+
+const char *ib_event_msg(enum ib_event_type event)
+{
+	size_t index = event;
+
+	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
+			ib_events[index] : "unrecognized event";
+}
+EXPORT_SYMBOL(ib_event_msg);
+
+static const char * const wc_statuses[] = {
+	[IB_WC_SUCCESS]			= "success",
+	[IB_WC_LOC_LEN_ERR]		= "local length error",
+	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
+	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
+	[IB_WC_LOC_PROT_ERR]		= "local protection error",
+	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
+	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
+	[IB_WC_BAD_RESP_ERR]		= "bad response error",
+	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
+	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
+	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
+	[IB_WC_REM_OP_ERR]		= "remote operation error",
+	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
+	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
+	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
+	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
+	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
+	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
+	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
+	[IB_WC_FATAL_ERR]		= "fatal error",
+	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
+	[IB_WC_GENERAL_ERR]		= "general error",
+};
+
+const char *ib_wc_status_msg(enum ib_wc_status status)
+{
+	size_t index = status;
+
+	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
+			wc_statuses[index] : "unrecognized status";
+}
+EXPORT_SYMBOL(ib_wc_status_msg);
+
 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
 {
	switch (rate) {
...
@@ -198,11 +263,9 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
	u32 flow_class;
	u16 gid_index;
	int ret;
-	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
-			IB_LINK_LAYER_ETHERNET);

	memset(ah_attr, 0, sizeof *ah_attr);
-	if (is_eth) {
+	if (rdma_cap_eth_ah(device, port_num)) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;
...
@@ -871,7 +934,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
	union ib_gid  sgid;

	if ((*qp_attr_mask & IB_QP_AV)  &&
-	    (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
...
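The two lookup tables above back the new ib_event_msg() and ib_wc_status_msg() helpers that the ULP hunks later in this merge switch to. As a usage illustration only (the function name below is hypothetical, not part of the patch), a completion handler can now log the symbolic and numeric forms together:

/* Illustration: print a readable reason for a failed work completion. */
static void example_handle_wc(struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		pr_err("work completion failed: %s (%d), wr_id 0x%llx\n",
		       ib_wc_status_msg(wc->status), wc->status, wc->wr_id);
}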
drivers/infiniband/hw/amso1100/c2_provider.c
@@ -757,6 +757,23 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
	return netdev;
 }

+static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
+			     struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = c2_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+	return 0;
+}
+
 int c2_register_device(struct c2_dev *dev)
 {
	int ret = -ENOMEM;
...
@@ -820,6 +837,7 @@ int c2_register_device(struct c2_dev *dev)
	dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
	dev->ibdev.reg_user_mr = c2_reg_user_mr;
	dev->ibdev.dereg_mr = c2_dereg_mr;
+	dev->ibdev.get_port_immutable = c2_port_immutable;
	dev->ibdev.alloc_fmr = NULL;
	dev->ibdev.unmap_fmr = NULL;
...
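Each hardware driver in this merge adds the same kind of get_port_immutable() callback, differing only in the query function it reuses and the RDMA_CORE_PORT_* flags it reports. The core-side consumer is not part of this excerpt; a plausible sketch of how device registration might fill the per-port table from the callback (the helper name setup_port_immutable and the use of rdma_start_port()/rdma_end_port() here are assumptions) is:

/* Sketch only: allocate one immutable slot per port and ask the driver to fill it. */
static int setup_port_immutable(struct ib_device *device)
{
	u8 p;
	int ret;

	device->port_immutable = kzalloc(sizeof(*device->port_immutable) *
					 (rdma_end_port(device) + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		ret = device->get_port_immutable(device, p,
						 &device->port_immutable[p]);
		if (ret)
			return ret;
	}
	return 0;
}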
drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1343,6 +1343,23 @@ static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_board_id,
 };

+static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
+			       struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = iwch_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+	return 0;
+}
+
 int iwch_register_device(struct iwch_dev *dev)
 {
	int ret;
...
@@ -1420,6 +1437,7 @@ int iwch_register_device(struct iwch_dev *dev)
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
+	dev->ibdev.get_port_immutable = iwch_port_immutable;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
...
drivers/infiniband/hw/cxgb4/provider.c
@@ -465,6 +465,23 @@ static struct device_attribute *c4iw_class_attributes[] = {
	&dev_attr_board_id,
 };

+static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+			       struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = c4iw_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+	return 0;
+}
+
 int c4iw_register_device(struct c4iw_dev *dev)
 {
	int ret;
...
@@ -542,6 +559,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
	dev->ibdev.post_recv = c4iw_post_receive;
	dev->ibdev.get_protocol_stats = c4iw_get_mib;
	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
+	dev->ibdev.get_port_immutable = c4iw_port_immutable;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
...
drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -49,6 +49,9 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
 int ehca_query_port(struct ib_device *ibdev, u8 port,
		    struct ib_port_attr *props);

+enum rdma_protocol_type
+ehca_query_protocol(struct ib_device *device, u8 port_num);
+
 int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
			struct ehca_sma_attr *attr);
...
drivers/infiniband/hw/ehca/ehca_main.c
@@ -431,6 +431,23 @@ static int init_node_guid(struct ehca_shca *shca)
	return ret;
 }

+static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
+			       struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = ehca_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+	return 0;
+}
+
 static int ehca_init_device(struct ehca_shca *shca)
 {
	int ret;
...
@@ -510,6 +527,7 @@ static int ehca_init_device(struct ehca_shca *shca)
	shca->ib_device.process_mad	    = ehca_process_mad;
	shca->ib_device.mmap		    = ehca_mmap;
	shca->ib_device.dma_ops		    = &ehca_dma_mapping_ops;
+	shca->ib_device.get_port_immutable  = ehca_port_immutable;

	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
		shca->ib_device.uverbs_cmd_mask |=
...
drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1980,6 +1980,23 @@ static int disable_timer(struct ipath_devdata *dd)
	return 0;
 }

+static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num,
+				struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = ipath_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+	return 0;
+}
+
 /**
  * ipath_register_ib_device - register our device with the infiniband core
  * @dd: the device data structure
...
@@ -2179,6 +2196,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
	dev->process_mad = ipath_process_mad;
	dev->mmap = ipath_mmap;
	dev->dma_ops = &ipath_dma_mapping_ops;
+	dev->get_port_immutable = ipath_port_immutable;

	snprintf(dev->node_desc, sizeof(dev->node_desc),
		 IPATH_IDSTR " %s", init_utsname()->nodename);
...
drivers/infiniband/hw/mlx4/main.c
@@ -2114,6 +2114,27 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
	kfree(ibdev->eq_table);
 }

+static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
+			       struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = mlx4_ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+
+	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
+		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+	else
+		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+
+	return 0;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
	struct mlx4_ib_dev *ibdev;
...
@@ -2241,6 +2262,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
+	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;

	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
...
drivers/infiniband/hw/mlx5/main.c
@@ -1182,6 +1182,23 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
	mlx5_ib_dealloc_pd(devr->p0);
 }

+static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
+			       struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = mlx5_ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+	return 0;
+}
+
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
	struct mlx5_ib_dev *dev;
...
@@ -1285,6 +1302,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
	dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
	dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
+	dev->ib_dev.get_port_immutable  = mlx5_port_immutable;

	mlx5_ib_internal_query_odp_caps(dev);
...
drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1244,6 +1244,23 @@ static int mthca_init_node_data(struct mthca_dev *dev)
	return err;
 }

+static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
+				struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = mthca_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+	return 0;
+}
+
 int mthca_register_device(struct mthca_dev *dev)
 {
	int ret;
...
@@ -1323,6 +1340,7 @@ int mthca_register_device(struct mthca_dev *dev)
	dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
	dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
	dev->ib_dev.dereg_mr             = mthca_dereg_mr;
+	dev->ib_dev.get_port_immutable   = mthca_port_immutable;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
...
drivers/infiniband/hw/nes/nes_verbs.c
@@ -606,7 +606,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
	return 0;
 }

-
 /**
  * nes_query_pkey
  */
...
@@ -3828,6 +3827,22 @@ static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_
	return 0;
 }

+static int nes_port_immutable(struct ib_device *ibdev, u8 port_num,
+			      struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = nes_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+	return 0;
+}
+
 /**
  * nes_init_ofa_device
...
@@ -3928,6 +3943,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
	nesibdev->ibdev.iwcm->reject = nes_reject;
	nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
	nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
+	nesibdev->ibdev.get_port_immutable   = nes_port_immutable;

	return nesibdev;
 }
...
drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -202,6 +202,23 @@ static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
	return IB_LINK_LAYER_ETHERNET;
 }

+static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+				 struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = ocrdma_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+
+	return 0;
+}
+
 static int ocrdma_register_device(struct ocrdma_dev *dev)
 {
	strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
...
@@ -286,6 +303,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
	dev->ibdev.dma_device = &dev->nic_info.pdev->dev;

	dev->ibdev.process_mad = ocrdma_process_mad;
+	dev->ibdev.get_port_immutable = ocrdma_port_immutable;

	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		dev->ibdev.uverbs_cmd_mask |=
...
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -41,6 +41,9 @@ int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
 int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
		       struct ib_port_modify *props);

+enum rdma_protocol_type
+ocrdma_query_protocol(struct ib_device *device, u8 port_num);
+
 void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
 int ocrdma_query_gid(struct ib_device *, u8 port,
		     int index, union ib_gid *gid);
...
drivers/infiniband/hw/qib/qib_verbs.c
@@ -2040,6 +2040,23 @@ static void init_ibport(struct qib_pportdata *ppd)
	RCU_INIT_POINTER(ibp->qp1, NULL);
 }

+static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
+			      struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = qib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+	return 0;
+}
+
 /**
  * qib_register_ib_device - register our device with the infiniband core
  * @dd: the device data structure
...
@@ -2227,6 +2244,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = qib_mmap;
	ibdev->dma_ops = &qib_dma_mapping_ops;
+	ibdev->get_port_immutable = qib_port_immutable;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 "Intel Infiniband HCA %s", init_utsname()->nodename);
...
drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -300,6 +300,22 @@ static struct notifier_block usnic_ib_inetaddr_notifier = {
 };
 /* End of inet section*/

+static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
+				struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = usnic_ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+
+	return 0;
+}
+
 /* Start of PF discovery section */
 static void *usnic_ib_device_add(struct pci_dev *dev)
 {
...
@@ -383,6 +399,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
+	us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;

	if (ib_register_device(&us_ibdev->ib_dev, NULL))
...
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -27,6 +27,8 @@ int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props);
 int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
			struct ib_port_attr *props);
+enum rdma_protocol_type
+usnic_ib_query_protocol(struct ib_device *device, u8 port_num);
 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask,
		      struct ib_qp_init_attr *qp_init_attr);
...
drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1685,9 +1685,7 @@ static void ipoib_add_one(struct ib_device *device)
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;
-
-	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-		return;
+	int count = 0;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
...
@@ -1704,15 +1702,21 @@ static void ipoib_add_one(struct ib_device *device)
	}

	for (p = s; p <= e; ++p) {
-		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
+		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
+			count++;
		}
	}

+	if (!count) {
+		kfree(dev_list);
+		return;
+	}
+
	ib_set_client_data(device, &ipoib_client, dev_list);
 }
...
@@ -1721,9 +1725,6 @@ static void ipoib_remove_one(struct ib_device *device)
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

-	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-		return;
-
	dev_list = ib_get_client_data(device, &ipoib_client);
	if (!dev_list)
		return;
...
drivers/infiniband/ulp/iser/iser_verbs.c
@@ -51,19 +51,22 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
-	iser_err("got cq event %d \n", cause->event);
+	iser_err("cq event %s (%d)\n",
+		 ib_event_msg(cause->event), cause->event);
 }

 static void iser_qp_event_callback(struct ib_event *cause, void *context)
 {
-	iser_err("got qp event %d\n",cause->event);
+	iser_err("qp event %s (%d)\n",
+		 ib_event_msg(cause->event), cause->event);
 }

 static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
 {
-	iser_err("async event %d on device %s port %d\n", event->event,
-		 event->device->name, event->element.port_num);
+	iser_err("async event %s (%d) on device %s port %d\n",
+		 ib_event_msg(event->event), event->event,
+		 event->device->name, event->element.port_num);
 }

 /**
...
@@ -873,8 +876,9 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
-	iser_info("event %d status %d conn %p id %p\n",
-		  event->event, event->status, cma_id->context, cma_id);
+	iser_info("%s (%d): status %d conn %p id %p\n",
+		  rdma_event_msg(event->event), event->event,
+		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
...
@@ -913,7 +917,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
	}
		break;
	default:
-		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
+		iser_err("Unexpected RDMA CM event: %s (%d)\n",
+			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);
...
@@ -1173,10 +1178,13 @@ static void iser_handle_wc(struct ib_wc *wc)
	}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			iser_err("wr id %llx status %d vend_err %x\n",
-				 wc->wr_id, wc->status, wc->vendor_err);
+			iser_err("%s (%d): wr id %llx vend_err %x\n",
+				 ib_wc_status_msg(wc->status), wc->status,
+				 wc->wr_id, wc->vendor_err);
		else
-			iser_dbg("flush error: wr id %llx\n", wc->wr_id);
+			iser_dbg("%s (%d): wr id %llx\n",
+				 ib_wc_status_msg(wc->status), wc->status,
+				 wc->wr_id);

		if (wc->wr_id == ISER_BEACON_WRID)
			/* all flush errors were consumed */
...
drivers/infiniband/ulp/isert/ib_isert.c
@@ -78,7 +78,9 @@ isert_qp_event_callback(struct ib_event *e, void *context)
 {
	struct isert_conn *isert_conn = context;

-	isert_err("conn %p event: %d\n", isert_conn, e->event);
+	isert_err("%s (%d): conn %p\n",
+		  ib_event_msg(e->event), e->event, isert_conn);
+
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
...
@@ -897,7 +899,8 @@ static int
 isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
 {
-	isert_dbg("isert np %p, handling event %d\n", isert_np, event);
+	isert_dbg("%s (%d): isert np %p\n",
+		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
...
@@ -957,7 +960,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
	int ret = 0;

-	isert_info("event %d status %d id %p np %p\n", event->event,
-		   event->status, cma_id, cma_id->context);
+	isert_info("%s (%d): status %d id %p np %p\n",
+		   rdma_event_msg(event->event), event->event,
+		   event->status, cma_id, cma_id->context);

	switch (event->event) {
...
@@ -2091,10 +2095,13 @@ isert_handle_wc(struct ib_wc *wc)
	}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			isert_err("wr id %llx status %d vend_err %x\n",
-				  wc->wr_id, wc->status, wc->vendor_err);
+			isert_err("%s (%d): wr id %llx vend_err %x\n",
+				  ib_wc_status_msg(wc->status), wc->status,
+				  wc->wr_id, wc->vendor_err);
		else
-			isert_dbg("flush error: wr id %llx\n", wc->wr_id);
+			isert_dbg("%s (%d): wr id %llx\n",
+				  ib_wc_status_msg(wc->status), wc->status,
+				  wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
...
drivers/infiniband/ulp/srp/ib_srp.c
@@ -254,7 +254,8 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 static void srp_qp_event(struct ib_event *event, void *context)
 {
-	pr_debug("QP event %d\n", event->event);
+	pr_debug("QP event %s (%d)\n",
+		 ib_event_msg(event->event), event->event);
 }

 static int srp_init_qp(struct srp_target_port *target,
...
@@ -1922,17 +1923,18 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
	if (ch->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
-				     "LOCAL_INV failed with status %d\n",
-				     wc_status);
+				     "LOCAL_INV failed with status %s (%d)\n",
+				     ib_wc_status_msg(wc_status), wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
-				     "FAST_REG_MR failed status %d\n",
-				     wc_status);
+				     "FAST_REG_MR failed status %s (%d)\n",
+				     ib_wc_status_msg(wc_status), wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
-				     PFX "failed %s status %d for iu %p\n",
+				     PFX "failed %s status %s (%d) for iu %p\n",
				     send_err ? "send" : "receive",
-				     wc_status, (void *)(uintptr_t)wr_id);
+				     ib_wc_status_msg(wc_status), wc_status,
+				     (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
...
include/rdma/ib_mad.h
@@ -263,7 +263,7 @@ struct ib_mad_send_buf {
 * ib_response_mad - Returns if the specified MAD has been generated in
 *   response to a sent request or trap.
 */
-int ib_response_mad(struct ib_mad *mad);
+int ib_response_mad(const struct ib_mad_hdr *hdr);

 /**
  * ib_get_rmpp_resptime - Returns the RMPP response time.
...
@@ -675,6 +675,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
 * @agent: the agent in question
 * @return: true if agent is performing rmpp, false otherwise.
 */
-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);

 #endif	/* IB_MAD_H */
include/rdma/ib_verbs.h
@@ -81,6 +81,13 @@ enum rdma_transport_type {
 	RDMA_TRANSPORT_USNIC_UDP
 };
 
+enum rdma_protocol_type {
+	RDMA_PROTOCOL_IB,
+	RDMA_PROTOCOL_IBOE,
+	RDMA_PROTOCOL_IWARP,
+	RDMA_PROTOCOL_USNIC_UDP
+};
+
 __attribute_const__ enum rdma_transport_type
 rdma_node_get_transport(enum rdma_node_type node_type);
 
@@ -346,6 +353,40 @@ union rdma_protocol_stats {
 	struct iw_protocol_stats	iw;
 };
 
+/* Define bits for the various functionality this port needs to be supported by
+ * the core.
+ */
+/* Management                           0x00000FFF */
+#define RDMA_CORE_CAP_IB_MAD            0x00000001
+#define RDMA_CORE_CAP_IB_SMI            0x00000002
+#define RDMA_CORE_CAP_IB_CM             0x00000004
+#define RDMA_CORE_CAP_IW_CM             0x00000008
+#define RDMA_CORE_CAP_IB_SA             0x00000010
+
+/* Address format                       0x000FF000 */
+#define RDMA_CORE_CAP_AF_IB             0x00001000
+#define RDMA_CORE_CAP_ETH_AH            0x00002000
+
+/* Protocol                             0xFFF00000 */
+#define RDMA_CORE_CAP_PROT_IB           0x00100000
+#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
+#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
+
+#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
+					| RDMA_CORE_CAP_IB_MAD \
+					| RDMA_CORE_CAP_IB_SMI \
+					| RDMA_CORE_CAP_IB_CM  \
+					| RDMA_CORE_CAP_IB_SA  \
+					| RDMA_CORE_CAP_AF_IB)
+#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
+					| RDMA_CORE_CAP_IB_MAD  \
+					| RDMA_CORE_CAP_IB_CM   \
+					| RDMA_CORE_CAP_IB_SA   \
+					| RDMA_CORE_CAP_AF_IB   \
+					| RDMA_CORE_CAP_ETH_AH)
+#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
+					| RDMA_CORE_CAP_IW_CM)
+
 struct ib_port_attr {
 	enum ib_port_state	state;
 	enum ib_mtu		max_mtu;
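
As a worked example of the bit layout above (my arithmetic, not part of the header), the RoCE profile is simply the OR of its protocol bit with the management and address-format bits it needs:

```c
#include <rdma/ib_verbs.h>

/* Worked example, not from the header: PROT_ROCE | IB_MAD | IB_CM | IB_SA |
 * AF_IB | ETH_AH = 0x00200000 | 0x1 | 0x4 | 0x10 | 0x1000 | 0x2000
 *                = 0x00203015
 */
static const u32 example_roce_caps = RDMA_CORE_PORT_IBA_ROCE; /* 0x00203015 */
```
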
@@ -412,6 +453,8 @@ enum ib_event_type {
 	IB_EVENT_GID_CHANGE,
 };
 
+__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
+
 struct ib_event {
 	struct ib_device	*device;
 	union {
@@ -663,6 +706,8 @@ enum ib_wc_status {
 	IB_WC_GENERAL_ERR
 };
 
+__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
+
 enum ib_wc_opcode {
 	IB_WC_SEND,
 	IB_WC_RDMA_WRITE,
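
The two declarations above add core helpers that turn an async event type or a work-completion status into a printable name. A minimal sketch of the "%s (%d)" logging pattern the rest of this series converts callers to (the handler and its names below are illustrative, not code from the commit):

```c
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Illustrative completion handler: print the human-readable status name
 * first and the raw enum value second, as the converted drivers do. */
static void example_log_bad_wc(struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("wr id %llx failed: %s (%d), vendor err 0x%x\n",
		       (unsigned long long)wc->wr_id,
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->vendor_err);
}
```
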
@@ -1474,6 +1519,12 @@ struct ib_dma_mapping_ops {
 
 struct iw_cm_verbs;
 
+struct ib_port_immutable {
+	int                           pkey_tbl_len;
+	int                           gid_tbl_len;
+	u32                           core_cap_flags;
+};
+
 struct ib_device {
 	struct device                *dma_device;
 
@@ -1487,8 +1538,10 @@ struct ib_device {
 	struct list_head              client_data_list;
 
 	struct ib_cache               cache;
-	int                          *pkey_tbl_len;
-	int                          *gid_tbl_len;
+	/**
+	 * port_immutable is indexed by port number
+	 */
+	struct ib_port_immutable     *port_immutable;
 
 	int			      num_comp_vectors;
 
@@ -1675,6 +1728,14 @@ struct ib_device {
 	u32			     local_dma_lkey;
 	u8                           node_type;
 	u8                           phys_port_cnt;
+
+	/**
+	 * The following mandatory functions are used only at device
+	 * registration.  Keep functions such as these at the end of this
+	 * structure to avoid cache line misses when accessing struct ib_device
+	 * in fast paths.
+	 */
+	int			   (*get_port_immutable)(struct ib_device *, u8,
+							 struct ib_port_immutable *);
 };
 
 struct ib_client {
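
With ib_port_immutable and the mandatory get_port_immutable() callback in place, each provider reports its per-port constants once at registration. A hedged sketch of what such a callback might look like for a plain InfiniBand port (the driver prefix "exdrv" is hypothetical; the real providers touched by this merge follow the same shape with their own query paths):

```c
#include <rdma/ib_verbs.h>

/* Hypothetical provider callback for an InfiniBand-only HCA. */
static int exdrv_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	/* Table sizes never change after registration, so cache them once. */
	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len  = attr.gid_tbl_len;
	/* Pick the canned capability profile matching the link protocol. */
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

	return 0;
}
```

A driver would point its ib_device's get_port_immutable at such a callback before registering; the core is then expected to allocate device->port_immutable[] and invoke the callback once per port.
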
@@ -1743,6 +1804,242 @@ int ib_query_port(struct ib_device *device,
 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
 					       u8 port_num);
 
+/**
+ * rdma_start_port - Return the first valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return start port number
+ */
+static inline u8 rdma_start_port(const struct ib_device *device)
+{
+	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+}
+
+/**
+ * rdma_end_port - Return the last valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return last port number
+ */
+static inline u8 rdma_end_port(const struct ib_device *device)
+{
+	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
+		0 : device->phys_port_cnt;
+}
+
+static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
+}
+
+static inline bool rdma_protocol_roce(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
+}
+
+static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
+}
+
+static inline bool rdma_ib_or_roce(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags &
+		(RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
+}
+
+/**
+ * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Management Datagrams (MAD) are a required part of the InfiniBand
+ * specification and are supported on all InfiniBand devices.  A slightly
+ * extended version are also supported on OPA interfaces.
+ *
+ * Return: true if the port supports sending/receiving of MAD packets.
+ */
+static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
+}
+
+/**
+ * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
+ * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Each InfiniBand node is required to provide a Subnet Management Agent
+ * that the subnet manager can access.  Prior to the fabric being fully
+ * configured by the subnet manager, the SMA is accessed via a well known
+ * interface called the Subnet Management Interface (SMI).  This interface
+ * uses directed route packets to communicate with the SM to get around the
+ * chicken and egg problem of the SM needing to know what's on the fabric
+ * in order to configure the fabric, and needing to configure the fabric in
+ * order to send packets to the devices on the fabric.  These directed
+ * route packets do not need the fabric fully configured in order to reach
+ * their destination.  The SMI is the only method allowed to send
+ * directed route packets on an InfiniBand fabric.
+ *
+ * Return: true if the port provides an SMI.
+ */
+static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
+}
+
+/**
+ * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * The InfiniBand Communication Manager is one of many pre-defined General
+ * Service Agents (GSA) that are accessed via the General Service
+ * Interface (GSI).  It's role is to facilitate establishment of connections
+ * between nodes as well as other management related tasks for established
+ * connections.
+ *
+ * Return: true if the port supports an IB CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
+}
+
+/**
+ * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Similar to above, but specific to iWARP connections which have a different
+ * managment protocol than InfiniBand.
+ *
+ * Return: true if the port supports an iWARP CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
+}
+
+/**
+ * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
+ * Subnet Administration.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * An InfiniBand Subnet Administration (SA) service is a pre-defined General
+ * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
+ * fabrics, devices should resolve routes to other hosts by contacting the
+ * SA to query the proper route.
+ *
+ * Return: true if the port should act as a client to the fabric Subnet
+ * Administration interface.  This does not imply that the SA service is
+ * running locally.
+ */
+static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
+}
+
+/**
+ * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
+ * Multicast.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand multicast registration is more complex than normal IPv4 or
+ * IPv6 multicast registration.  Each Host Channel Adapter must register
+ * with the Subnet Manager when it wishes to join a multicast group.  It
+ * should do so only once regardless of how many queue pairs it subscribes
+ * to this group.  And it should leave the group only after all queue pairs
+ * attached to the group have been detached.
+ *
+ * Return: true if the port must undertake the additional adminstrative
+ * overhead of registering/unregistering with the SM and tracking of the
+ * total number of queue pairs attached to the multicast group.
+ */
+static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num)
+{
+	return rdma_cap_ib_sa(device, port_num);
+}
+
+/**
+ * rdma_cap_af_ib - Check if the port of device has the capability
+ * Native Infiniband Address.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
+ * GID.  RoCE uses a different mechanism, but still generates a GID via
+ * a prescribed mechanism and port specific data.
+ *
+ * Return: true if the port uses a GID address to identify devices on the
+ * network.
+ */
+static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
+}
+
+/**
+ * rdma_cap_eth_ah - Check if the port of device has the capability
+ * Ethernet Address Handle.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
+ * to fabricate GIDs over Ethernet/IP specific addresses native to the
+ * port.  Normally, packet headers are generated by the sending host
+ * adapter, but when sending connectionless datagrams, we must manually
+ * inject the proper headers for the fabric we are communicating over.
+ *
+ * Return: true if we are running as a RoCE port and must force the
+ * addition of a Global Route Header built from our Ethernet Address
+ * Handle into our header list for connectionless packets.
+ */
+static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num)
+{
+	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
+}
+
+/**
+ * rdma_cap_read_multi_sge - Check if the port of device has the capability
+ * RDMA Read Multiple Scatter-Gather Entries.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * iWARP has a restriction that RDMA READ requests may only have a single
+ * Scatter/Gather Entry (SGE) in the work request.
+ *
+ * NOTE: although the linux kernel currently assumes all devices are either
+ * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
+ * WRITEs, according to Tom Talpey, this is not accurate.  There are some
+ * devices out there that support more than a single SGE on RDMA READ
+ * requests, but do not support the same number of SGEs as they do on
+ * RDMA WRITE requests.  The linux kernel would need rearchitecting to
+ * support these imbalanced READ/WRITE SGEs allowed devices.  So, for now,
+ * suffice with either the device supports the same READ/WRITE SGEs, or
+ * it only gets one READ sge.
+ *
+ * Return: true for any device that allows more than one SGE in RDMA READ
+ * requests.
+ */
+static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
+					   u8 port_num)
+{
+	return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
+}
+
 int ib_query_gid(struct ib_device *device,
 		 u8 port_num, int index, union ib_gid *gid);
 
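Together with rdma_start_port()/rdma_end_port(), these capability helpers let core code walk a device's ports without special-casing node or link types. A small illustrative consumer (the function, message, and behaviour are made up for the example):

```c
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Illustrative: visit every port, with switch-style numbering handled by
 * rdma_start_port()/rdma_end_port(), and only act on ports that can do
 * InfiniBand management datagrams. */
static void example_scan_ports(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device);
	     port <= rdma_end_port(device); port++) {
		if (!rdma_cap_ib_mad(device, port))
			continue;
		/* e.g. register a MAD agent on this port */
		pr_info("%s: port %u supports MADs\n", device->name, port);
	}
}
```
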
include/rdma/rdma_cm.h
@@ -62,6 +62,8 @@ enum rdma_cm_event_type {
 	RDMA_CM_EVENT_TIMEWAIT_EXIT
 };
 
+__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event);
+
 enum rdma_port_space {
 	RDMA_PS_SDP   = 0x0001,
 	RDMA_PS_IPOIB = 0x0002,

net/rds/af_rds.c
@@ -40,15 +40,6 @@
 
 #include "rds.h"
 
-char *rds_str_array(char **array, size_t elements, size_t index)
-{
-	if ((index < elements) && array[index])
-		return array[index];
-	else
-		return "unknown";
-}
-EXPORT_SYMBOL(rds_str_array);
-
 /* this is just used for stats gathering :/ */
 static DEFINE_SPINLOCK(rds_sock_lock);
 static unsigned long rds_sock_count;

net/rds/ib.h
@@ -339,7 +339,6 @@ u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
 extern wait_queue_head_t rds_ib_ring_empty_wait;
 
 /* ib_send.c */
-char *rds_ib_wc_status_str(enum ib_wc_status status);
 void rds_ib_xmit_complete(struct rds_connection *conn);
 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		unsigned int hdr_off, unsigned int sg, unsigned int off);

net/rds/ib_cm.c
@@ -39,36 +39,6 @@
 #include "rds.h"
 #include "ib.h"
 
-static char *rds_ib_event_type_strings[] = {
-#define RDS_IB_EVENT_STRING(foo) \
-		[IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
-	RDS_IB_EVENT_STRING(CQ_ERR),
-	RDS_IB_EVENT_STRING(QP_FATAL),
-	RDS_IB_EVENT_STRING(QP_REQ_ERR),
-	RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
-	RDS_IB_EVENT_STRING(COMM_EST),
-	RDS_IB_EVENT_STRING(SQ_DRAINED),
-	RDS_IB_EVENT_STRING(PATH_MIG),
-	RDS_IB_EVENT_STRING(PATH_MIG_ERR),
-	RDS_IB_EVENT_STRING(DEVICE_FATAL),
-	RDS_IB_EVENT_STRING(PORT_ACTIVE),
-	RDS_IB_EVENT_STRING(PORT_ERR),
-	RDS_IB_EVENT_STRING(LID_CHANGE),
-	RDS_IB_EVENT_STRING(PKEY_CHANGE),
-	RDS_IB_EVENT_STRING(SM_CHANGE),
-	RDS_IB_EVENT_STRING(SRQ_ERR),
-	RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
-	RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
-	RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
-#undef RDS_IB_EVENT_STRING
-};
-
-static char *rds_ib_event_str(enum ib_event_type type)
-{
-	return rds_str_array(rds_ib_event_type_strings,
-			     ARRAY_SIZE(rds_ib_event_type_strings), type);
-};
-
 /*
  * Set the selected protocol version
  */
@@ -243,7 +213,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
 static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
 {
 	rdsdebug("event %u (%s) data %p\n",
-		 event->event, rds_ib_event_str(event->event), data);
+		 event->event, ib_event_msg(event->event), data);
 }
 
 static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
@@ -252,7 +222,7 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 	struct rds_ib_connection *ic = conn->c_transport_data;
 
 	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
-		 rds_ib_event_str(event->event));
+		 ib_event_msg(event->event));
 
 	switch (event->event) {
 	case IB_EVENT_COMM_EST:
@@ -261,7 +231,7 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 	default:
 		rdsdebug("Fatal QP Event %u (%s) "
 			 "- connection %pI4->%pI4, reconnecting\n",
-			 event->event, rds_ib_event_str(event->event),
+			 event->event, ib_event_msg(event->event),
 			 &conn->c_laddr, &conn->c_faddr);
 		rds_conn_drop(conn);
 		break;

net/rds/ib_recv.c
@@ -956,7 +956,7 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
 	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
 		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
 			 (unsigned long long)wc.wr_id, wc.status,
-			 rds_ib_wc_status_str(wc.status), wc.byte_len,
+			 ib_wc_status_msg(wc.status), wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
 
 		rds_ib_stats_inc(s_ib_rx_cq_event);
@@ -978,7 +978,7 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
 				  "status %u (%s), disconnecting and "
 				  "reconnecting\n", &conn->c_faddr,
 				  wc.status,
-				  rds_ib_wc_status_str(wc.status));
+				  ib_wc_status_msg(wc.status));
 		}
 
 		/*

net/rds/ib_send.c
@@ -39,40 +39,6 @@
 #include "rds.h"
 #include "ib.h"
 
-static char *rds_ib_wc_status_strings[] = {
-#define RDS_IB_WC_STATUS_STR(foo) \
-		[IB_WC_##foo] = __stringify(IB_WC_##foo)
-	RDS_IB_WC_STATUS_STR(SUCCESS),
-	RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
-	RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
-	RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
-	RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
-	RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
-	RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
-	RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
-	RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
-	RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
-	RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
-	RDS_IB_WC_STATUS_STR(REM_OP_ERR),
-	RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
-	RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
-	RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
-	RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
-	RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
-	RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
-	RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
-	RDS_IB_WC_STATUS_STR(FATAL_ERR),
-	RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
-	RDS_IB_WC_STATUS_STR(GENERAL_ERR),
-#undef RDS_IB_WC_STATUS_STR
-};
-
-char *rds_ib_wc_status_str(enum ib_wc_status status)
-{
-	return rds_str_array(rds_ib_wc_status_strings,
-			     ARRAY_SIZE(rds_ib_wc_status_strings), status);
-}
-
 /*
  * Convert IB-specific error message to RDS error message and call core
  * completion handler.
@@ -293,7 +259,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
 			 (unsigned long long)wc.wr_id, wc.status,
-			 rds_ib_wc_status_str(wc.status), wc.byte_len,
+			 ib_wc_status_msg(wc.status), wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
 
 		rds_ib_stats_inc(s_ib_tx_cq_event);
@@ -344,7 +310,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 			rds_ib_conn_error(conn, "send completion on %pI4 had status "
 					  "%u (%s), disconnecting and reconnecting\n",
 					  &conn->c_faddr, wc.status,
-					  rds_ib_wc_status_str(wc.status));
+					  ib_wc_status_msg(wc.status));
 		}
 	}
 }

net/rds/rdma_transport.c
@@ -37,34 +37,6 @@
 
 static struct rdma_cm_id *rds_rdma_listen_id;
 
-static char *rds_cm_event_strings[] = {
-#define RDS_CM_EVENT_STRING(foo) \
-		[RDMA_CM_EVENT_##foo] = __stringify(RDMA_CM_EVENT_##foo)
-	RDS_CM_EVENT_STRING(ADDR_RESOLVED),
-	RDS_CM_EVENT_STRING(ADDR_ERROR),
-	RDS_CM_EVENT_STRING(ROUTE_RESOLVED),
-	RDS_CM_EVENT_STRING(ROUTE_ERROR),
-	RDS_CM_EVENT_STRING(CONNECT_REQUEST),
-	RDS_CM_EVENT_STRING(CONNECT_RESPONSE),
-	RDS_CM_EVENT_STRING(CONNECT_ERROR),
-	RDS_CM_EVENT_STRING(UNREACHABLE),
-	RDS_CM_EVENT_STRING(REJECTED),
-	RDS_CM_EVENT_STRING(ESTABLISHED),
-	RDS_CM_EVENT_STRING(DISCONNECTED),
-	RDS_CM_EVENT_STRING(DEVICE_REMOVAL),
-	RDS_CM_EVENT_STRING(MULTICAST_JOIN),
-	RDS_CM_EVENT_STRING(MULTICAST_ERROR),
-	RDS_CM_EVENT_STRING(ADDR_CHANGE),
-	RDS_CM_EVENT_STRING(TIMEWAIT_EXIT),
-#undef RDS_CM_EVENT_STRING
-};
-
-static char *rds_cm_event_str(enum rdma_cm_event_type type)
-{
-	return rds_str_array(rds_cm_event_strings,
-			     ARRAY_SIZE(rds_cm_event_strings), type);
-};
-
 int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 			      struct rdma_cm_event *event)
 {
@@ -74,7 +46,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 	int ret = 0;
 
 	rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
-		 event->event, rds_cm_event_str(event->event));
+		 event->event, rdma_event_msg(event->event));
 
 	if (cm_id->device->node_type == RDMA_NODE_RNIC)
 		trans = &rds_iw_transport;
@@ -139,7 +111,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 	default:
 		/* things like device disconnect? */
 		printk(KERN_ERR "RDS: unknown event %u (%s)!\n",
-		       event->event, rds_cm_event_str(event->event));
+		       event->event, rdma_event_msg(event->event));
 		break;
 	}
 
@@ -148,7 +120,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 	mutex_unlock(&conn->c_cm_lock);
 
 	rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event,
-		 rds_cm_event_str(event->event), ret);
+		 rdma_event_msg(event->event), ret);
 
 	return ret;
 }

net/rds/rds.h
@@ -575,7 +575,6 @@ struct rds_statistics {
 };
 
 /* af_rds.c */
-char *rds_str_array(char **array, size_t elements, size_t index);
 void rds_sock_addref(struct rds_sock *rs);
 void rds_sock_put(struct rds_sock *rs);
 void rds_wake_sk_sleep(struct rds_sock *rs);

net/sunrpc/xprtrdma/frwr_ops.c
@@ -128,8 +128,8 @@ frwr_sendcompletion(struct ib_wc *wc)
 	/* WARNING: Only wr_id and status are reliable at this point */
 	r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
-	dprintk("RPC:       %s: frmr %p (stale), status %d\n",
-		__func__, r, wc->status);
+	dprintk("RPC:       %s: frmr %p (stale), status %s (%d)\n",
+		__func__, r, ib_wc_status_msg(wc->status), wc->status);
 	r->r.frmr.fr_state = FRMR_IS_STALE;
 }

net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -117,8 +117,8 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
 {
-	if (rdma_node_get_transport(xprt->sc_cm_id->device->node_type) ==
-	     RDMA_TRANSPORT_IWARP)
+	if (!rdma_cap_read_multi_sge(xprt->sc_cm_id->device,
+				     xprt->sc_cm_id->port_num))
 		return 1;
 	else
 		return min_t(int, sge_count, xprt->sc_max_sge);

net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -175,8 +175,8 @@ void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
 static void cq_event_handler(struct ib_event *event, void *context)
 {
 	struct svc_xprt *xprt = context;
-	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
-		event->event, context);
+	dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
+		ib_event_msg(event->event), event->event, context);
 	set_bit(XPT_CLOSE, &xprt->xpt_flags);
 }
@@ -191,8 +191,9 @@ static void qp_event_handler(struct ib_event *event, void *context)
 	case IB_EVENT_COMM_EST:
 	case IB_EVENT_SQ_DRAINED:
 	case IB_EVENT_QP_LAST_WQE_REACHED:
-		dprintk("svcrdma: QP event %d received for QP=%p\n",
-			event->event, event->element.qp);
+		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
+			ib_event_msg(event->event), event->event,
+			event->element.qp);
 		break;
 	/* These are considered fatal events */
 	case IB_EVENT_PATH_MIG_ERR:
@@ -201,9 +202,10 @@ static void qp_event_handler(struct ib_event *event, void *context)
 	case IB_EVENT_QP_ACCESS_ERR:
 	case IB_EVENT_DEVICE_FATAL:
 	default:
-		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
+		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
 			"closing transport\n",
-			event->event, event->element.qp);
+			ib_event_msg(event->event), event->event,
+			event->element.qp);
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
 		break;
 	}
@@ -402,7 +404,8 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
 	for (i = 0; i < ret; i++) {
 		wc = &wc_a[i];
 		if (wc->status != IB_WC_SUCCESS) {
-			dprintk("svcrdma: sq wc err status %d\n",
+			dprintk("svcrdma: sq wc err status %s (%d)\n",
+				ib_wc_status_msg(wc->status),
 				wc->status);
 
 			/* Close the transport */
@@ -616,7 +619,8 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
 		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
-			"event=%d\n", cma_id, cma_id->context, event->event);
+			"event = %s (%d)\n", cma_id, cma_id->context,
+			rdma_event_msg(event->event), event->event);
 		handle_connect_req(cma_id,
 				   event->param.conn.initiator_depth);
 		break;
@@ -636,7 +640,8 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 	default:
 		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
-			"event=%d\n", cma_id, event->event);
+			"event = %s (%d)\n", cma_id,
+			rdma_event_msg(event->event), event->event);
 		break;
 	}
@@ -669,7 +674,8 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
 		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
-			"event=%d\n", cma_id, xprt, event->event);
+			"event = %s (%d)\n", cma_id, xprt,
+			rdma_event_msg(event->event), event->event);
 		if (xprt) {
 			set_bit(XPT_CLOSE, &xprt->xpt_flags);
 			svc_xprt_enqueue(xprt);
@@ -677,7 +683,8 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
 		break;
 	default:
 		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
-			"event=%d\n", cma_id, event->event);
+			"event = %s (%d)\n", cma_id,
+			rdma_event_msg(event->event), event->event);
 		break;
 	}
 	return 0;
@@ -851,7 +858,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct ib_qp_init_attr qp_attr;
 	struct ib_device_attr devattr;
 	int uninitialized_var(dma_mr_acc);
-	int need_dma_mr;
+	int need_dma_mr = 0;
 	int ret;
 	int i;
@@ -985,35 +992,26 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	/*
 	 * Determine if a DMA MR is required and if so, what privs are required
 	 */
-	switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
-	case RDMA_TRANSPORT_IWARP:
-		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
-		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
-			need_dma_mr = 1;
-			dma_mr_acc =
-				(IB_ACCESS_LOCAL_WRITE |
-				 IB_ACCESS_REMOTE_WRITE);
-		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
-			need_dma_mr = 1;
-			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-		} else
-			need_dma_mr = 0;
-		break;
-	case RDMA_TRANSPORT_IB:
-		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
-			need_dma_mr = 1;
-			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
-			need_dma_mr = 1;
-			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-		} else
-			need_dma_mr = 0;
-		break;
-	default:
+	if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+				 newxprt->sc_cm_id->port_num) &&
+	    !rdma_ib_or_roce(newxprt->sc_cm_id->device,
+			     newxprt->sc_cm_id->port_num))
 		goto errout;
+
+	if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
+	    !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
+		need_dma_mr = 1;
+		dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
+		if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+					newxprt->sc_cm_id->port_num) &&
+		    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
+			dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
 	}
 
+	if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+				newxprt->sc_cm_id->port_num))
+		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
+
 	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
 	if (need_dma_mr) {
 		/* Register all of physical memory */

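Restated as a standalone helper, the rewritten DMA MR decision above may be easier to follow; the function below is only a paraphrase, with boolean parameters standing in for the checks the real code performs, not code from the commit:

```c
#include <linux/errno.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>

/* Paraphrase of the new svc_rdma_accept() decision flow. */
static int example_choose_dma_mr(bool iwarp, bool ib_or_roce,
				 bool fast_reg, bool local_dma_lkey,
				 int *need_dma_mr, int *dma_mr_acc)
{
	if (!iwarp && !ib_or_roce)
		return -EINVAL;			/* unsupported protocol: errout */

	*need_dma_mr = 0;
	if (!fast_reg || !local_dma_lkey) {
		*need_dma_mr = 1;
		*dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		/* Without fast registration, iWARP RDMA Read sinks also
		 * need remote write access on the catch-all DMA MR. */
		if (iwarp && !fast_reg)
			*dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
	}
	return 0;
}
```
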
net/sunrpc/xprtrdma/verbs.c
@@ -105,32 +105,6 @@ rpcrdma_run_tasklet(unsigned long data)
 
 static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);
 
-static const char * const async_event[] = {
-	"CQ error",
-	"QP fatal error",
-	"QP request error",
-	"QP access error",
-	"communication established",
-	"send queue drained",
-	"path migration successful",
-	"path mig error",
-	"device fatal error",
-	"port active",
-	"port error",
-	"LID change",
-	"P_key change",
-	"SM change",
-	"SRQ error",
-	"SRQ limit reached",
-	"last WQE reached",
-	"client reregister",
-	"GID change",
-};
-
-#define ASYNC_MSG(status)					\
-	((status) < ARRAY_SIZE(async_event) ?			\
-		async_event[(status)] : "unknown async error")
-
 static void
 rpcrdma_schedule_tasklet(struct list_head *sched_list)
 {
@@ -148,7 +122,7 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 	struct rpcrdma_ep *ep = context;
 
 	pr_err("RPC:       %s: %s on device %s ep %p\n",
-	       __func__, ASYNC_MSG(event->event),
+	       __func__, ib_event_msg(event->event),
 	       event->device->name, context);
 	if (ep->rep_connected == 1) {
 		ep->rep_connected = -EIO;
@@ -163,7 +137,7 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
 	struct rpcrdma_ep *ep = context;
 
 	pr_err("RPC:       %s: %s on device %s ep %p\n",
-	       __func__, ASYNC_MSG(event->event),
+	       __func__, ib_event_msg(event->event),
 	       event->device->name, context);
 	if (ep->rep_connected == 1) {
 		ep->rep_connected = -EIO;
@@ -172,35 +146,6 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
 	}
 }
 
-static const char * const wc_status[] = {
-	"success",
-	"local length error",
-	"local QP operation error",
-	"local EE context operation error",
-	"local protection error",
-	"WR flushed",
-	"memory management operation error",
-	"bad response error",
-	"local access error",
-	"remote invalid request error",
-	"remote access error",
-	"remote operation error",
-	"transport retry counter exceeded",
-	"RNR retry counter exceeded",
-	"local RDD violation error",
-	"remove invalid RD request",
-	"operation aborted",
-	"invalid EE context number",
-	"invalid EE context state",
-	"fatal error",
-	"response timeout error",
-	"general error",
-};
-
-#define COMPLETION_MSG(status)					\
-	((status) < ARRAY_SIZE(wc_status) ?			\
-		wc_status[(status)] : "unexpected completion error")
-
 static void
 rpcrdma_sendcq_process_wc(struct ib_wc *wc)
 {
@@ -209,7 +154,7 @@ rpcrdma_sendcq_process_wc(struct ib_wc *wc)
 		if (wc->status != IB_WC_SUCCESS &&
 		    wc->status != IB_WC_WR_FLUSH_ERR)
 			pr_err("RPC:       %s: SEND: %s\n",
-			       __func__, COMPLETION_MSG(wc->status));
+			       __func__, ib_wc_status_msg(wc->status));
 	} else {
 		struct rpcrdma_mw *r;
@@ -302,7 +247,7 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
 out_fail:
 	if (wc->status != IB_WC_WR_FLUSH_ERR)
 		pr_err("RPC:       %s: rep %p: %s\n",
-		       __func__, rep, COMPLETION_MSG(wc->status));
+		       __func__, rep, ib_wc_status_msg(wc->status));
 	rep->rr_len = ~0U;
 	goto out_schedule;
 }
@@ -386,31 +331,6 @@ rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
 		rpcrdma_sendcq_process_wc(&wc);
 }
 
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-static const char * const conn[] = {
-	"address resolved",
-	"address error",
-	"route resolved",
-	"route error",
-	"connect request",
-	"connect response",
-	"connect error",
-	"unreachable",
-	"rejected",
-	"established",
-	"disconnected",
-	"device removal",
-	"multicast join",
-	"multicast error",
-	"address change",
-	"timewait exit",
-};
-
-#define CONNECTION_MSG(status)					\
-	((status) < ARRAY_SIZE(conn) ?				\
-		conn[(status)] : "unrecognized connection error")
-#endif
-
 static int
 rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 {
@@ -476,7 +396,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 	default:
 		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
 			__func__, sap, rpc_get_port(sap), ep,
-			CONNECTION_MSG(event->event));
+			rdma_event_msg(event->event));
 		break;
 	}