Commit 89fbb69c
Authored October 28, 2005 by Linus Torvalds
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
Parents: 7efe5d7c, 4cce3390

Showing 42 changed files with 2,671 additions and 1,686 deletions (+2671 / -1686)
drivers/infiniband/core/agent.c                 +76  -217
drivers/infiniband/core/agent.h                  +5    -8
drivers/infiniband/core/agent_priv.h             +0   -62
drivers/infiniband/core/cm.c                    +98  -119
drivers/infiniband/core/cm_msgs.h                +1    -0
drivers/infiniband/core/device.c                +12    -0
drivers/infiniband/core/mad.c                  +152  -183
drivers/infiniband/core/mad_priv.h               +3    -5
drivers/infiniband/core/mad_rmpp.c              +52   -62
drivers/infiniband/core/mad_rmpp.h               +2    -0
drivers/infiniband/core/sa_query.c             +139  -131
drivers/infiniband/core/smi.h                    +2    -0
drivers/infiniband/core/sysfs.c                 +16    -0
drivers/infiniband/core/ucm.c                  +199   -68
drivers/infiniband/core/user_mad.c             +200  -199
drivers/infiniband/core/uverbs.h                +49   -13
drivers/infiniband/core/uverbs_cmd.c           +677  -181
drivers/infiniband/core/uverbs_main.c          +321  -182
drivers/infiniband/core/verbs.c                 +12    -6
drivers/infiniband/hw/mthca/Makefile             +2    -1
drivers/infiniband/hw/mthca/mthca_catas.c      +153    -0
drivers/infiniband/hw/mthca/mthca_cmd.c          +9    -2
drivers/infiniband/hw/mthca/mthca_dev.h         +22    -0
drivers/infiniband/hw/mthca/mthca_eq.c          +18    -3
drivers/infiniband/hw/mthca/mthca_mad.c          +9   -63
drivers/infiniband/hw/mthca/mthca_main.c        +11    -0
drivers/infiniband/hw/mthca/mthca_mcg.c          +6    -5
drivers/infiniband/hw/mthca/mthca_memfree.c      +2    -1
drivers/infiniband/hw/mthca/mthca_memfree.h      +2    -1
drivers/infiniband/hw/mthca/mthca_provider.c    +46    -3
drivers/infiniband/hw/mthca/mthca_qp.c           +7    -9
drivers/infiniband/hw/mthca/mthca_srq.c         +41    -2
drivers/infiniband/hw/mthca/mthca_user.h         +6    -0
drivers/infiniband/ulp/ipoib/ipoib.h            +14    -9
drivers/infiniband/ulp/ipoib/ipoib_ib.c         +70   -50
drivers/infiniband/ulp/ipoib/ipoib_main.c        +9    -6
drivers/infiniband/ulp/ipoib/ipoib_verbs.c       +5    -4
include/rdma/ib_cm.h                             +6    -4
include/rdma/ib_mad.h                           +34   -32
include/rdma/ib_user_cm.h                        +4    -6
include/rdma/ib_user_verbs.h                   +176   -46
include/rdma/ib_verbs.h                          +3    -3
drivers/infiniband/core/agent.c
@@ -37,58 +37,41 @@
  * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
  */
-#include <linux/dma-mapping.h>
-
-#include <asm/bug.h>
+#include "agent.h"
+#include "smi.h"
 
-#include <rdma/ib_smi.h>
+#define SPFX "ib_agent: "
 
-#include "smi.h"
-#include "agent_priv.h"
-#include "mad_priv.h"
-#include "agent.h"
+struct ib_agent_port_private {
+	struct list_head port_list;
+	struct ib_mad_agent *agent[2];
+};
 
-spinlock_t ib_agent_port_list_lock;
+static DEFINE_SPINLOCK(ib_agent_port_list_lock);
 static LIST_HEAD(ib_agent_port_list);
 
-/*
- * Caller must hold ib_agent_port_list_lock
- */
-static inline struct ib_agent_port_private *
-__ib_get_agent_port(struct ib_device *device, int port_num,
-		    struct ib_mad_agent *mad_agent)
+static struct ib_agent_port_private *
+__ib_get_agent_port(struct ib_device *device, int port_num)
 {
 	struct ib_agent_port_private *entry;
 
-	BUG_ON(!(!!device ^ !!mad_agent));  /* Exactly one MUST be (!NULL) */
-
-	if (device) {
-		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-			if (entry->smp_agent->device == device &&
-			    entry->port_num == port_num)
-				return entry;
-		}
-	} else {
-		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-			if ((entry->smp_agent == mad_agent) ||
-			    (entry->perf_mgmt_agent == mad_agent))
-				return entry;
-		}
+	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
+		if (entry->agent[0]->device == device &&
+		    entry->agent[0]->port_num == port_num)
+			return entry;
 	}
 	return NULL;
 }
 
-static inline struct ib_agent_port_private *
-ib_get_agent_port(struct ib_device *device, int port_num,
-		  struct ib_mad_agent *mad_agent)
+static struct ib_agent_port_private *
+ib_get_agent_port(struct ib_device *device, int port_num)
 {
 	struct ib_agent_port_private *entry;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
-	entry = __ib_get_agent_port(device, port_num, mad_agent);
+	entry = __ib_get_agent_port(device, port_num);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
-
 	return entry;
 }

@@ -100,192 +83,76 @@ int smi_check_local_dr_smp(struct ib_smp *smp,
 	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 		return 1;
-	port_priv = ib_get_agent_port(device, port_num, NULL);
+
+	port_priv = ib_get_agent_port(device, port_num);
 	if (!port_priv) {
 		printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
-		       "not open\n",
-		       device->name, port_num);
+		       "not open\n", device->name, port_num);
 		return 1;
 	}
 
-	return smi_check_local_smp(port_priv->smp_agent, smp);
+	return smi_check_local_smp(port_priv->agent[0], smp);
 }
 
-static int agent_mad_send(struct ib_mad_agent *mad_agent,
-			  struct ib_agent_port_private *port_priv,
-			  struct ib_mad_private *mad_priv,
-			  struct ib_grh *grh,
-			  struct ib_wc *wc)
+int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
+			struct ib_wc *wc, struct ib_device *device,
+			int port_num, int qpn)
 {
-	struct ib_agent_send_wr *agent_send_wr;
-	struct ib_sge gather_list;
-	struct ib_send_wr send_wr;
-	struct ib_send_wr *bad_send_wr;
-	struct ib_ah_attr ah_attr;
-	unsigned long flags;
-	int ret = 1;
-
-	agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
-	if (!agent_send_wr)
-		goto out;
-	agent_send_wr->mad = mad_priv;
-
-	gather_list.addr = dma_map_single(mad_agent->device->dma_device,
-					  &mad_priv->mad,
-					  sizeof(mad_priv->mad),
-					  DMA_TO_DEVICE);
-	gather_list.length = sizeof(mad_priv->mad);
-	gather_list.lkey = mad_agent->mr->lkey;
-
-	send_wr.next = NULL;
-	send_wr.opcode = IB_WR_SEND;
-	send_wr.sg_list = &gather_list;
-	send_wr.num_sge = 1;
-	send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */
-	send_wr.wr.ud.timeout_ms = 0;
-	send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+	struct ib_agent_port_private *port_priv;
+	struct ib_mad_agent *agent;
+	struct ib_mad_send_buf *send_buf;
+	struct ib_ah *ah;
+	int ret;
 
-	ah_attr.dlid = wc->slid;
-	ah_attr.port_num = mad_agent->port_num;
-	ah_attr.src_path_bits = wc->dlid_path_bits;
-	ah_attr.sl = wc->sl;
-	ah_attr.static_rate = 0;
-	ah_attr.ah_flags = 0; /* No GRH */
-	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
-		if (wc->wc_flags & IB_WC_GRH) {
-			ah_attr.ah_flags = IB_AH_GRH;
-			/* Should sgid be looked up ? */
-			ah_attr.grh.sgid_index = 0;
-			ah_attr.grh.hop_limit = grh->hop_limit;
-			ah_attr.grh.flow_label = be32_to_cpu(
-				grh->version_tclass_flow) & 0xfffff;
-			ah_attr.grh.traffic_class = (be32_to_cpu(
-				grh->version_tclass_flow) >> 20) & 0xff;
-			memcpy(ah_attr.grh.dgid.raw, grh->sgid.raw,
-			       sizeof(ah_attr.grh.dgid));
-		}
+	port_priv = ib_get_agent_port(device, port_num);
+	if (!port_priv) {
+		printk(KERN_ERR SPFX "Unable to find port agent\n");
+		return -ENODEV;
 	}
 
-	agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
-	if (IS_ERR(agent_send_wr->ah)) {
-		printk(KERN_ERR SPFX "No memory for address handle\n");
-		kfree(agent_send_wr);
-		goto out;
+	agent = port_priv->agent[qpn];
+	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
+	if (IS_ERR(ah)) {
+		ret = PTR_ERR(ah);
+		printk(KERN_ERR SPFX "ib_create_ah_from_wc error:%d\n", ret);
+		return ret;
 	}
 
-	send_wr.wr.ud.ah = agent_send_wr->ah;
-	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
-		send_wr.wr.ud.pkey_index = wc->pkey_index;
-		send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
-	} else {	/* for SMPs */
-		send_wr.wr.ud.pkey_index = 0;
-		send_wr.wr.ud.remote_qkey = 0;
+	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
+				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+				      GFP_KERNEL);
+	if (IS_ERR(send_buf)) {
+		ret = PTR_ERR(send_buf);
+		printk(KERN_ERR SPFX "ib_create_send_mad error:%d\n", ret);
+		goto err1;
 	}
-	send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
-	send_wr.wr_id = (unsigned long)agent_send_wr;
 
-	pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);
-
-	/* Send */
-	spin_lock_irqsave(&port_priv->send_list_lock, flags);
-	if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
-		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(agent_send_wr, mapping),
-				 sizeof(mad_priv->mad),
-				 DMA_TO_DEVICE);
-		ib_destroy_ah(agent_send_wr->ah);
-		kfree(agent_send_wr);
-	} else {
-		list_add_tail(&agent_send_wr->send_list,
-			      &port_priv->send_posted_list);
-		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
-		ret = 0;
+	memcpy(send_buf->mad, mad, sizeof *mad);
+	send_buf->ah = ah;
+	if ((ret = ib_post_send_mad(send_buf, NULL))) {
+		printk(KERN_ERR SPFX "ib_post_send_mad error:%d\n", ret);
+		goto err2;
 	}
-
-out:
+	return 0;
+err2:
+	ib_free_send_mad(send_buf);
+err1:
+	ib_destroy_ah(ah);
 	return ret;
 }
 
-int agent_send(struct ib_mad_private *mad,
-	       struct ib_grh *grh,
-	       struct ib_wc *wc,
-	       struct ib_device *device,
-	       int port_num)
-{
-	struct ib_agent_port_private *port_priv;
-	struct ib_mad_agent *mad_agent;
-
-	port_priv = ib_get_agent_port(device, port_num, NULL);
-	if (!port_priv) {
-		printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
-		       device->name, port_num);
-		return 1;
-	}
-
-	/* Get mad agent based on mgmt_class in MAD */
-	switch (mad->mad.mad.mad_hdr.mgmt_class) {
-	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
-	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
-		mad_agent = port_priv->smp_agent;
-		break;
-	case IB_MGMT_CLASS_PERF_MGMT:
-		mad_agent = port_priv->perf_mgmt_agent;
-		break;
-	default:
-		return 1;
-	}
-
-	return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
-}
-
 static void agent_send_handler(struct ib_mad_agent *mad_agent,
 			       struct ib_mad_send_wc *mad_send_wc)
 {
-	struct ib_agent_port_private	*port_priv;
-	struct ib_agent_send_wr		*agent_send_wr;
-	unsigned long			flags;
-
-	/* Find matching MAD agent */
-	port_priv = ib_get_agent_port(NULL, 0, mad_agent);
-	if (!port_priv) {
-		printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
-		       "agent %p\n", mad_agent);
-		return;
-	}
-
-	agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)
-			mad_send_wc->wr_id;
-	spin_lock_irqsave(&port_priv->send_list_lock, flags);
-	/* Remove completed send from posted send MAD list */
-	list_del(&agent_send_wr->send_list);
-	spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
-
-	dma_unmap_single(mad_agent->device->dma_device,
-			 pci_unmap_addr(agent_send_wr, mapping),
-			 sizeof(agent_send_wr->mad->mad),
-			 DMA_TO_DEVICE);
-
-	ib_destroy_ah(agent_send_wr->ah);
-
-	/* Release allocated memory */
-	kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
-
-	kfree(agent_send_wr);
+	ib_destroy_ah(mad_send_wc->send_buf->ah);
+	ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 int ib_agent_port_open(struct ib_device *device, int port_num)
 {
-	int ret;
 	struct ib_agent_port_private *port_priv;
 	unsigned long flags;
-
-	/* First, check if port already open for SMI */
-	port_priv = ib_get_agent_port(device, port_num, NULL);
-	if (port_priv) {
-		printk(KERN_DEBUG SPFX "%s port %d already open\n",
-		       device->name, port_num);
-		return 0;
-	}
+	int ret;
 
 	/* Create new device info */
 	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);

@@ -294,32 +161,25 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		ret = -ENOMEM;
 		goto error1;
 	}
 	memset(port_priv, 0, sizeof *port_priv);
-	port_priv->port_num = port_num;
-	spin_lock_init(&port_priv->send_list_lock);
-	INIT_LIST_HEAD(&port_priv->send_posted_list);
 
-	/* Obtain send only MAD agent for SM class (SMI QP) */
-	port_priv->smp_agent = ib_register_mad_agent(device, port_num,
-						     IB_QPT_SMI,
-						     NULL, 0,
+	/* Obtain send only MAD agent for SMI QP */
+	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+						    IB_QPT_SMI, NULL, 0,
 						    &agent_send_handler,
 						    NULL, NULL);
-	if (IS_ERR(port_priv->smp_agent)) {
-		ret = PTR_ERR(port_priv->smp_agent);
+	if (IS_ERR(port_priv->agent[0])) {
+		ret = PTR_ERR(port_priv->agent[0]);
 		goto error2;
 	}
 
-	/* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
-	port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
-							   IB_QPT_GSI,
-							   NULL, 0,
-							   &agent_send_handler,
-							   NULL, NULL);
-	if (IS_ERR(port_priv->perf_mgmt_agent)) {
-		ret = PTR_ERR(port_priv->perf_mgmt_agent);
+	/* Obtain send only MAD agent for GSI QP */
+	port_priv->agent[1] = ib_register_mad_agent(device, port_num,
+						    IB_QPT_GSI, NULL, 0,
+						    &agent_send_handler,
+						    NULL, NULL);
+	if (IS_ERR(port_priv->agent[1])) {
+		ret = PTR_ERR(port_priv->agent[1]);
 		goto error3;
 	}

@@ -330,7 +190,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	return 0;
 
 error3:
-	ib_unregister_mad_agent(port_priv->smp_agent);
+	ib_unregister_mad_agent(port_priv->agent[0]);
 error2:
 	kfree(port_priv);
 error1:

@@ -343,7 +203,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
-	port_priv = __ib_get_agent_port(device, port_num, NULL);
+	port_priv = __ib_get_agent_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 		printk(KERN_ERR SPFX "Port %d not found\n", port_num);

@@ -352,9 +212,8 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	list_del(&port_priv->port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
-	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
-	ib_unregister_mad_agent(port_priv->smp_agent);
+	ib_unregister_mad_agent(port_priv->agent[1]);
+	ib_unregister_mad_agent(port_priv->agent[0]);
 	kfree(port_priv);
 
 	return 0;
 }
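The rewritten response path above also replaces the hand-built ib_ah_attr (dlid, sl, path bits, plus optional GRH fields) with ib_create_ah_from_wc(), which derives the reply address handle directly from the receive completion. The idiom, condensed from agent_send_response() (names as in the hunk):

	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);
	/* attach to send_buf->ah; agent_send_handler() destroys it on completion */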
drivers/infiniband/core/agent.h
@@ -39,17 +39,14 @@
 #ifndef __AGENT_H_
 #define __AGENT_H_
 
-extern spinlock_t ib_agent_port_list_lock;
+#include <rdma/ib_mad.h>
 
-extern int ib_agent_port_open(struct ib_device *device,
-			      int port_num);
+extern int ib_agent_port_open(struct ib_device *device, int port_num);
 
 extern int ib_agent_port_close(struct ib_device *device, int port_num);
 
-extern int agent_send(struct ib_mad_private *mad,
-		      struct ib_grh *grh,
-		      struct ib_wc *wc,
-		      struct ib_device *device,
-		      int port_num);
+extern int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
+			       struct ib_wc *wc, struct ib_device *device,
			       int port_num, int qpn);
 
 #endif	/* __AGENT_H_ */
drivers/infiniband/core/agent_priv.h
deleted file mode 100644
/*
* Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
*/
#ifndef __IB_AGENT_PRIV_H__
#define __IB_AGENT_PRIV_H__
#include <linux/pci.h>
#define SPFX "ib_agent: "
struct ib_agent_send_wr {
	struct list_head send_list;
	struct ib_ah *ah;
	struct ib_mad_private *mad;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

struct ib_agent_port_private {
	struct list_head port_list;
	struct list_head send_posted_list;
	spinlock_t send_list_lock;
	int port_num;
	struct ib_mad_agent *smp_agent;       /* SM class */
	struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
};

#endif	/* __IB_AGENT_PRIV_H__ */
drivers/infiniband/core/cm.c
	(diff collapsed)
drivers/infiniband/core/cm_msgs.h
@@ -186,6 +186,7 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 		req_msg->offset40 = cpu_to_be32((be32_to_cpu(req_msg->offset40) &
 						  0xFFFFFFF9) | 0x2);
+		break;
 	default:
 		req_msg->offset40 = cpu_to_be32(be32_to_cpu(req_msg->offset40) &
drivers/infiniband/core/device.c
@@ -514,6 +514,12 @@ int ib_query_port(struct ib_device *device,
 		  u8 port_num,
 		  struct ib_port_attr *port_attr)
 {
+	if (device->node_type == IB_NODE_SWITCH) {
+		if (port_num)
+			return -EINVAL;
+	} else if (port_num < 1 || port_num > device->phys_port_cnt)
+		return -EINVAL;
+
 	return device->query_port(device, port_num, port_attr);
 }
 EXPORT_SYMBOL(ib_query_port);

@@ -583,6 +589,12 @@ int ib_modify_port(struct ib_device *device,
 		   u8 port_num, int port_modify_mask,
 		   struct ib_port_modify *port_modify)
 {
+	if (device->node_type == IB_NODE_SWITCH) {
+		if (port_num)
+			return -EINVAL;
+	} else if (port_num < 1 || port_num > device->phys_port_cnt)
+		return -EINVAL;
+
 	return device->modify_port(device, port_num, port_modify_mask,
 				   port_modify);
 }
drivers/infiniband/core/mad.c
	(diff collapsed)
drivers/infiniband/core/mad_priv.h
@@ -118,9 +118,10 @@ struct ib_mad_send_wr_private {
 	struct ib_mad_list_head mad_list;
 	struct list_head agent_list;
 	struct ib_mad_agent_private *mad_agent_priv;
+	struct ib_mad_send_buf send_buf;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
-	u64 wr_id;			/* client WR ID */
 	__be64 tid;
 	unsigned long timeout;
 	int retries;

@@ -141,10 +142,7 @@ struct ib_mad_local_private {
 	struct list_head completion_list;
 	struct ib_mad_private *mad_priv;
 	struct ib_mad_agent_private *recv_mad_agent;
-	struct ib_send_wr send_wr;
-	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
-	u64 wr_id;			/* client WR ID */
-	__be64 tid;
+	struct ib_mad_send_wr_private *mad_send_wr;
 };
 
 struct ib_mad_mgmt_method_table {
drivers/infiniband/core/mad_rmpp.c
@@ -103,12 +103,12 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 static int data_offset(u8 mgmt_class)
 {
 	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return offsetof(struct ib_sa_mad, data);
+		return IB_MGMT_SA_HDR;
 	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return offsetof(struct ib_vendor_mad, data);
+		return IB_MGMT_VENDOR_HDR;
 	else
-		return offsetof(struct ib_rmpp_mad, data);
+		return IB_MGMT_RMPP_HDR;
 }
 
 static void format_ack(struct ib_rmpp_mad *ack,

@@ -135,55 +135,52 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 		     struct ib_mad_recv_wc *recv_wc)
 {
 	struct ib_mad_send_buf *msg;
-	struct ib_send_wr *bad_send_wr;
-	int hdr_len, ret;
+	int ret;
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
-				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
-				 GFP_KERNEL);
+				 recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR,
+				 IB_MGMT_RMPP_DATA, GFP_KERNEL);
 	if (!msg)
 		return;
 
-	format_ack((struct ib_rmpp_mad *) msg->mad,
-		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
-	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
-			       &bad_send_wr);
+	format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad,
+		   rmpp_recv);
+	msg->ah = rmpp_recv->ah;
+	ret = ib_post_send_mad(msg, NULL);
 	if (ret)
 		ib_free_send_mad(msg);
 }
 
-static int alloc_response_msg(struct ib_mad_agent *agent,
-			      struct ib_mad_recv_wc *recv_wc,
-			      struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
+						  struct ib_mad_recv_wc *recv_wc)
 {
-	struct ib_mad_send_buf *m;
+	struct ib_mad_send_buf *msg;
 	struct ib_ah *ah;
-	int hdr_len;
 
 	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
 				  recv_wc->recv_buf.grh, agent->port_num);
 	if (IS_ERR(ah))
-		return PTR_ERR(ah);
+		return (void *) ah;
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
-	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
-			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
-			       sizeof(struct ib_rmpp_mad) - hdr_len,
-			       GFP_KERNEL);
-	if (IS_ERR(m)) {
+	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
+				 recv_wc->wc->pkey_index, 1,
+				 IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
+				 GFP_KERNEL);
+	if (IS_ERR(msg))
 		ib_destroy_ah(ah);
-		return PTR_ERR(m);
-	}
-	*msg = m;
-	return 0;
+	else
+		msg->ah = ah;
+
+	return msg;
 }
 
-static void free_msg(struct ib_mad_send_buf *msg)
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
-	ib_destroy_ah(msg->send_wr.wr.ud.ah);
-	ib_free_send_mad(msg);
+	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
+		ib_destroy_ah(mad_send_wc->send_buf->ah);
+	ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 static void nack_recv(struct ib_mad_agent_private *agent,

@@ -191,14 +188,13 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 {
 	struct ib_mad_send_buf *msg;
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_send_wr *bad_send_wr;
 	int ret;
 
-	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
-	if (ret)
+	msg = alloc_response_msg(&agent->agent, recv_wc);
+	if (IS_ERR(msg))
 		return;
 
-	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+	rmpp_mad = msg->mad;
 	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
 	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));

@@ -210,9 +206,11 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 	rmpp_mad->rmpp_hdr.seg_num = 0;
 	rmpp_mad->rmpp_hdr.paylen_newwin = 0;
 
-	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
-	if (ret)
-		free_msg(msg);
+	ret = ib_post_send_mad(msg, NULL);
+	if (ret) {
+		ib_destroy_ah(msg->ah);
+		ib_free_send_mad(msg);
+	}
 }
 
 static void recv_timeout_handler(void *data)

@@ -585,7 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	int timeout;
 	u32 paylen;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
 	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);

@@ -612,7 +610,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 
 	/* 2 seconds for an ACK until we can find the packet lifetime */
-	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
+	timeout = mad_send_wr->send_buf.timeout_ms;
 	if (!timeout || timeout > 2000)
 		mad_send_wr->timeout = msecs_to_jiffies(2000);
 	mad_send_wr->seg_num++;

@@ -640,7 +638,7 @@ static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
 	wc.status = IB_WC_REM_ABORT_ERR;
 	wc.vendor_err = rmpp_status;
-	wc.wr_id = mad_send_wr->wr_id;
+	wc.send_buf = &mad_send_wr->send_buf;
 	ib_mad_complete_send_wr(mad_send_wr, &wc);
 	return;
 out:

@@ -694,12 +692,12 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	if (seg_num > mad_send_wr->last_ack) {
 		mad_send_wr->last_ack = seg_num;
-		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+		mad_send_wr->retries = mad_send_wr->send_buf.retries;
 	}
 	mad_send_wr->newwin = newwin;
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		/* If no response is expected, the ACK completes the send */
-		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
+		if (!mad_send_wr->send_buf.timeout_ms) {
 			struct ib_mad_send_wc wc;
 
 			ib_mark_mad_done(mad_send_wr);

@@ -707,13 +705,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 			wc.status = IB_WC_SUCCESS;
 			wc.vendor_err = 0;
-			wc.wr_id = mad_send_wr->wr_id;
+			wc.send_buf = &mad_send_wr->send_buf;
 			ib_mad_complete_send_wr(mad_send_wr, &wc);
 			return;
 		}
 		if (mad_send_wr->refcount == 1)
-			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
-					     send_wr.wr.ud.timeout_ms);
+			ib_reset_mad_timeout(mad_send_wr,
+					     mad_send_wr->send_buf.timeout_ms);
 	} else if (mad_send_wr->refcount == 1 &&
 		   mad_send_wr->seg_num < mad_send_wr->newwin &&
 		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {

@@ -842,7 +840,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int i, total_len, ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED;

@@ -863,7 +861,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
 			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
-	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
+	mad_send_wr->pad = total_len - IB_MGMT_RMPP_HDR -
 			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 
 	/* We need to wait for the final ACK even if there isn't a response */

@@ -878,23 +876,15 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 			    struct ib_mad_send_wc *mad_send_wc)
 {
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_mad_send_buf *msg;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
 
-	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
-		msg = (struct ib_mad_send_buf *) (unsigned long)
-		      mad_send_wc->wr_id;
-		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
-			ib_free_send_mad(msg);
-		else
-			free_msg(msg);
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
 		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
-	}
 
 	if (mad_send_wc->status != IB_WC_SUCCESS ||
 	    mad_send_wr->status != IB_WC_SUCCESS)

@@ -905,7 +895,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		mad_send_wr->timeout =
-			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
+			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 		return IB_RMPP_RESULT_PROCESSED; /* Send done */
 	}

@@ -926,7 +916,7 @@ int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
drivers/infiniband/core/mad_rmpp.h
@@ -51,6 +51,8 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 			    struct ib_mad_send_wc *mad_send_wc);
 
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc);
+
 void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent);
 
 int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr);
drivers/infiniband/core/sa_query.c
@@ -73,11 +73,10 @@ struct ib_sa_device {
 struct ib_sa_query {
 	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
 	void (*release)(struct ib_sa_query *);
-	struct ib_sa_port  *port;
-	struct ib_sa_mad   *mad;
-	struct ib_sa_sm_ah *sm_ah;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
-	int                 id;
+	struct ib_sa_port      *port;
+	struct ib_mad_send_buf *mad_buf;
+	struct ib_sa_sm_ah     *sm_ah;
+	int                     id;
 };
 
 struct ib_sa_service_query {

@@ -426,6 +425,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 {
 	unsigned long flags;
 	struct ib_mad_agent *agent;
+	struct ib_mad_send_buf *mad_buf;
 
 	spin_lock_irqsave(&idr_lock, flags);
 	if (idr_find(&query_idr, id) != query) {

@@ -433,9 +433,10 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 		return;
 	}
 	agent = query->port->agent;
+	mad_buf = query->mad_buf;
 	spin_unlock_irqrestore(&idr_lock, flags);
 
-	ib_cancel_mad(agent, id);
+	ib_cancel_mad(agent, mad_buf);
 }
 EXPORT_SYMBOL(ib_sa_cancel_query);

@@ -457,71 +458,46 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
 static int send_mad(struct ib_sa_query *query, int timeout_ms)
 {
-	struct ib_sa_port *port = query->port;
 	unsigned long flags;
-	int ret;
-	struct ib_sge      gather_list;
-	struct ib_send_wr *bad_wr, wr = {
-		.opcode      = IB_WR_SEND,
-		.sg_list     = &gather_list,
-		.num_sge     = 1,
-		.send_flags  = IB_SEND_SIGNALED,
-		.wr	     = {
-			 .ud = {
-				 .mad_hdr     = &query->mad->mad_hdr,
-				 .remote_qpn  = 1,
-				 .remote_qkey = IB_QP1_QKEY,
-				 .timeout_ms  = timeout_ms,
-			 }
-		 }
-	};
+	int ret, id;
 
retry:
 	if (!idr_pre_get(&query_idr, GFP_ATOMIC))
 		return -ENOMEM;
 	spin_lock_irqsave(&idr_lock, flags);
-	ret = idr_get_new(&query_idr, query, &query->id);
+	ret = idr_get_new(&query_idr, query, &id);
 	spin_unlock_irqrestore(&idr_lock, flags);
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		return ret;
 
-	wr.wr_id = query->id;
+	query->mad_buf->timeout_ms = timeout_ms;
+	query->mad_buf->context[0] = query;
+	query->id = id;
 
-	spin_lock_irqsave(&port->ah_lock, flags);
-	kref_get(&port->sm_ah->ref);
-	query->sm_ah = port->sm_ah;
-	wr.wr.ud.ah  = port->sm_ah->ah;
-	spin_unlock_irqrestore(&port->ah_lock, flags);
+	spin_lock_irqsave(&query->port->ah_lock, flags);
+	kref_get(&query->port->sm_ah->ref);
+	query->sm_ah = query->port->sm_ah;
+	spin_unlock_irqrestore(&query->port->ah_lock, flags);
 
-	gather_list.addr   = dma_map_single(port->agent->device->dma_device,
-					    query->mad,
-					    sizeof (struct ib_sa_mad),
-					    DMA_TO_DEVICE);
-	gather_list.length = sizeof (struct ib_sa_mad);
-	gather_list.lkey   = port->agent->mr->lkey;
-	pci_unmap_addr_set(query, mapping, gather_list.addr);
+	query->mad_buf->ah = query->sm_ah->ah;
 
-	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
+	ret = ib_post_send_mad(query->mad_buf, NULL);
 	if (ret) {
-		dma_unmap_single(port->agent->device->dma_device,
-				 pci_unmap_addr(query, mapping),
-				 sizeof (struct ib_sa_mad),
-				 DMA_TO_DEVICE);
-		kref_put(&query->sm_ah->ref, free_sm_ah);
 		spin_lock_irqsave(&idr_lock, flags);
-		idr_remove(&query_idr, query->id);
+		idr_remove(&query_idr, id);
 		spin_unlock_irqrestore(&idr_lock, flags);
+
+		kref_put(&query->sm_ah->ref, free_sm_ah);
 	}
 
 	/*
 	 * It's not safe to dereference query any more, because the
 	 * send may already have completed and freed the query in
-	 * another context.  So use wr.wr_id, which has a copy of the
-	 * query's id.
+	 * another context.
 	 */
-	return ret ? ret : wr.wr_id;
+	return ret ? ret : id;
 }
 
 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,

@@ -543,7 +519,6 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 {
-	kfree(sa_query->mad);
 	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
 }

@@ -583,43 +558,58 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 {
 	struct ib_sa_path_query *query;
 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
-	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
-	struct ib_mad_agent *agent  = port->agent;
+	struct ib_sa_port   *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
 	int ret;
 
+	if (!sa_dev)
+		return -ENODEV;
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
 	query = kmalloc(sizeof *query, gfp_mask);
 	if (!query)
 		return -ENOMEM;
-	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-	if (!query->sa_query.mad) {
-		kfree(query);
-		return -ENOMEM;
+
+	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+						     0, IB_MGMT_SA_HDR,
+						     IB_MGMT_SA_DATA, gfp_mask);
+	if (!query->sa_query.mad_buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
 
 	query->callback = callback;
 	query->context  = context;
 
-	init_mad(query->sa_query.mad, agent);
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
 
-	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
-	query->sa_query.release  = ib_sa_path_rec_release;
-	query->sa_query.port     = port;
-	query->sa_query.mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
-	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
-	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
+	query->sa_query.release  = ib_sa_path_rec_release;
+	query->sa_query.port     = port;
+	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
+	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
+	mad->sa_hdr.comp_mask	 = comp_mask;
 
-	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec,
-		query->sa_query.mad->data);
+	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
 
 	*sa_query = &query->sa_query;
 
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret < 0) {
-		*sa_query = NULL;
-		kfree(query->sa_query.mad);
-		kfree(query);
-	}
+	if (ret < 0)
+		goto err2;
+
+	return ret;
 
+err2:
+	*sa_query = NULL;
+	ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+	kfree(query);
 	return ret;
 }
 EXPORT_SYMBOL(ib_sa_path_rec_get);

@@ -643,7 +633,6 @@ static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
 {
-	kfree(sa_query->mad);
 	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
 }

@@ -685,10 +674,17 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 {
 	struct ib_sa_service_query *query;
 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
-	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
-	struct ib_mad_agent *agent  = port->agent;
+	struct ib_sa_port   *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
 	int ret;
 
+	if (!sa_dev)
+		return -ENODEV;
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
 	if (method != IB_MGMT_METHOD_GET &&
 	    method != IB_MGMT_METHOD_SET &&
 	    method != IB_SA_METHOD_DELETE)

@@ -697,37 +693,45 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 	query = kmalloc(sizeof *query, gfp_mask);
 	if (!query)
 		return -ENOMEM;
-	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-	if (!query->sa_query.mad) {
-		kfree(query);
-		return -ENOMEM;
+
+	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+						     0, IB_MGMT_SA_HDR,
+						     IB_MGMT_SA_DATA, gfp_mask);
+	if (!query->sa_query.mad_buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
 
 	query->callback = callback;
 	query->context  = context;
 
-	init_mad(query->sa_query.mad, agent);
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
 
-	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
-	query->sa_query.release  = ib_sa_service_rec_release;
-	query->sa_query.port     = port;
-	query->sa_query.mad->mad_hdr.method   = method;
-	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
-	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
+	query->sa_query.release  = ib_sa_service_rec_release;
+	query->sa_query.port     = port;
+	mad->mad_hdr.method	 = method;
+	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+	mad->sa_hdr.comp_mask	 = comp_mask;
 
 	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
-		rec, query->sa_query.mad->data);
+		rec, mad->data);
 
 	*sa_query = &query->sa_query;
 
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret < 0) {
-		*sa_query = NULL;
-		kfree(query->sa_query.mad);
-		kfree(query);
-	}
+	if (ret < 0)
+		goto err2;
+
+	return ret;
 
+err2:
+	*sa_query = NULL;
+	ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+	kfree(query);
 	return ret;
 }
 EXPORT_SYMBOL(ib_sa_service_rec_query);

@@ -751,7 +755,6 @@ static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
 {
-	kfree(sa_query->mad);
 	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
 }

@@ -768,60 +771,69 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 {
 	struct ib_sa_mcmember_query *query;
 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
-	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
-	struct ib_mad_agent *agent  = port->agent;
+	struct ib_sa_port   *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
 	int ret;
 
+	if (!sa_dev)
+		return -ENODEV;
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
 	query = kmalloc(sizeof *query, gfp_mask);
 	if (!query)
 		return -ENOMEM;
-	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-	if (!query->sa_query.mad) {
-		kfree(query);
-		return -ENOMEM;
+
+	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+						     0, IB_MGMT_SA_HDR,
+						     IB_MGMT_SA_DATA, gfp_mask);
+	if (!query->sa_query.mad_buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
 
 	query->callback = callback;
 	query->context  = context;
 
-	init_mad(query->sa_query.mad, agent);
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
 
-	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
-	query->sa_query.release  = ib_sa_mcmember_rec_release;
-	query->sa_query.port     = port;
-	query->sa_query.mad->mad_hdr.method   = method;
-	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
-	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
+	query->sa_query.release  = ib_sa_mcmember_rec_release;
+	query->sa_query.port     = port;
+	mad->mad_hdr.method	 = method;
+	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
+	mad->sa_hdr.comp_mask	 = comp_mask;
 
 	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
-		rec, query->sa_query.mad->data);
+		rec, mad->data);
 
 	*sa_query = &query->sa_query;
 
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret < 0) {
-		*sa_query = NULL;
-		kfree(query->sa_query.mad);
-		kfree(query);
-	}
+	if (ret < 0)
+		goto err2;
+
+	return ret;
 
+err2:
+	*sa_query = NULL;
+	ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+	kfree(query);
 	return ret;
 }
 EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
 
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *mad_send_wc)
 {
-	struct ib_sa_query *query;
+	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
 	unsigned long flags;
 
-	spin_lock_irqsave(&idr_lock, flags);
-	query = idr_find(&query_idr, mad_send_wc->wr_id);
-	spin_unlock_irqrestore(&idr_lock, flags);
-
-	if (!query)
-		return;
-
 	if (query->callback)
 		switch (mad_send_wc->status) {
 		case IB_WC_SUCCESS:

@@ -838,30 +850,25 @@ static void send_handler(struct ib_mad_agent *agent,
 			break;
 		}
 
-	dma_unmap_single(agent->device->dma_device,
-			 pci_unmap_addr(query, mapping),
-			 sizeof (struct ib_sa_mad),
-			 DMA_TO_DEVICE);
-	kref_put(&query->sm_ah->ref, free_sm_ah);
-
-	query->release(query);
-
 	spin_lock_irqsave(&idr_lock, flags);
-	idr_remove(&query_idr, mad_send_wc->wr_id);
+	idr_remove(&query_idr, query->id);
 	spin_unlock_irqrestore(&idr_lock, flags);
+
+	ib_free_send_mad(mad_send_wc->send_buf);
+	kref_put(&query->sm_ah->ref, free_sm_ah);
+	query->release(query);
 }
 
 static void recv_handler(struct ib_mad_agent *mad_agent,
 			 struct ib_mad_recv_wc *mad_recv_wc)
 {
 	struct ib_sa_query *query;
-	unsigned long flags;
+	struct ib_mad_send_buf *mad_buf;
 
-	spin_lock_irqsave(&idr_lock, flags);
-	query = idr_find(&query_idr, mad_recv_wc->wc->wr_id);
-	spin_unlock_irqrestore(&idr_lock, flags);
+	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
+	query = mad_buf->context[0];
 
-	if (query && query->callback) {
+	if (query->callback) {
 		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
 			query->callback(query,
 					mad_recv_wc->recv_buf.mad->mad_hdr.status ?

@@ -975,6 +982,7 @@ static int __init ib_sa_init(void)
 static void __exit ib_sa_cleanup(void)
 {
 	ib_unregister_client(&sa_client);
+	idr_destroy(&query_idr);
 }
 
 module_init(ib_sa_init);
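With the send buffer carrying a context pointer, sa_query.c no longer looks queries up by wr_id in an idr from its completion handlers. The pattern, condensed from send_mad()/send_handler() above (names as in the hunks):

	/* before posting */
	query->mad_buf->context[0] = query;

	/* in the send completion handler */
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];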
drivers/infiniband/core/smi.h
@@ -39,6 +39,8 @@
 #ifndef __SMI_H_
 #define __SMI_H_
 
+#include <rdma/ib_smi.h>
+
 int smi_handle_dr_smp_recv(struct ib_smp *smp,
 			   u8 node_type,
 			   int port_num,
drivers/infiniband/core/sysfs.c
@@ -65,6 +65,11 @@ struct port_table_attribute {
 	int			index;
 };
 
+static inline int ibdev_is_alive(const struct ib_device *dev)
+{
+	return dev->reg_state == IB_DEV_REGISTERED;
+}
+
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {

@@ -74,6 +79,8 @@ static ssize_t port_attr_show(struct kobject *kobj,
 
 	if (!port_attr->show)
 		return -EIO;
+	if (!ibdev_is_alive(p->ibdev))
+		return -ENODEV;
 
 	return port_attr->show(p, port_attr, buf);
 }

@@ -581,6 +588,9 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf)
 {
 	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	switch (dev->node_type) {
 	case IB_NODE_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
 	case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);

@@ -595,6 +605,9 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
 	struct ib_device_attr attr;
 	ssize_t ret;
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;

@@ -612,6 +625,9 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 	struct ib_device_attr attr;
 	ssize_t ret;
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
drivers/infiniband/core/ucm.c
浏览文件 @
89fbb69c
...
...
@@ -41,37 +41,81 @@
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <asm/uaccess.h>
#include "ucm.h"
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
MODULE_AUTHOR
(
"Libor Michalek"
);
MODULE_DESCRIPTION
(
"InfiniBand userspace Connection Manager access"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
static
int
ucm_debug_level
;
struct
ib_ucm_device
{
int
devnum
;
struct
cdev
dev
;
struct
class_device
class_dev
;
struct
ib_device
*
ib_dev
;
};
struct
ib_ucm_file
{
struct
semaphore
mutex
;
struct
file
*
filp
;
struct
ib_ucm_device
*
device
;
struct
list_head
ctxs
;
struct
list_head
events
;
wait_queue_head_t
poll_wait
;
};
struct
ib_ucm_context
{
int
id
;
wait_queue_head_t
wait
;
atomic_t
ref
;
int
events_reported
;
struct
ib_ucm_file
*
file
;
struct
ib_cm_id
*
cm_id
;
__u64
uid
;
struct
list_head
events
;
/* list of pending events. */
struct
list_head
file_list
;
/* member in file ctx list */
};
struct
ib_ucm_event
{
struct
ib_ucm_context
*
ctx
;
struct
list_head
file_list
;
/* member in file event list */
struct
list_head
ctx_list
;
/* member in ctx event list */
module_param_named
(
debug_level
,
ucm_debug_level
,
int
,
0644
);
MODULE_PARM_DESC
(
debug_level
,
"Enable debug tracing if > 0"
);
struct
ib_cm_id
*
cm_id
;
struct
ib_ucm_event_resp
resp
;
void
*
data
;
void
*
info
;
int
data_len
;
int
info_len
;
};
enum
{
IB_UCM_MAJOR
=
231
,
IB_UCM_MINOR
=
255
IB_UCM_BASE_MINOR
=
224
,
IB_UCM_MAX_DEVICES
=
32
};
#define IB_UCM_
DEV MKDEV(IB_UCM_MAJOR, IB_UCM
_MINOR)
#define IB_UCM_
BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE
_MINOR)
#define PFX "UCM: "
static
void
ib_ucm_add_one
(
struct
ib_device
*
device
);
static
void
ib_ucm_remove_one
(
struct
ib_device
*
device
);
#define ucm_dbg(format, arg...) \
do { \
if (ucm_debug_level > 0) \
printk(KERN_DEBUG PFX format, ## arg); \
} while (0)
static
struct
ib_client
ucm_client
=
{
.
name
=
"ucm"
,
.
add
=
ib_ucm_add_one
,
.
remove
=
ib_ucm_remove_one
};
static
struct
semaphore
ctx_id_mutex
;
static
struct
idr
ctx_id_table
;
static
DECLARE_MUTEX
(
ctx_id_mutex
);
static
DEFINE_IDR
(
ctx_id_table
);
static
DECLARE_BITMAP
(
dev_map
,
IB_UCM_MAX_DEVICES
);
static
struct
ib_ucm_context
*
ib_ucm_ctx_get
(
struct
ib_ucm_file
*
file
,
int
id
)
{
...
...
@@ -152,17 +196,13 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
goto
error
;
list_add_tail
(
&
ctx
->
file_list
,
&
file
->
ctxs
);
ucm_dbg
(
"Allocated CM ID <%d>
\n
"
,
ctx
->
id
);
return
ctx
;
error:
kfree
(
ctx
);
return
NULL
;
}
/*
* Event portion of the API, handle CM events
* and allow event polling.
*/
static
void
ib_ucm_event_path_get
(
struct
ib_ucm_path_rec
*
upath
,
struct
ib_sa_path_rec
*
kpath
)
{
...
...
@@ -209,6 +249,7 @@ static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
ureq
->
retry_count
=
kreq
->
retry_count
;
ureq
->
rnr_retry_count
=
kreq
->
rnr_retry_count
;
ureq
->
srq
=
kreq
->
srq
;
ureq
->
port
=
kreq
->
port
;
ib_ucm_event_path_get
(
&
ureq
->
primary_path
,
kreq
->
primary_path
);
ib_ucm_event_path_get
(
&
ureq
->
alternate_path
,
kreq
->
alternate_path
);
...
...
@@ -295,6 +336,8 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
case
IB_CM_SIDR_REQ_RECEIVED
:
uvt
->
resp
.
u
.
sidr_req_resp
.
pkey
=
evt
->
param
.
sidr_req_rcvd
.
pkey
;
uvt
->
resp
.
u
.
sidr_req_resp
.
port
=
evt
->
param
.
sidr_req_rcvd
.
port
;
uvt
->
data_len
=
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE
;
break
;
case
IB_CM_SIDR_REP_RECEIVED
:
...
...
@@ -387,9 +430,7 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
if
(
copy_from_user
(
&
cmd
,
inbuf
,
sizeof
(
cmd
)))
return
-
EFAULT
;
/*
* wait
*/
down
(
&
file
->
mutex
);
while
(
list_empty
(
&
file
->
events
))
{
...
...
@@ -471,7 +512,6 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
return
result
;
}
static
ssize_t
ib_ucm_create_id
(
struct
ib_ucm_file
*
file
,
const
char
__user
*
inbuf
,
int
in_len
,
int
out_len
)
...
...
@@ -494,29 +534,27 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
return
-
ENOMEM
;
ctx
->
uid
=
cmd
.
uid
;
ctx
->
cm_id
=
ib_create_cm_id
(
ib_ucm_event_handler
,
ctx
);
ctx
->
cm_id
=
ib_create_cm_id
(
file
->
device
->
ib_dev
,
ib_ucm_event_handler
,
ctx
);
if
(
IS_ERR
(
ctx
->
cm_id
))
{
result
=
PTR_ERR
(
ctx
->
cm_id
);
goto
err
;
goto
err
1
;
}
resp
.
id
=
ctx
->
id
;
if
(
copy_to_user
((
void
__user
*
)(
unsigned
long
)
cmd
.
response
,
&
resp
,
sizeof
(
resp
)))
{
result
=
-
EFAULT
;
goto
err
;
goto
err
2
;
}
return
0
;
err:
err2:
ib_destroy_cm_id
(
ctx
->
cm_id
);
err1:
down
(
&
ctx_id_mutex
);
idr_remove
(
&
ctx_id_table
,
ctx
->
id
);
up
(
&
ctx_id_mutex
);
if
(
!
IS_ERR
(
ctx
->
cm_id
))
ib_destroy_cm_id
(
ctx
->
cm_id
);
kfree
(
ctx
);
return
result
;
}
...
...
@@ -1184,9 +1222,6 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
if
(
copy_from_user
(
&
hdr
,
buf
,
sizeof
(
hdr
)))
return
-
EFAULT
;
ucm_dbg
(
"Write. cmd <%d> in <%d> out <%d> len <%Zu>
\n
"
,
hdr
.
cmd
,
hdr
.
in
,
hdr
.
out
,
len
);
if
(
hdr
.
cmd
<
0
||
hdr
.
cmd
>=
ARRAY_SIZE
(
ucm_cmd_table
))
return
-
EINVAL
;
...
...
@@ -1231,8 +1266,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
filp
->
private_data
=
file
;
file
->
filp
=
filp
;
ucm_dbg
(
"Created struct
\n
"
);
file
->
device
=
container_of
(
inode
->
i_cdev
,
struct
ib_ucm_device
,
dev
);
return
0
;
}
...
...
@@ -1263,7 +1297,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return
0
;
}
static
struct
file_operations
ib_ucm_fops
=
{
static
void
ib_ucm_release_class_dev
(
struct
class_device
*
class_dev
)
{
struct
ib_ucm_device
*
dev
;
dev
=
container_of
(
class_dev
,
struct
ib_ucm_device
,
class_dev
);
cdev_del
(
&
dev
->
dev
);
clear_bit
(
dev
->
devnum
,
dev_map
);
kfree
(
dev
);
}
static
struct
file_operations
ucm_fops
=
{
.
owner
=
THIS_MODULE
,
.
open
=
ib_ucm_open
,
.
release
=
ib_ucm_close
,
...
...
@@ -1271,55 +1315,142 @@ static struct file_operations ib_ucm_fops = {
.
poll
=
ib_ucm_poll
,
};
static
struct
class
ucm_class
=
{
.
name
=
"infiniband_cm"
,
.
release
=
ib_ucm_release_class_dev
};
static
struct
class
*
ib_ucm_class
;
static
struct
cdev
ib_ucm_cdev
;
static
ssize_t
show_dev
(
struct
class_device
*
class_dev
,
char
*
buf
)
{
struct
ib_ucm_device
*
dev
;
dev
=
container_of
(
class_dev
,
struct
ib_ucm_device
,
class_dev
);
return
print_dev_t
(
buf
,
dev
->
dev
.
dev
);
}
static
CLASS_DEVICE_ATTR
(
dev
,
S_IRUGO
,
show_dev
,
NULL
);
static
int
__init
ib_ucm_init
(
void
)
static
ssize_t
show_ibdev
(
struct
class_device
*
class_dev
,
char
*
buf
)
{
int
result
;
struct
ib_ucm_device
*
dev
;
dev
=
container_of
(
class_dev
,
struct
ib_ucm_device
,
class_dev
);
return
sprintf
(
buf
,
"%s
\n
"
,
dev
->
ib_dev
->
name
);
}
static
CLASS_DEVICE_ATTR
(
ibdev
,
S_IRUGO
,
show_ibdev
,
NULL
);
result
=
register_chrdev_region
(
IB_UCM_DEV
,
1
,
"infiniband_cm"
);
if
(
result
)
{
ucm_dbg
(
"Error <%d> registering dev
\n
"
,
result
);
goto
err_chr
;
}
static
void
ib_ucm_add_one
(
struct
ib_device
*
device
)
{
struct
ib_ucm_device
*
ucm_dev
;
if
(
!
device
->
alloc_ucontext
)
return
;
ucm_dev
=
kmalloc
(
sizeof
*
ucm_dev
,
GFP_KERNEL
);
if
(
!
ucm_dev
)
return
;
cdev_init
(
&
ib_ucm_cdev
,
&
ib_ucm_fops
);
memset
(
ucm_dev
,
0
,
sizeof
*
ucm_dev
);
ucm_dev
->
ib_dev
=
device
;
ucm_dev
->
devnum
=
find_first_zero_bit
(
dev_map
,
IB_UCM_MAX_DEVICES
);
if
(
ucm_dev
->
devnum
>=
IB_UCM_MAX_DEVICES
)
goto
err
;
set_bit
(
ucm_dev
->
devnum
,
dev_map
);
cdev_init
(
&
ucm_dev
->
dev
,
&
ucm_fops
);
ucm_dev
->
dev
.
owner
=
THIS_MODULE
;
kobject_set_name
(
&
ucm_dev
->
dev
.
kobj
,
"ucm%d"
,
ucm_dev
->
devnum
);
if
(
cdev_add
(
&
ucm_dev
->
dev
,
IB_UCM_BASE_DEV
+
ucm_dev
->
devnum
,
1
))
goto
err
;
result
=
cdev_add
(
&
ib_ucm_cdev
,
IB_UCM_DEV
,
1
);
if
(
result
)
{
ucm_dbg
(
"Error <%d> adding cdev
\n
"
,
result
);
ucm_dev
->
class_dev
.
class
=
&
ucm_class
;
ucm_dev
->
class_dev
.
dev
=
device
->
dma_device
;
snprintf
(
ucm_dev
->
class_dev
.
class_id
,
BUS_ID_SIZE
,
"ucm%d"
,
ucm_dev
->
devnum
);
if
(
class_device_register
(
&
ucm_dev
->
class_dev
))
goto
err_cdev
;
}
ib_ucm_class
=
class_create
(
THIS_MODULE
,
"infiniband_cm"
);
if
(
IS_ERR
(
ib_ucm_class
))
{
result
=
PTR_ERR
(
ib_ucm_class
);
ucm_dbg
(
"Error <%d> creating class
\n
"
,
result
);
if
(
class_device_create_file
(
&
ucm_dev
->
class_dev
,
&
class_device_attr_dev
))
goto
err_class
;
if
(
class_device_create_file
(
&
ucm_dev
->
class_dev
,
&
class_device_attr_ibdev
))
goto
err_class
;
ib_set_client_data
(
device
,
&
ucm_client
,
ucm_dev
);
return
;
err_class:
class_device_unregister
(
&
ucm_dev
->
class_dev
);
err_cdev:
cdev_del
(
&
ucm_dev
->
dev
);
clear_bit
(
ucm_dev
->
devnum
,
dev_map
);
err:
kfree
(
ucm_dev
);
return
;
}
static
void
ib_ucm_remove_one
(
struct
ib_device
*
device
)
{
struct
ib_ucm_device
*
ucm_dev
=
ib_get_client_data
(
device
,
&
ucm_client
);
if
(
!
ucm_dev
)
return
;
class_device_unregister
(
&
ucm_dev
->
class_dev
);
}
static
ssize_t
show_abi_version
(
struct
class
*
class
,
char
*
buf
)
{
return
sprintf
(
buf
,
"%d
\n
"
,
IB_USER_CM_ABI_VERSION
);
}
static
CLASS_ATTR
(
abi_version
,
S_IRUGO
,
show_abi_version
,
NULL
);
static
int
__init
ib_ucm_init
(
void
)
{
int
ret
;
ret
=
register_chrdev_region
(
IB_UCM_BASE_DEV
,
IB_UCM_MAX_DEVICES
,
"infiniband_cm"
);
if
(
ret
)
{
printk
(
KERN_ERR
"ucm: couldn't register device number
\n
"
);
goto
err
;
}
class_device_create
(
ib_ucm_class
,
NULL
,
IB_UCM_DEV
,
NULL
,
"ucm"
);
ret
=
class_register
(
&
ucm_class
);
if
(
ret
)
{
printk
(
KERN_ERR
"ucm: couldn't create class infiniband_cm
\n
"
);
goto
err_chrdev
;
}
idr_init
(
&
ctx_id_table
);
init_MUTEX
(
&
ctx_id_mutex
);
ret
=
class_create_file
(
&
ucm_class
,
&
class_attr_abi_version
);
if
(
ret
)
{
printk
(
KERN_ERR
"ucm: couldn't create abi_version attribute
\n
"
);
goto
err_class
;
}
ret
=
ib_register_client
(
&
ucm_client
);
if
(
ret
)
{
printk
(
KERN_ERR
"ucm: couldn't register client
\n
"
);
goto
err_class
;
}
return
0
;
err_class:
c
dev_del
(
&
ib_ucm_cdev
);
err_cdev:
unregister_chrdev_region
(
IB_UCM_
DEV
,
1
);
err
_chr
:
return
re
sul
t
;
c
lass_unregister
(
&
ucm_class
);
err_c
hr
dev:
unregister_chrdev_region
(
IB_UCM_
BASE_DEV
,
IB_UCM_MAX_DEVICES
);
err:
return
ret
;
}
static void __exit ib_ucm_cleanup(void)
{
-	class_device_destroy(ib_ucm_class, IB_UCM_DEV);
-	class_destroy(ib_ucm_class);
-	cdev_del(&ib_ucm_cdev);
-	unregister_chrdev_region(IB_UCM_DEV, 1);
+	ib_unregister_client(&ucm_client);
+	class_unregister(&ucm_class);
+	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
	idr_destroy(&ctx_id_table);
}
module_init(ib_ucm_init);
...
...
drivers/infiniband/core/user_mad.c
(diff collapsed)
drivers/infiniband/core/uverbs.h
...
...
@@ -3,6 +3,7 @@
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
...
...
@@ -38,29 +39,47 @@
#ifndef UVERBS_H
#define UVERBS_H
/* Include device.h and fs.h until cdev.h is self-sufficient */
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
/*
* Our lifetime rules for these structs are the following:
*
* struct ib_uverbs_device: One reference is held by the module and
* released in ib_uverbs_remove_one(). Another reference is taken by
* ib_uverbs_open() each time the character special file is opened,
* and released in ib_uverbs_release_file() when the file is released.
*
* struct ib_uverbs_file: One reference is held by the VFS and
* released when the file is closed. Another reference is taken when
* an asynchronous event queue file is created and released when the
* event file is closed.
*
* struct ib_uverbs_event_file: One reference is held by the VFS and
* released when the file is closed. For asynchronous event files,
* another reference is held by the corresponding main context file
* and released when that file is closed. For completion event files,
* a reference is taken when a CQ is created that uses the file, and
* released when the CQ is destroyed.
*/
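A hedged sketch of the kref pattern those rules rely on: each holder named above takes its own reference, and whichever put drops the count to zero runs the release callback (struct foo is a stand-in, not the uverbs code itself):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
};

static void foo_release(struct kref *ref)
{
	kfree(container_of(ref, struct foo, ref));
}

/* Every holder (module, open file, event file) does its own get ... */
static void foo_get(struct foo *f)
{
	kref_get(&f->ref);
}

/* ... and the last put, from whichever holder, frees the object. */
static void foo_put(struct foo *f)
{
	kref_put(&f->ref, foo_release);
}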
struct ib_uverbs_device {
	struct kref				ref;
	int					devnum;
-	struct cdev				dev;
-	struct class_device			class_dev;
+	struct cdev			       *dev;
+	struct class_device		       *class_dev;
	struct ib_device		       *ib_dev;
-	int					num_comp;
+	int					num_comp_vectors;
};
struct ib_uverbs_event_file {
	struct kref				ref;
	struct file			       *file;
	struct ib_uverbs_file		       *uverbs_file;
	spinlock_t				lock;
	int					fd;
	int					is_async;
	wait_queue_head_t			poll_wait;
	struct fasync_struct		       *async_queue;
...
...
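poll_wait and async_queue are the standard plumbing for a pollable character device. A hedged sketch of the kind of poll method they support (the event_list field is assumed to live in the elided part of the struct; this is not the uverbs implementation verbatim):

#include <linux/poll.h>

static unsigned int event_file_poll(struct file *filp, poll_table *wait)
{
	struct ib_uverbs_event_file *ev_file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (!list_empty(&ev_file->event_list))	/* assumed queued-event list */
		mask = POLLIN | POLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return mask;
}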
@@ -73,8 +92,7 @@ struct ib_uverbs_file {
	struct ib_uverbs_device		       *device;
	struct ib_ucontext		       *ucontext;
	struct ib_event_handler			event_handler;
-	struct ib_uverbs_event_file		async_file;
-	struct ib_uverbs_event_file		comp_file[1];
+	struct ib_uverbs_event_file	       *async_file;
};
struct ib_uverbs_event {
...
...
@@ -110,10 +128,23 @@ extern struct idr ib_uverbs_cq_idr;
extern struct idr ib_uverbs_qp_idr;
extern struct idr ib_uverbs_srq_idr;
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
					int is_async, int *fd);
void ib_uverbs_release_event_file(struct kref *ref);
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);

void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_event_file *ev_file,
			   struct ib_ucq_object *uobj);
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj);

void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event);

int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
		void *addr, size_t size, int write);
...
...
@@ -125,21 +156,26 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
				 const char __user *buf, int in_len,	\
				 int out_len)

IB_UVERBS_DECLARE_CMD(query_params);
IB_UVERBS_DECLARE_CMD(get_context);
IB_UVERBS_DECLARE_CMD(query_device);
IB_UVERBS_DECLARE_CMD(query_port);
IB_UVERBS_DECLARE_CMD(query_gid);
IB_UVERBS_DECLARE_CMD(query_pkey);
IB_UVERBS_DECLARE_CMD(alloc_pd);
IB_UVERBS_DECLARE_CMD(dealloc_pd);
IB_UVERBS_DECLARE_CMD(reg_mr);
IB_UVERBS_DECLARE_CMD(dereg_mr);
IB_UVERBS_DECLARE_CMD(create_comp_channel);
IB_UVERBS_DECLARE_CMD(create_cq);
IB_UVERBS_DECLARE_CMD(poll_cq);
IB_UVERBS_DECLARE_CMD(req_notify_cq);
IB_UVERBS_DECLARE_CMD(destroy_cq);
IB_UVERBS_DECLARE_CMD(create_qp);
IB_UVERBS_DECLARE_CMD(modify_qp);
IB_UVERBS_DECLARE_CMD(destroy_qp);
IB_UVERBS_DECLARE_CMD(post_send);
IB_UVERBS_DECLARE_CMD(post_recv);
IB_UVERBS_DECLARE_CMD(post_srq_recv);
IB_UVERBS_DECLARE_CMD(create_ah);
IB_UVERBS_DECLARE_CMD(destroy_ah);
IB_UVERBS_DECLARE_CMD(attach_mcast);
IB_UVERBS_DECLARE_CMD(detach_mcast);
IB_UVERBS_DECLARE_CMD(create_srq);
...
...
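Each IB_UVERBS_DECLARE_CMD line expands to one ssize_t handler with the signature shown in the macro above. In uverbs_main.c the handlers are dispatched through a function-pointer table indexed by the command number from the user-space ABI header; a hedged sketch of that shape (entries illustrative, not the full table):

static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
				     const char __user *buf,
				     int in_len, int out_len) = {
	[IB_USER_VERBS_CMD_GET_CONTEXT]  = ib_uverbs_get_context,
	[IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
	[IB_USER_VERBS_CMD_CREATE_CQ]    = ib_uverbs_create_cq,
	/* ... one entry per command declared above ... */
};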
drivers/infiniband/core/uverbs_cmd.c
(diff collapsed)
drivers/infiniband/core/uverbs_main.c
(diff collapsed)
drivers/infiniband/core/verbs.c
...
...
@@ -523,16 +523,22 @@ EXPORT_SYMBOL(ib_dealloc_fmr);
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
-	return qp->device->attach_mcast ?
-		qp->device->attach_mcast(qp, gid, lid) :
-		-ENOSYS;
+	if (!qp->device->attach_mcast)
+		return -ENOSYS;
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+		return -EINVAL;
+
+	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
-	return qp->device->detach_mcast ?
-		qp->device->detach_mcast(qp, gid, lid) :
-		-ENOSYS;
+	if (!qp->device->detach_mcast)
+		return -ENOSYS;
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+		return -EINVAL;
+
+	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
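The added checks encode two InfiniBand rules: multicast GIDs always carry the 0xff prefix byte, and only UD QPs may join a group. A hedged caller-side sketch (the GID bytes and LID are illustrative placeholders):

#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int join_example(struct ib_qp *qp, u16 mlid)
{
	union ib_gid mgid;

	memset(&mgid, 0, sizeof mgid);
	mgid.raw[0] = 0xff;	/* multicast prefix; anything else is -EINVAL now */
	mgid.raw[1] = 0x12;	/* flags/scope byte, example value */

	/* qp must be IB_QPT_UD; returns -ENOSYS if the device lacks the hook. */
	return ib_attach_mcast(qp, &mgid, mlid);
}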
drivers/infiniband/hw/mthca/Makefile
...
...
@@ -7,4 +7,5 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
ib_mthca-y :=	mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
		mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
		mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
-		mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
+		mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o \
+		mthca_catas.o
drivers/infiniband/core/ucm.h → drivers/infiniband/hw/mthca/mthca_catas.c
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_cmd.c
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
...
...
@@ -706,9 +707,13 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	dev->cmd.max_cmds = 1 << lg;
+	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
+	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);

	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
+	mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
+		  (unsigned long long) dev->catas_err.addr, dev->catas_err.size);

	if (mthca_is_memfree(dev)) {
		MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
...
...
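The address and size captured here describe the firmware's catastrophic-error buffer, consumed by the new mthca_catas.c poller. A hedged sketch of the core check such a poller performs (the ioremap of dev->catas_err.addr into 'map' is assumed, not shown in this hunk):

#include <asm/io.h>

/* Return nonzero if any word of the mapped catas buffer is set. */
static int catas_buf_has_error(u32 __iomem *map, int size_words)
{
	int i;

	for (i = 0; i < size_words; i++)
		if (readl(map + i))
			return 1;	/* firmware reported a catastrophic error */
	return 0;
}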
@@ -933,9 +938,9 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
		goto out;

	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
-	dev_lim->max_srq_sz = 1 << field;
+	dev_lim->max_srq_sz = (1 << field) - 1;
	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
-	dev_lim->max_qp_sz = 1 << field;
+	dev_lim->max_qp_sz = (1 << field) - 1;
	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
	dev_lim->reserved_qps = 1 << (field & 0xf);
	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
...
...
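The fix knocks the reported maximums down by one: the firmware field is a log2, and the driver now advertises 2^field - 1 rather than 2^field (for a field of 16: 65535, not 65536). A quick arithmetic check of the two expressions:

#include <stdio.h>

int main(void)
{
	int field = 16;	/* example log2 value from the firmware */

	printf("old: %d  new: %d\n", 1 << field, (1 << field) - 1);
	/* prints "old: 65536  new: 65535" */
	return 0;
}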
@@ -1045,6 +1050,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
		  dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
	mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		  dev_lim->max_pds, dev_lim->reserved_mgms);
+	mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+		  dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);

	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
...
...
drivers/infiniband/hw/mthca/mthca_dev.h
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_eq.c
...
...
@@ -83,7 +83,8 @@ enum {
	MTHCA_EVENT_TYPE_PATH_MIG	    = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST	    = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED	    = 0x03,
-	MTHCA_EVENT_TYPE_SRQ_LAST_WQE	    = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_LIMIT	    = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR	    = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
...
...
@@ -110,8 +111,9 @@ enum {
				       (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
				       (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)       | \
				       (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
-#define MTHCA_SRQ_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
-				(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE)
+#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)  | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)   | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD)
#define MTHCA_EQ_DB_INC_CI (1 << 24)
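Besides adding the two new event types, the rewritten mask gains the outer parentheses the old definition lacked; without them the macro body leaks into any surrounding expression. A small self-contained illustration (mask values hypothetical):

#include <assert.h>

#define BAD_MASK   1 << 2 | 1 << 3		/* unparenthesized, like the old macro */
#define GOOD_MASK  ((1 << 2) | (1 << 3))

int main(void)
{
	int x = 0;

	/* (x & BAD_MASK) parses as ((x & (1 << 2)) | (1 << 3)):
	 * bit 3 is OR'd in unconditionally. */
	assert((x & BAD_MASK) == (1 << 3));
	assert((x & GOOD_MASK) == 0);
	return 0;
}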
...
...
@@ -141,6 +143,9 @@ struct mthca_eqe {
		struct {
			__be32 qpn;
		} __attribute__ ((packed)) qp;
+		struct {
+			__be32 srqn;
+		} __attribute__ ((packed)) srq;
		struct {
			__be32 cqn;
			u32    reserved1;
...
...
@@ -305,6 +310,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
				       IB_EVENT_SQ_DRAINED);
			break;

+		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
+			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
+				       IB_EVENT_QP_LAST_WQE_REACHED);
+			break;
+
+		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
+			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
+				       IB_EVENT_SRQ_LIMIT_REACHED);
+			break;
+
		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
...
...
drivers/infiniband/hw/mthca/mthca_mad.c
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_main.c
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_mcg.c
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_memfree.c
...
...
@@ -487,7 +487,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
	}
}

-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
+int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
+		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
...
...
drivers/infiniband/hw/mthca/mthca_memfree.h
...
...
@@ -173,7 +173,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
int mthca_init_db_tab(struct mthca_dev *dev);
void mthca_cleanup_db_tab(struct mthca_dev *dev);
-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db);
+int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
+		   u32 qn, __be32 **db);
void mthca_free_db(struct mthca_dev *dev, int type, int db_index);

#endif /* MTHCA_MEMFREE_H */
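Typing the parameter as enum mthca_db_type instead of a bare int makes the set of valid doorbell-record kinds explicit at the prototype. The enum itself is defined elsewhere in this header; a sketch of its shape from the driver source of this era (treat the exact values as illustrative):

enum mthca_db_type {
	MTHCA_DB_TYPE_INVALID   = 0x0,
	MTHCA_DB_TYPE_CQ_SET_CI = 0x1,
	MTHCA_DB_TYPE_CQ_ARM    = 0x2,
	MTHCA_DB_TYPE_SQ        = 0x3,
	MTHCA_DB_TYPE_RQ        = 0x4,
	MTHCA_DB_TYPE_SRQ       = 0x5,
	MTHCA_DB_TYPE_GROUP_SIZE
};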
drivers/infiniband/hw/mthca/mthca_provider.c
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_qp.c
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_srq.c
(diff collapsed)
drivers/infiniband/hw/mthca/mthca_user.h
(diff collapsed)
drivers/infiniband/ulp/ipoib/ipoib.h
(diff collapsed)
drivers/infiniband/ulp/ipoib/ipoib_ib.c
(diff collapsed)
drivers/infiniband/ulp/ipoib/ipoib_main.c
(diff collapsed)
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
(diff collapsed)
include/rdma/ib_cm.h
(diff collapsed)
include/rdma/ib_mad.h
(diff collapsed)
include/rdma/ib_user_cm.h
(diff collapsed)
include/rdma/ib_user_verbs.h
(diff collapsed)
include/rdma/ib_verbs.h
(diff collapsed)