openeuler / raspberrypi-kernel · Commit 44602075
Authored on Jul 22, 2011 by Roland Dreier
Merge branches 'cma', 'cxgb4', 'ipath', 'misc', 'mlx4', 'mthca', 'qib' and 'srp' into for-next
Parents: 2efdd6a0 3cbe182a 57631811 10e1b54b e1892fa8 9b89925c e67306a3 fd1b6c4a
Showing 52 changed files with 962 additions and 1122 deletions (+962, −1122)
MAINTAINERS  +8  −3
drivers/infiniband/core/cache.c  +2  −1
drivers/infiniband/core/device.c  +6  −0
drivers/infiniband/hw/amso1100/c2_provider.c  +0  −9
drivers/infiniband/hw/cxgb3/iwch_provider.c  +0  −8
drivers/infiniband/hw/cxgb4/provider.c  +0  −8
drivers/infiniband/hw/cxgb4/resource.c  +5  −4
drivers/infiniband/hw/ipath/ipath_file_ops.c  +7  −4
drivers/infiniband/hw/ipath/ipath_mad.c  +33  −165
drivers/infiniband/hw/mlx4/mad.c  +67  −1
drivers/infiniband/hw/mlx4/main.c  +18  −3
drivers/infiniband/hw/mlx4/mlx4_ib.h  +1  −0
drivers/infiniband/hw/mlx4/qp.c  +9  −1
drivers/infiniband/hw/mthca/mthca_cmd.c  +152  −124
drivers/infiniband/hw/mthca/mthca_cmd.h  +44  −49
drivers/infiniband/hw/mthca/mthca_cq.c  +2  −13
drivers/infiniband/hw/mthca/mthca_eq.c  +10  −33
drivers/infiniband/hw/mthca/mthca_mad.c  +4  −11
drivers/infiniband/hw/mthca/mthca_main.c  +48  −127
drivers/infiniband/hw/mthca/mthca_mcg.c  +32  −69
drivers/infiniband/hw/mthca/mthca_memfree.c  +14  −29
drivers/infiniband/hw/mthca/mthca_mr.c  +4  −31
drivers/infiniband/hw/mthca/mthca_provider.c  +11  −66
drivers/infiniband/hw/mthca/mthca_qp.c  +15  −34
drivers/infiniband/hw/mthca/mthca_reset.c  +1  −1
drivers/infiniband/hw/mthca/mthca_srq.c  +7  −26
drivers/infiniband/hw/nes/nes_verbs.c  +0  −11
drivers/infiniband/hw/qib/qib.h  +3  −0
drivers/infiniband/hw/qib/qib_file_ops.c  +9  −7
drivers/infiniband/hw/qib/qib_iba7220.c  +23  −3
drivers/infiniband/hw/qib/qib_iba7322.c  +50  −22
drivers/infiniband/hw/qib/qib_mad.c  +39  −39
drivers/infiniband/hw/qib/qib_mad.h  +3  −140
drivers/infiniband/hw/qib/qib_pcie.c  +4  −4
drivers/infiniband/hw/qib/qib_sysfs.c  +14  −0
drivers/infiniband/ulp/srp/ib_srp.c  +2  −0
drivers/net/mlx4/en_ethtool.c  +5  −4
drivers/net/mlx4/en_main.c  +2  −1
drivers/net/mlx4/en_netdev.c  +3  −2
drivers/net/mlx4/en_port.c  +4  −2
drivers/net/mlx4/en_selftest.c  +2  −1
drivers/net/mlx4/fw.c  +24  −15
drivers/net/mlx4/fw.h  +2  −6
drivers/net/mlx4/main.c  +53  −5
drivers/net/mlx4/mcg.c  +10  −7
drivers/net/mlx4/mlx4.h  +3  −2
drivers/net/mlx4/port.c  +4  −4
include/linux/mlx4/cmd.h  +3  −0
include/linux/mlx4/device.h  +38  −22
include/linux/mlx4/qp.h  +4  −4
include/rdma/ib_pma.h  +156  −0
include/rdma/ib_verbs.h  +2  −1
MAINTAINERS
@@ -3425,10 +3425,9 @@ S:	Maintained
 F:	drivers/net/ipg.*

 IPATH DRIVER
-M:	Ralph Campbell <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@qlogic.com>
 L:	linux-rdma@vger.kernel.org
-T:	git git://git.qlogic.com/ipath-linux-2.6
-S:	Supported
+S:	Maintained
 F:	drivers/infiniband/hw/ipath/

 IPMI SUBSYSTEM
@@ -5152,6 +5151,12 @@ M:	Robert Jarzmik <robert.jarzmik@free.fr>
 L:	rtc-linux@googlegroups.com
 S:	Maintained

+QIB DRIVER
+M:	Mike Marciniszyn <infinipath@qlogic.com>
+L:	linux-rdma@vger.kernel.org
+S:	Supported
+F:	drivers/infiniband/hw/qib/
+
 QLOGIC QLA1280 SCSI DRIVER
 M:	Michael Reed <mdr@sgi.com>
 L:	linux-scsi@vger.kernel.org
drivers/infiniband/core/cache.c
@@ -302,7 +302,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
	    event->event == IB_EVENT_LID_CHANGE    ||
	    event->event == IB_EVENT_PKEY_CHANGE   ||
	    event->event == IB_EVENT_SM_CHANGE     ||
-	    event->event == IB_EVENT_CLIENT_REREGISTER) {
+	    event->event == IB_EVENT_CLIENT_REREGISTER ||
+	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
drivers/infiniband/core/device.c
@@ -627,6 +627,9 @@ int ib_modify_device(struct ib_device *device,
		     int                          device_modify_mask,
		     struct ib_device_modify     *device_modify)
 {
+	if (!device->modify_device)
+		return -ENOSYS;
+
	return device->modify_device(device, device_modify_mask,
				     device_modify);
 }
@@ -647,6 +650,9 @@ int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
 {
+	if (!device->modify_port)
+		return -ENOSYS;
+
	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;
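The device.c hunks above make the modify_device/modify_port hooks optional: the core now returns -ENOSYS when a driver leaves them NULL, which is what lets the stub implementations below be deleted from amso1100, cxgb3, cxgb4 and nes. A minimal user-space sketch of that guard-before-dispatch pattern (the struct and function names here are illustrative, not kernel API):

#include <stdio.h>
#include <errno.h>

struct device_ops {
	int (*modify_port)(int port, int mask);	/* may be NULL */
};

/* Dispatcher: report "not implemented" instead of calling a NULL hook. */
static int modify_port(struct device_ops *ops, int port, int mask)
{
	if (!ops->modify_port)
		return -ENOSYS;
	return ops->modify_port(port, mask);
}

int main(void)
{
	struct device_ops no_hook = { .modify_port = NULL };

	printf("result: %d (expect %d)\n",
	       modify_port(&no_hook, 1, 0), -ENOSYS);
	return 0;
}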
drivers/infiniband/hw/amso1100/c2_provider.c
@@ -99,14 +99,6 @@ static int c2_query_port(struct ib_device *ibdev,
	return 0;
 }

-static int c2_modify_port(struct ib_device *ibdev,
-			  u8 port, int port_modify_mask,
-			  struct ib_port_modify *props)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return 0;
-}
-
 static int c2_query_pkey(struct ib_device *ibdev,
			 u8 port, u16 index, u16 *pkey)
 {
@@ -817,7 +809,6 @@ int c2_register_device(struct c2_dev *dev)
	dev->ibdev.dma_device = &dev->pcidev->dev;
	dev->ibdev.query_device = c2_query_device;
	dev->ibdev.query_port = c2_query_port;
-	dev->ibdev.modify_port = c2_modify_port;
	dev->ibdev.query_pkey = c2_query_pkey;
	dev->ibdev.query_gid = c2_query_gid;
	dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,13 +61,6 @@
 #include "iwch_user.h"
 #include "common.h"

-static int iwch_modify_port(struct ib_device *ibdev,
-			    u8 port, int port_modify_mask,
-			    struct ib_port_modify *props)
-{
-	return -ENOSYS;
-}
-
 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
 {
@@ -1392,7 +1385,6 @@ int iwch_register_device(struct iwch_dev *dev)
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
-	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
drivers/infiniband/hw/cxgb4/provider.c
@@ -58,13 +58,6 @@ static int fastreg_support = 1;
 module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

-static int c4iw_modify_port(struct ib_device *ibdev,
-			    u8 port, int port_modify_mask,
-			    struct ib_port_modify *props)
-{
-	return -ENOSYS;
-}
-
 static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
 {
@@ -456,7 +449,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
	dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
	dev->ibdev.query_device = c4iw_query_device;
	dev->ibdev.query_port = c4iw_query_port;
-	dev->ibdev.modify_port = c4iw_modify_port;
	dev->ibdev.query_pkey = c4iw_query_pkey;
	dev->ibdev.query_gid = c4iw_query_gid;
	dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
drivers/infiniband/hw/cxgb4/resource.c
@@ -37,6 +37,7 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/genalloc.h>
+#include <linux/ratelimit.h>
 #include "iw_cxgb4.h"

 #define RANDOM_SIZE 16
@@ -311,8 +312,8 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
-	if (!addr && printk_ratelimit())
-		printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
+	if (!addr)
+		printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
 }
@@ -373,8 +374,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 {
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
-	if (!addr && printk_ratelimit())
-		printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
+	if (!addr)
+		printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
 }
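The resource.c hunks replace the open-coded printk_ratelimit()-then-printk() pair with the fused printk_ratelimited() helper, which gives each call site its own rate state. A rough user-space sketch of the idea, with a hypothetical LOG_RATELIMITED macro standing in for the kernel helper:

#include <stdio.h>
#include <time.h>

/* Allow at most one message per interval (seconds), per call site:
 * the block-scope static gives each expansion its own timestamp. */
#define LOG_RATELIMITED(interval, ...)                          \
	do {                                                    \
		static time_t last;                             \
		time_t now = time(NULL);                        \
		if (now - last >= (interval)) {                 \
			last = now;                             \
			fprintf(stderr, __VA_ARGS__);           \
		}                                               \
	} while (0)

int main(void)
{
	for (int i = 0; i < 1000000; i++)
		LOG_RATELIMITED(5, "out of PBL memory\n");	/* prints once */
	return 0;
}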
drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
+#include <linux/cpu.h>
 #include <asm/pgtable.h>
 #include "ipath_kernel.h"
@@ -1684,17 +1685,19 @@ static int find_best_unit(struct file *fp,
	 * information.  There may be some issues with dual core numbering
	 * as well.  This needs more work prior to release.
	 */
-	if (!cpumask_empty(&current->cpus_allowed) &&
-	    !cpumask_full(&current->cpus_allowed)) {
+	if (!cpumask_empty(tsk_cpus_allowed(current)) &&
+	    !cpumask_full(tsk_cpus_allowed(current))) {
		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
-		for (i = 0; i < ncpus; i++)
-			if (cpumask_test_cpu(i, &current->cpus_allowed)) {
+		get_online_cpus();
+		for_each_online_cpu(i)
+			if (cpumask_test_cpu(i,
+					     tsk_cpus_allowed(current))) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d/%d\n", current->comm,
					   current->pid, i, ncpus);
				curcpu = i;
				nset++;
			}
+		put_online_cpus();
		if (curcpu != -1 && nset != ncpus) {
			if (npresent) {
				prefunit = curcpu / (ncpus / npresent);
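The hunk above swaps direct access to current->cpus_allowed for the tsk_cpus_allowed() accessor and brackets the scan with get_online_cpus()/put_online_cpus() so CPUs cannot be hot-unplugged mid-loop. A rough user-space analogue of the affinity test it performs (sched_getaffinity() playing the role of tsk_cpus_allowed(current); illustrative only):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t mask;
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	if (sched_getaffinity(0, sizeof(mask), &mask))
		return 1;

	/* "Neither empty nor full" == the caller restricted its affinity. */
	if (CPU_COUNT(&mask) > 0 && CPU_COUNT(&mask) < ncpus)
		printf("affinity restricted to %d of %ld CPUs\n",
		       CPU_COUNT(&mask), ncpus);
	else
		printf("no effective affinity restriction\n");
	return 0;
}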
drivers/infiniband/hw/ipath/ipath_mad.c
@@ -32,6 +32,7 @@
  */

 #include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>

 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
@@ -789,151 +790,18 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
	return recv_subn_get_pkeytable(smp, ibdev);
 }

-#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)
-
-struct ib_perf {
-	u8 base_version;
-	u8 mgmt_class;
-	u8 class_version;
-	u8 method;
-	__be16 status;
-	__be16 unused;
-	__be64 tid;
-	__be16 attr_id;
-	__be16 resv;
-	__be32 attr_mod;
-	u8 reserved[40];
-	u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
-	u8 base_version;
-	u8 class_version;
-	__be16 cap_mask;
-	u8 reserved[3];
-	u8 resp_time_value;	/* only lower 5 bits */
-	union ib_gid redirect_gid;
-	__be32 redirect_tc_sl_fl;	/* 8, 4, 20 bits respectively */
-	__be16 redirect_lid;
-	__be16 redirect_pkey;
-	__be32 redirect_qp;	/* only lower 24 bits */
-	__be32 redirect_qkey;
-	union ib_gid trap_gid;
-	__be32 trap_tc_sl_fl;	/* 8, 4, 20 bits respectively */
-	__be16 trap_lid;
-	__be16 trap_pkey;
-	__be32 trap_hl_qp;	/* 8, 24 bits respectively */
-	__be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
-	u8 opcode;
-	u8 port_select;
-	u8 tick;
-	u8 counter_width;	/* only lower 3 bits */
-	__be32 counter_mask0_9;	/* 2, 10 * 3, bits */
-	__be16 counter_mask10_14;	/* 1, 5 * 3, bits */
-	u8 sample_mechanisms;
-	u8 sample_status;	/* only lower 2 bits */
-	__be64 option_mask;
-	__be64 vendor_mask;
-	__be32 sample_start;
-	__be32 sample_interval;
-	__be16 tag;
-	__be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
-	__be16 tag;
-	__be16 sample_status;	/* only lower 2 bits */
-	__be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
-	__be16 tag;
-	__be16 sample_status;	/* only lower 2 bits */
-	__be32 extended_width;	/* only upper 2 bits */
-	__be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be16 symbol_error_counter;
-	u8 link_error_recovery_counter;
-	u8 link_downed_counter;
-	__be16 port_rcv_errors;
-	__be16 port_rcv_remphys_errors;
-	__be16 port_rcv_switch_relay_errors;
-	__be16 port_xmit_discards;
-	u8 port_xmit_constraint_errors;
-	u8 port_rcv_constraint_errors;
-	u8 reserved1;
-	u8 lli_ebor_errors;	/* 4, 4, bits */
-	__be16 reserved2;
-	__be16 vl15_dropped;
-	__be32 port_xmit_data;
-	__be32 port_rcv_data;
-	__be32 port_xmit_packets;
-	__be32 port_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)
-
-struct ib_pma_portcounters_ext {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be32 reserved1;
-	__be64 port_xmit_data;
-	__be64 port_rcv_data;
-	__be64 port_xmit_packets;
-	__be64 port_rcv_packets;
-	__be64 port_unicast_xmit_packets;
-	__be64 port_unicast_rcv_packets;
-	__be64 port_multicast_xmit_packets;
-	__be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)
-
-static int recv_pma_get_classportinfo(struct ib_perf *pmp)
+static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
 {
-	struct ib_pma_classportinfo *p =
-		(struct ib_pma_classportinfo *)pmp->data;
+	struct ib_class_port_info *p =
+		(struct ib_class_port_info *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

-	if (pmp->attr_mod != 0)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0)
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = cpu_to_be16(1 << 8);
+	p->capability_mask = cpu_to_be16(1 << 8);
	p->base_version = 1;
	p->class_version = 1;
	/*
@@ -957,7 +825,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
			    COUNTER_MASK(1, 3) | \
			    COUNTER_MASK(1, 4))

-static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
 {
	struct ib_pma_portsamplescontrol *p =
@@ -970,9 +838,9 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
	/*
	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
@@ -1006,7 +874,7 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
	return reply((struct ib_smp *) pmp);
 }

-static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
 {
	struct ib_pma_portsamplescontrol *p =
@@ -1017,9 +885,9 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
	u8 status;
	int ret;

-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
	    (p->port_select != port && p->port_select != 0xFF)) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}
@@ -1093,7 +961,7 @@ static u64 get_counter(struct ipath_ibdev *dev,
	return ret;
 }

-static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
					  struct ib_device *ibdev)
 {
	struct ib_pma_portsamplesresult *p =
@@ -1118,7 +986,7 @@ static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
	return reply((struct ib_smp *) pmp);
 }

-static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					      struct ib_device *ibdev)
 {
	struct ib_pma_portsamplesresult_ext *p =
@@ -1145,7 +1013,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
	return reply((struct ib_smp *) pmp);
 }

-static int recv_pma_get_portcounters(struct ib_perf *pmp,
+static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
 {
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1179,9 +1047,9 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1216,7 +1084,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1244,7 +1112,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
	return reply((struct ib_smp *) pmp);
 }

-static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
 {
	struct ib_pma_portcounters_ext *p =
@@ -1265,9 +1133,9 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
@@ -1281,7 +1149,7 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
	return reply((struct ib_smp *) pmp);
 }

-static int recv_pma_set_portcounters(struct ib_perf *pmp,
+static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
 {
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1344,7 +1212,7 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
	return recv_pma_get_portcounters(pmp, ibdev, port);
 }

-static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
 {
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1518,19 +1386,19 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
 {
-	struct ib_perf *pmp = (struct ib_perf *)out_mad;
+	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
-	if (pmp->class_version != 1) {
-		pmp->status |= IB_SMP_UNSUP_VERSION;
+	if (pmp->mad_hdr.class_version != 1) {
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

-	switch (pmp->method) {
+	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
@@ -1554,13 +1422,13 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
							 port_num);
			goto bail;
		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
@@ -1574,7 +1442,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
							  port_num);
			goto bail;
		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}
@@ -1588,7 +1456,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
-		pmp->status |= IB_SMP_UNSUP_METHOD;
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}
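The ipath_mad.c diff deletes the driver-local PMA structs and IB_PMA_* attribute IDs in favor of the shared <rdma/ib_pma.h> added by this merge. One detail worth noting: the attribute-ID constants are stored pre-swapped with cpu_to_be16(), so dispatch can compare directly against the big-endian on-the-wire attr_id without converting each packet. A small user-space sketch of that trick, with htons() standing in for cpu_to_be16() and a hypothetical constant name (the real ones live in ib_pma.h):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons() plays the role of cpu_to_be16() */

#define PMA_PORT_COUNTERS htons(0x0012)	/* stored in wire byte order */

int main(void)
{
	uint16_t wire_attr_id = htons(0x0012);	/* as received, big endian */

	/* No byte swap needed at dispatch time: both sides are wire order. */
	if (wire_attr_id == PMA_PORT_COUNTERS)
		puts("PortCounters request");
	else
		puts("unsupported attribute");
	return 0;
}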
drivers/infiniband/hw/mlx4/mad.c
@@ -35,6 +35,7 @@
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
+#include <rdma/ib_pma.h>

 #include "mlx4_ib.h"
@@ -232,7 +233,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
	}
 }

-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
@@ -302,6 +303,71 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }

+static void edit_counter(struct mlx4_counter *cnt,
+			 struct ib_pma_portcounters *pma_cnt)
+{
+	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes) >> 2));
+	pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes) >> 2));
+	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+}
+
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+			struct ib_wc *in_wc, struct ib_grh *in_grh,
+			struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+	int err;
+	u32 inmod = dev->counters[port_num - 1] & 0xffff;
+	u8 mode;
+
+	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+		return -EINVAL;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+	if (IS_ERR(mailbox))
+		return IB_MAD_RESULT_FAILURE;
+
+	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
+			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+	if (err)
+		err = IB_MAD_RESULT_FAILURE;
+	else {
+		memset(out_mad->data, 0, sizeof out_mad->data);
+		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
+		switch (mode & 0xf) {
+		case 0:
+			edit_counter(mailbox->buf,
+				     (void *)(out_mad->data + 40));
+			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+			break;
+		default:
+			err = IB_MAD_RESULT_FAILURE;
+		}
+	}
+
+	mlx4_free_cmd_mailbox(dev->dev, mailbox);
+
+	return err;
+}
+
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+			struct ib_wc *in_wc, struct ib_grh *in_grh,
+			struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	switch (rdma_port_get_link_layer(ibdev, port_num)) {
+	case IB_LINK_LAYER_INFINIBAND:
+		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+				      in_grh, in_mad, out_mad);
+	case IB_LINK_LAYER_ETHERNET:
+		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+					in_grh, in_mad, out_mad);
+	default:
+		return -EINVAL;
+	}
+}
+
 static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
 {
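In iboe_process_mad() above, the counters are written at out_mad->data + 40. That offset follows from the PMA MAD layout this merge standardizes: a 24-byte MAD header, 40 reserved bytes, then the 192-byte data area, so the PortCounters block starts 64 bytes into the MAD, i.e. 40 bytes into ib_mad.data. A sketch that checks the arithmetic; the structs mirror the layout only in shape, with abbreviated illustrative names:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct mad_hdr { uint8_t bytes[24]; };	/* common MAD header size */

struct ib_mad_like {
	struct mad_hdr hdr;
	uint8_t        data[232];	/* 40 reserved + 192 payload */
};

struct pma_mad_like {
	struct mad_hdr hdr;
	uint8_t        reserved[40];
	uint8_t        data[192];	/* PortCounters lands here */
};

int main(void)
{
	/* Both expressions name the same byte offset (64). */
	printf("payload offset via ib_mad:  %zu\n",
	       offsetof(struct ib_mad_like, data) + 40);
	printf("payload offset via pma_mad: %zu\n",
	       offsetof(struct pma_mad_like, data));
	return 0;
}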
drivers/infiniband/hw/mlx4/main.c
@@ -816,7 +816,7 @@ static void update_gids_task(struct work_struct *work)
	memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
	event.device = &gw->dev->ib_dev;
	event.element.port_num = gw->port;
-	event.event    = IB_EVENT_LID_CHANGE;
+	event.event    = IB_EVENT_GID_CHANGE;
	ib_dispatch_event(&event);
 }
@@ -1098,11 +1098,21 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
	if (init_node_data(ibdev))
		goto err_map;

+	for (i = 0; i < ibdev->num_ports; ++i) {
+		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
+						IB_LINK_LAYER_ETHERNET) {
+			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+			if (err)
+				ibdev->counters[i] = -1;
+		} else
+			ibdev->counters[i] = -1;
+	}
+
	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
-		goto err_map;
+		goto err_counter;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;
@@ -1132,6 +1142,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 err_reg:
	ib_unregister_device(&ibdev->ib_dev);

+err_counter:
+	for (; i; --i)
+		mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+
 err_map:
	iounmap(ibdev->uar_map);
@@ -1160,7 +1174,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);
-
+	for (p = 0; p < ibdev->num_ports; ++p)
+		mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);
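The err_counter label above uses a common kernel unwind idiom: on a later failure, walk back over exactly the i slots that were filled, in reverse. A standalone sketch of the pattern, assuming hypothetical alloc_counter()/free_counter() helpers and using -1 as the "no counter" sentinel the mlx4 code also uses:

#include <stdio.h>

#define NPORTS 2

static int alloc_counter(int port) { return port == 1 ? -1 : port + 100; }
static void free_counter(int idx)  { if (idx != -1) printf("free %d\n", idx); }

int main(void)
{
	int counters[NPORTS];
	int i;

	for (i = 0; i < NPORTS; ++i)
		counters[i] = alloc_counter(i);	/* -1 == no counter taken */

	/* ...suppose a later init step fails: unwind what we took,
	 * newest first, reusing the loop counter as the high-water mark. */
	for (; i; --i)
		free_counter(counters[i - 1]);
	return 0;
}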
drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -193,6 +193,7 @@ struct mlx4_ib_dev {
	struct mutex		cap_mask_mutex;
	bool			ib_active;
	struct mlx4_ib_iboe	iboe;
+	int			counters[MLX4_MAX_PORTS];
 };

 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
drivers/infiniband/hw/mlx4/qp.c
@@ -893,7 +893,6 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			--path->static_rate;
	} else
		path->static_rate = 0;
-	path->counter_index = 0xff;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
@@ -1034,6 +1033,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
		}
	}

+	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+		if (dev->counters[qp->port - 1] != -1) {
+			context->pri_path.counter_index =
+						dev->counters[qp->port - 1];
+			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
+		} else
+			context->pri_path.counter_index = 0xff;
+	}
+
	if (attr_mask & IB_QP_PKEY_INDEX) {
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
drivers/infiniband/hw/mthca/mthca_cmd.c
(diff collapsed in the captured page view and not expanded; +152 −124)
drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -252,79 +252,74 @@ struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
					  gfp_t gfp_mask);
 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);

-int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
-int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
-int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
-int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status);
-int mthca_RUN_FW(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status);
-int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status);
+int mthca_SYS_EN(struct mthca_dev *dev);
+int mthca_SYS_DIS(struct mthca_dev *dev);
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_FA(struct mthca_dev *dev);
+int mthca_RUN_FW(struct mthca_dev *dev);
+int mthca_QUERY_FW(struct mthca_dev *dev);
+int mthca_ENABLE_LAM(struct mthca_dev *dev);
+int mthca_DISABLE_LAM(struct mthca_dev *dev);
+int mthca_QUERY_DDR(struct mthca_dev *dev);
 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
-			struct mthca_dev_lim *dev_lim, u8 *status);
+			struct mthca_dev_lim *dev_lim);
 int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
-			struct mthca_adapter *adapter, u8 *status);
+			struct mthca_adapter *adapter);
 int mthca_INIT_HCA(struct mthca_dev *dev,
-		   struct mthca_init_hca_param *param,
-		   u8 *status);
+		   struct mthca_init_hca_param *param);
 int mthca_INIT_IB(struct mthca_dev *dev,
		  struct mthca_init_ib_param *param,
-		  int port, u8 *status);
-int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status);
-int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status);
+		  int port);
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port);
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic);
 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
-		 int port, u8 *status);
-int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt,
-		  u8 *status);
-int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt,
-		       u8 *status);
-int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count,
-		    u8 *status);
-int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm,
-		      u8 *status);
-int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
-int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
-		       u8 *status);
+		 int port);
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt);
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt);
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count);
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev);
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages);
 int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int mpt_index, u8 *status);
+		    int mpt_index);
 int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int mpt_index, u8 *status);
+		    int mpt_index);
 int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int num_mtt, u8 *status);
-int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
+		    int num_mtt);
+int mthca_SYNC_TPT(struct mthca_dev *dev);
 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
-		 int eq_num, u8 *status);
+		 int eq_num);
 int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int eq_num, u8 *status);
+		   int eq_num);
 int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int eq_num, u8 *status);
+		   int eq_num);
 int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int cq_num, u8 *status);
+		   int cq_num);
 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int cq_num, u8 *status);
-int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
-		    u8 *status);
+		   int cq_num);
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size);
 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int srq_num, u8 *status);
+		    int srq_num);
 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int srq_num, u8 *status);
+		    int srq_num);
 int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
-		    struct mthca_mailbox *mailbox, u8 *status);
-int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
+		    struct mthca_mailbox *mailbox);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit);
 int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
		    enum ib_qp_state next, u32 num, int is_ee,
-		    struct mthca_mailbox *mailbox, u32 optmask,
-		    u8 *status);
+		    struct mthca_mailbox *mailbox, u32 optmask);
 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
-		   struct mthca_mailbox *mailbox, u8 *status);
-int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
-			  u8 *status);
+		   struct mthca_mailbox *mailbox);
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn);
 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
		  int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
-		  void *in_mad, void *response_mad, u8 *status);
+		  void *in_mad, void *response_mad);
 int mthca_READ_MGM(struct mthca_dev *dev, int index,
-		   struct mthca_mailbox *mailbox, u8 *status);
+		   struct mthca_mailbox *mailbox);
 int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
-		    struct mthca_mailbox *mailbox, u8 *status);
+		    struct mthca_mailbox *mailbox);
 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    u16 *hash, u8 *status);
-int mthca_NOP(struct mthca_dev *dev, u8 *status);
+		    u16 *hash);
+int mthca_NOP(struct mthca_dev *dev);

 #endif /* MTHCA_CMD_H */
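This header records the shape of the whole mthca branch in this merge: firmware command wrappers stop returning a transport error plus a separate u8 firmware-status out-parameter, and instead fold the status into the single errno-style return (which is why every caller below loses its "u8 status;" local). A sketch of the folding, with hypothetical FW_STAT_* values standing in for the real mthca status tables; the -EBADMSG mapping mirrors the check mthca_mad.c now makes:

#include <stdio.h>
#include <errno.h>

#define FW_STAT_OK      0x00
#define FW_STAT_BAD_PKT 0x30	/* hypothetical status value */

/* Translate a firmware status byte into one errno-style return. */
static int fw_status_to_errno(int status)
{
	switch (status) {
	case FW_STAT_OK:      return 0;
	case FW_STAT_BAD_PKT: return -EBADMSG;
	default:              return -EINVAL;
	}
}

/* New-style wrapper: one return value carries both failure kinds.
 * A transport failure would return its own -errno before this point. */
static int cmd_nop(int simulated_fw_status)
{
	return fw_status_to_errno(simulated_fw_status);
}

int main(void)
{
	printf("ok=%d bad_pkt=%d\n",
	       cmd_nop(FW_STAT_OK), cmd_nop(FW_STAT_BAD_PKT));
	return 0;
}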
drivers/infiniband/hw/mthca/mthca_cq.c
@@ -779,7 +779,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
-	u8 status;

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;
@@ -847,19 +846,12 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

-	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
+	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

-	if (status) {
-		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
-			   status);
-		err = -EINVAL;
-		goto err_out_free_mr;
-	}
-
	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
@@ -915,7 +907,6 @@ void mthca_free_cq(struct mthca_dev *dev,
 {
	struct mthca_mailbox *mailbox;
	int err;
-	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
@@ -923,11 +914,9 @@ void mthca_free_cq(struct mthca_dev *dev,
		return;
	}

-	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
+	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
-	else if (status)
-		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		__be32 *ctx = mailbox->buf;
drivers/infiniband/hw/mthca/mthca_eq.c
@@ -474,7 +474,6 @@ static int mthca_create_eq(struct mthca_dev *dev,
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;
-	u8 status;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
@@ -543,15 +542,9 @@ static int mthca_create_eq(struct mthca_dev *dev,
	eq_context->intr            = intr;
	eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

-	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
+	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
-		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
-		goto err_out_free_mr;
-	}
-	if (status) {
-		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
-			   status);
-		err = -EINVAL;
+		mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
		goto err_out_free_mr;
	}
@@ -597,7 +590,6 @@ static void mthca_free_eq(struct mthca_dev *dev,
 {
	struct mthca_mailbox *mailbox;
	int err;
-	u8 status;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;
@@ -606,11 +598,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
	if (IS_ERR(mailbox))
		return;

-	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
+	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
-		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
-	if (status)
-		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
+		mthca_warn(dev, "HW2SW_EQ returned %d\n", err);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;
@@ -738,7 +728,6 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
 int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 {
	int ret;
-	u8 status;

	/*
	 * We assume that mapping one page is enough for the whole EQ
@@ -757,9 +746,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
		return -ENOMEM;
	}

-	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
-	if (!ret && status)
-		ret = -EINVAL;
+	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
@@ -771,9 +758,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 void mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
-	u8 status;
-
-	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
+	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
@@ -782,7 +767,6 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
 int mthca_init_eq_table(struct mthca_dev *dev)
 {
	int err;
-	u8 status;
	u8 intr;
	int i;
@@ -864,22 +848,16 @@ int mthca_init_eq_table(struct mthca_dev *dev)
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
-			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
-	if (status)
-		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
-			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
-	if (status)
-		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
-			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_is_memfree(dev))
@@ -909,15 +887,14 @@ int mthca_init_eq_table(struct mthca_dev *dev)
 void mthca_cleanup_eq_table(struct mthca_dev *dev)
 {
-	u8 status;
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
-		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);
drivers/infiniband/hw/mthca/mthca_mad.c
@@ -201,7 +201,6 @@ int mthca_process_mad(struct ib_device *ibdev,
		      struct ib_mad *out_mad)
 {
	int err;
-	u8 status;
	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
	u16 prev_lid = 0;
	struct ib_port_attr pattr;
@@ -252,17 +251,11 @@ int mthca_process_mad(struct ib_device *ibdev,
	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
-			    port_num, in_wc, in_grh, in_mad, out_mad,
-			    &status);
-	if (err) {
-		mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
-		return IB_MAD_RESULT_FAILURE;
-	}
-	if (status == MTHCA_CMD_STAT_BAD_PKT)
+			    port_num, in_wc, in_grh, in_mad, out_mad);
+	if (err == -EBADMSG)
		return IB_MAD_RESULT_SUCCESS;
-	if (status) {
-		mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
-			  status);
+	else if (err) {
+		mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);
		return IB_MAD_RESULT_FAILURE;
	}
drivers/infiniband/hw/mthca/mthca_main.c
@@ -149,7 +149,7 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

-	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
+	if (pci_is_pcie(mdev->pdev)) {
		if (pcie_set_readrq(mdev->pdev, 4096)) {
			mthca_err(mdev, "Couldn't write PCI Express read request, "
				"aborting.\n");
@@ -165,19 +165,14 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
 static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 {
	int err;
-	u8 status;

	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;

-	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
+	err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
	if (err) {
-		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
+				", aborting.\n", err);
		return err;
	}
-	if (status) {
-		mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
@@ -293,49 +288,32 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 static int mthca_init_tavor(struct mthca_dev *mdev)
 {
	s64 size;
-	u8 status;
	int err;
	struct mthca_dev_lim dev_lim;
	struct mthca_profile profile;
	struct mthca_init_hca_param init_hca;

-	err = mthca_SYS_EN(mdev, &status);
+	err = mthca_SYS_EN(mdev);
	if (err) {
-		mthca_err(mdev, "SYS_EN command failed, aborting.\n");
+		mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
		return err;
	}
-	if (status) {
-		mthca_err(mdev, "SYS_EN returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}

-	err = mthca_QUERY_FW(mdev, &status);
+	err = mthca_QUERY_FW(mdev);
	if (err) {
-		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
-		goto err_disable;
-	}
-	if (status) {
-		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "QUERY_FW command returned %d,"
+			  " aborting.\n", err);
		goto err_disable;
	}
-	err = mthca_QUERY_DDR(mdev, &status);
+	err = mthca_QUERY_DDR(mdev);
	if (err) {
-		mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
-		goto err_disable;
-	}
-	if (status) {
-		mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
-		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
		goto err_disable;
	}
@@ -351,29 +329,22 @@ static int mthca_init_tavor(struct mthca_dev *mdev)
		goto err_disable;
	}

-	err = mthca_INIT_HCA(mdev, &init_hca, &status);
+	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
-		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
-		goto err_disable;
-	}
-	if (status) {
-		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_disable;
	}

	return 0;

 err_disable:
-	mthca_SYS_DIS(mdev, &status);
+	mthca_SYS_DIS(mdev);

	return err;
 }

 static int mthca_load_fw(struct mthca_dev *mdev)
 {
-	u8 status;
	int err;

	/* FIXME: use HCA-attached memory for FW if present */
@@ -386,31 +357,21 @@ static int mthca_load_fw(struct mthca_dev *mdev)
		return -ENOMEM;
	}

-	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
+	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
	if (err) {
-		mthca_err(mdev, "MAP_FA command failed, aborting.\n");
-		goto err_free;
-	}
-	if (status) {
-		mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
		goto err_free;
	}
-	err = mthca_RUN_FW(mdev, &status);
+	err = mthca_RUN_FW(mdev);
	if (err) {
-		mthca_err(mdev, "RUN_FW command failed, aborting.\n");
-		goto err_unmap_fa;
-	}
-	if (status) {
-		mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
		goto err_unmap_fa;
	}

	return 0;

 err_unmap_fa:
-	mthca_UNMAP_FA(mdev, &status);
+	mthca_UNMAP_FA(mdev);

 err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
@@ -423,19 +384,13 @@ static int mthca_init_icm(struct mthca_dev *mdev,
			  u64 icm_size)
 {
	u64 aux_pages;
-	u8 status;
	int err;

-	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
+	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
	if (err) {
-		mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
+		mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
		return err;
	}
-	if (status) {
-		mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
@@ -448,14 +403,9 @@ static int mthca_init_icm(struct mthca_dev *mdev,
		return -ENOMEM;
	}

-	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
+	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
	if (err) {
-		mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
-		goto err_free_aux;
-	}
-	if (status) {
-		mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
		goto err_free_aux;
	}
@@ -596,7 +546,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
	mthca_unmap_eq_icm(mdev);

 err_unmap_aux:
-	mthca_UNMAP_ICM_AUX(mdev, &status);
+	mthca_UNMAP_ICM_AUX(mdev);

 err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
@@ -606,7 +556,6 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 static void mthca_free_icms(struct mthca_dev *mdev)
 {
-	u8 status;

	mthca_free_icm_table(mdev, mdev->mcg_table.table);
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
@@ -619,7 +568,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

-	mthca_UNMAP_ICM_AUX(mdev, &status);
+	mthca_UNMAP_ICM_AUX(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
 }
@@ -629,43 +578,32 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
	struct mthca_profile profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
-	u8 status;
	int err;

-	err = mthca_QUERY_FW(mdev, &status);
+	err = mthca_QUERY_FW(mdev);
	if (err) {
-		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
		return err;
	}
-	if (status) {
-		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}

-	err = mthca_ENABLE_LAM(mdev, &status);
-	if (err) {
-		mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
-		return err;
-	}
-	if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
+	err = mthca_ENABLE_LAM(mdev);
+	if (err == -EAGAIN) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
-	} else if (status) {
-		mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
+	} else if (err) {
+		mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
+		return err;
	}

	err = mthca_load_fw(mdev);
	if (err) {
-		mthca_err(mdev, "Failed to start FW, aborting.\n");
+		mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
-		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
		goto err_stop_fw;
	}
@@ -685,15 +623,9 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
	if (err)
		goto err_stop_fw;

-	err = mthca_INIT_HCA(mdev, &init_hca, &status);
+	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
-		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
-		goto err_free_icm;
-	}
-	if (status) {
-		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_free_icm;
	}
@@ -703,37 +635,34 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
	mthca_free_icms(mdev);

 err_stop_fw:
-	mthca_UNMAP_FA(mdev, &status);
+	mthca_UNMAP_FA(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

 err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
-		mthca_DISABLE_LAM(mdev, &status);
+		mthca_DISABLE_LAM(mdev);

	return err;
 }

 static void mthca_close_hca(struct mthca_dev *mdev)
 {
-	u8 status;
-
-	mthca_CLOSE_HCA(mdev, 0, &status);
+	mthca_CLOSE_HCA(mdev, 0);

	if (mthca_is_memfree(mdev)) {
		mthca_free_icms(mdev);

-		mthca_UNMAP_FA(mdev, &status);
+		mthca_UNMAP_FA(mdev);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
-			mthca_DISABLE_LAM(mdev, &status);
+			mthca_DISABLE_LAM(mdev);
	} else
-		mthca_SYS_DIS(mdev, &status);
+		mthca_SYS_DIS(mdev);
 }

 static int mthca_init_hca(struct mthca_dev *mdev)
 {
-	u8 status;
	int err;
	struct mthca_adapter adapter;
@@ -745,15 +674,9 @@ static int mthca_init_hca(struct mthca_dev *mdev)
	if (err)
		return err;

-	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
+	err = mthca_QUERY_ADAPTER(mdev, &adapter);
	if (err) {
-		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
-		goto err_close;
-	}
-	if (status) {
-		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
		goto err_close;
	}
@@ -772,7 +695,6 @@ static int mthca_init_hca(struct mthca_dev *mdev)
 static int mthca_setup_hca(struct mthca_dev *dev)
 {
	int err;
-	u8 status;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);
@@ -833,8 +755,8 @@ static int mthca_setup_hca(struct mthca_dev *dev)
		goto err_eq_table_free;
	}

-	err = mthca_NOP(dev, &status);
-	if (err || status) {
+	err = mthca_NOP(dev);
+	if (err) {
		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
			mthca_warn(dev, "NOP command failed to generate interrupt "
				   "(IRQ %d).\n",
@@ -1166,7 +1088,6 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 static void __mthca_remove_one(struct pci_dev *pdev)
 {
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
-	u8 status;
	int p;

	if (mdev) {
@@ -1174,7 +1095,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
		mthca_unregister_device(mdev);

		for (p = 1; p <= mdev->limits.num_ports; ++p)
-			mthca_CLOSE_IB(mdev, p, &status);
+			mthca_CLOSE_IB(mdev, p);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
drivers/infiniband/hw/mthca/mthca_mcg.c
浏览文件 @
44602075
...
@@ -68,7 +68,6 @@ static int find_mgm(struct mthca_dev *dev,
     struct mthca_mgm *mgm = mgm_mailbox->buf;
     u8 *mgid;
     int err;
-    u8 status;

     mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
     if (IS_ERR(mailbox))
...
@@ -77,12 +76,9 @@ static int find_mgm(struct mthca_dev *dev,
     memcpy(mgid, gid, 16);

-    err = mthca_MGID_HASH(dev, mailbox, hash, &status);
-    if (err)
-        goto out;
-    if (status) {
-        mthca_err(dev, "MGID_HASH returned status %02x\n", status);
-        err = -EINVAL;
+    err = mthca_MGID_HASH(dev, mailbox, hash);
+    if (err) {
+        mthca_err(dev, "MGID_HASH failed (%d)\n", err);
         goto out;
     }
...
@@ -93,12 +89,9 @@ static int find_mgm(struct mthca_dev *dev,
     *prev = -1;
     do {
-        err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
-        if (err)
-            goto out;
-        if (status) {
-            mthca_err(dev, "READ_MGM returned status %02x\n", status);
-            err = -EINVAL;
+        err = mthca_READ_MGM(dev, *index, mgm_mailbox);
+        if (err) {
+            mthca_err(dev, "READ_MGM failed (%d)\n", err);
             goto out;
         }
...
@@ -134,7 +127,6 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     int link = 0;
     int i;
     int err;
-    u8 status;

     mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
     if (IS_ERR(mailbox))
...
@@ -160,12 +152,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
         goto out;
     }

-    err = mthca_READ_MGM(dev, index, mailbox, &status);
-    if (err)
-        goto out;
-    if (status) {
-        mthca_err(dev, "READ_MGM returned status %02x\n", status);
-        err = -EINVAL;
+    err = mthca_READ_MGM(dev, index, mailbox);
+    if (err) {
+        mthca_err(dev, "READ_MGM failed (%d)\n", err);
         goto out;
     }
     memset(mgm, 0, sizeof *mgm);
...
@@ -189,11 +178,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
         goto out;
     }

-    err = mthca_WRITE_MGM(dev, index, mailbox, &status);
-    if (err)
-        goto out;
-    if (status) {
-        mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
+    err = mthca_WRITE_MGM(dev, index, mailbox);
+    if (err) {
+        mthca_err(dev, "WRITE_MGM failed %d\n", err);
         err = -EINVAL;
         goto out;
     }
...
@@ -201,24 +188,17 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     if (!link)
         goto out;

-    err = mthca_READ_MGM(dev, prev, mailbox, &status);
-    if (err)
-        goto out;
-    if (status) {
-        mthca_err(dev, "READ_MGM returned status %02x\n", status);
-        err = -EINVAL;
+    err = mthca_READ_MGM(dev, prev, mailbox);
+    if (err) {
+        mthca_err(dev, "READ_MGM failed %d\n", err);
         goto out;
     }

     mgm->next_gid_index = cpu_to_be32(index << 6);

-    err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
+    err = mthca_WRITE_MGM(dev, prev, mailbox);
     if (err)
-        goto out;
-    if (status) {
-        mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
-        err = -EINVAL;
-    }
+        mthca_err(dev, "WRITE_MGM returned %d\n", err);

 out:
     if (err && link && index != -1) {
...
@@ -240,7 +220,6 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     int prev, index;
     int i, loc;
     int err;
-    u8 status;

     mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
     if (IS_ERR(mailbox))
...
@@ -275,12 +254,9 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     mgm->qp[loc] = mgm->qp[i - 1];
     mgm->qp[i - 1] = 0;

-    err = mthca_WRITE_MGM(dev, index, mailbox, &status);
-    if (err)
-        goto out;
-    if (status) {
-        mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
-        err = -EINVAL;
+    err = mthca_WRITE_MGM(dev, index, mailbox);
+    if (err) {
+        mthca_err(dev, "WRITE_MGM returned %d\n", err);
         goto out;
     }
...
@@ -292,24 +268,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
         int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
         if (amgm_index_to_free) {
             err = mthca_READ_MGM(dev, amgm_index_to_free,
-                                 mailbox, &status);
-            if (err)
-                goto out;
-            if (status) {
-                mthca_err(dev, "READ_MGM returned status %02x\n",
-                          status);
-                err = -EINVAL;
+                                 mailbox);
+            if (err) {
+                mthca_err(dev, "READ_MGM returned %d\n", err);
                 goto out;
             }
         } else
             memset(mgm->gid, 0, 16);

-    err = mthca_WRITE_MGM(dev, index, mailbox, &status);
-    if (err)
-        goto out;
-    if (status) {
-        mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
-        err = -EINVAL;
+    err = mthca_WRITE_MGM(dev, index, mailbox);
+    if (err) {
+        mthca_err(dev, "WRITE_MGM returned %d\n", err);
         goto out;
     }
     if (amgm_index_to_free) {
...
@@ -319,23 +288,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     } else {
         /* Remove entry from AMGM */
         int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
-        err = mthca_READ_MGM(dev, prev, mailbox, &status);
-        if (err)
-            goto out;
-        if (status) {
-            mthca_err(dev, "READ_MGM returned status %02x\n", status);
-            err = -EINVAL;
+        err = mthca_READ_MGM(dev, prev, mailbox);
+        if (err) {
+            mthca_err(dev, "READ_MGM returned %d\n", err);
             goto out;
         }

         mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);

-        err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
-        if (err)
-            goto out;
-        if (status) {
-            mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
-            err = -EINVAL;
+        err = mthca_WRITE_MGM(dev, prev, mailbox);
+        if (err) {
+            mthca_err(dev, "WRITE_MGM returned %d\n", err);
             goto out;
         }

         BUG_ON(index < dev->limits.num_mgms);
...
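The same conversion repeats through every mthca file in this merge: each firmware command wrapper used to hand back both a transport error and a raw firmware status byte, and every caller open-coded the same two-step check. Below is a minimal user-space sketch of the two calling conventions; cmd_old, cmd_new, and struct fake_dev are hypothetical stand-ins, not driver symbols.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the mthca firmware command layer. */
    struct fake_dev { unsigned char fw_status; };

    /* Old convention: transport error in the return value, firmware
     * status byte in a separate out-parameter; callers checked both. */
    static int cmd_old(struct fake_dev *dev, unsigned char *status)
    {
        *status = dev->fw_status;
        return 0;               /* the mailbox transaction itself worked */
    }

    /* New convention: a non-zero firmware status is folded into a single
     * errno inside the command layer, so callers drop their status locals. */
    static int cmd_new(struct fake_dev *dev)
    {
        return dev->fw_status ? -EINVAL : 0;
    }

    int main(void)
    {
        struct fake_dev dev = { .fw_status = 0x02 };
        unsigned char status;
        int err;

        /* Two checks before ... */
        if (cmd_old(&dev, &status) || status)
            fprintf(stderr, "old style: failed, status %02x\n", status);

        /* ... one check after. */
        err = cmd_new(&dev);
        if (err)
            fprintf(stderr, "new style: failed (%d)\n", err);
        return 0;
    }

Folding the status into one errno inside the command layer is what lets each caller in the hunks above delete its "u8 status" local and half of its error-handling branches.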
drivers/infiniband/hw/mthca/mthca_memfree.c
...
@@ -223,7 +223,6 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
 {
     int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
     int ret = 0;
-    u8 status;

     mutex_lock(&table->mutex);
...
@@ -240,8 +239,8 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
         goto out;
     }

-    if (mthca_MAP_ICM(dev, table->icm[i],
-                      table->virt + i * MTHCA_TABLE_CHUNK_SIZE, &status) || status) {
+    if (mthca_MAP_ICM(dev, table->icm[i],
+                      table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
         mthca_free_icm(dev, table->icm[i], table->coherent);
         table->icm[i] = NULL;
         ret = -ENOMEM;
...
@@ -258,7 +257,6 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
 {
     int i;
-    u8 status;

     if (!mthca_is_memfree(dev))
         return;
...
@@ -269,8 +267,7 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
     if (--table->icm[i]->refcount == 0) {
         mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, &status);
+                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
         mthca_free_icm(dev, table->icm[i], table->coherent);
         table->icm[i] = NULL;
     }
...
@@ -366,7 +363,6 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
     int num_icm;
     unsigned chunk_size;
     int i;
-    u8 status;

     obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
     num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
...
@@ -396,8 +392,8 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                                       __GFP_NOWARN, use_coherent);
         if (!table->icm[i])
             goto err;
-        if (mthca_MAP_ICM(dev, table->icm[i],
-                          virt + i * MTHCA_TABLE_CHUNK_SIZE, &status) || status) {
+        if (mthca_MAP_ICM(dev, table->icm[i],
+                          virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
             mthca_free_icm(dev, table->icm[i], table->coherent);
             table->icm[i] = NULL;
             goto err;
...
@@ -416,8 +412,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
     for (i = 0; i < num_icm; ++i)
         if (table->icm[i]) {
             mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
-                            MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, &status);
+                            MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
             mthca_free_icm(dev, table->icm[i], table->coherent);
         }
...
@@ -429,13 +424,12 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
 void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
 {
     int i;
-    u8 status;

     for (i = 0; i < table->num_icm; ++i)
         if (table->icm[i]) {
-            mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-                            MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, &status);
+            mthca_UNMAP_ICM(dev,
+                            table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+                            MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
             mthca_free_icm(dev, table->icm[i], table->coherent);
         }
...
@@ -454,7 +448,6 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 {
     struct page *pages[1];
     int ret = 0;
-    u8 status;
     int i;

     if (!mthca_is_memfree(dev))
...
@@ -494,9 +487,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
     }

     ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
-                             mthca_uarc_virt(dev, uar, i), &status);
-    if (!ret && status)
-        ret = -EINVAL;
+                             mthca_uarc_virt(dev, uar, i));
     if (ret) {
         pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
         put_page(sg_page(&db_tab->page[i].mem));
...
@@ -557,14 +548,13 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
                                struct mthca_user_db_table *db_tab)
 {
     int i;
-    u8 status;

     if (!mthca_is_memfree(dev))
         return;

     for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
         if (db_tab->page[i].uvirt) {
-            mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
+            mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
             pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
             put_page(sg_page(&db_tab->page[i].mem));
         }
...
@@ -581,7 +571,6 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
     int i, j;
     struct mthca_db_page *page;
     int ret = 0;
-    u8 status;

     mutex_lock(&dev->db_tab->mutex);
...
@@ -644,9 +633,7 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
     memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

     ret = mthca_MAP_ICM_page(dev, page->mapping,
-                             mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
-    if (!ret && status)
-        ret = -EINVAL;
+                             mthca_uarc_virt(dev, &dev->driver_uar, i));
     if (ret) {
         dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                           page->db_rec, page->mapping);
...
@@ -678,7 +665,6 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 {
     int i, j;
     struct mthca_db_page *page;
-    u8 status;

     i = db_index / MTHCA_DB_REC_PER_PAGE;
     j = db_index % MTHCA_DB_REC_PER_PAGE;
...
@@ -694,7 +680,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
     if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
         i >= dev->db_tab->max_group1 - 1) {
-        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

         dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                           page->db_rec, page->mapping);
...
@@ -745,7 +731,6 @@ int mthca_init_db_tab(struct mthca_dev *dev)
 void mthca_cleanup_db_tab(struct mthca_dev *dev)
 {
     int i;
-    u8 status;

     if (!mthca_is_memfree(dev))
         return;
...
@@ -763,7 +748,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
         if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
             mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

-        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

         dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                           dev->db_tab->page[i].db_rec,
...
drivers/infiniband/hw/mthca/mthca_mr.c
...
@@ -257,7 +257,6 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
     struct mthca_mailbox *mailbox;
     __be64 *mtt_entry;
     int err = 0;
-    u8 status;
     int i;

     mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
...
@@ -281,17 +280,11 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
         if (i & 1)
             mtt_entry[i + 2] = 0;

-        err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
+        err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
         if (err) {
             mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
             goto out;
         }
-        if (status) {
-            mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
-                       status);
-            err = -EINVAL;
-            goto out;
-        }

         list_len    -= i;
         start_index += i;
...
@@ -441,7 +434,6 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
     u32 key;
     int i;
     int err;
-    u8 status;

     WARN_ON(buffer_size_shift >= 32);
...
@@ -497,16 +489,10 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
     }

     err = mthca_SW2HW_MPT(dev, mailbox,
-                          key & (dev->limits.num_mpts - 1),
-                          &status);
+                          key & (dev->limits.num_mpts - 1));
     if (err) {
         mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
         goto err_out_mailbox;
-    } else if (status) {
-        mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
-                   status);
-        err = -EINVAL;
-        goto err_out_mailbox;
     }

     mthca_free_mailbox(dev, mailbox);
...
@@ -567,17 +553,12 @@ static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
 void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
 {
     int err;
-    u8 status;

     err = mthca_HW2SW_MPT(dev, NULL,
                           key_to_hw_index(dev, mr->ibmr.lkey) &
-                          (dev->limits.num_mpts - 1),
-                          &status);
+                          (dev->limits.num_mpts - 1));
     if (err)
         mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
-    else if (status)
-        mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
-                   status);

     mthca_free_region(dev, mr->ibmr.lkey);
     mthca_free_mtt(dev, mr->mtt);
...
@@ -590,7 +571,6 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
     struct mthca_mailbox *mailbox;
     u64 mtt_seg;
     u32 key, idx;
-    u8 status;
     int list_len = mr->attr.max_pages;
     int err = -ENOMEM;
     int i;
...
@@ -672,18 +652,11 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
     }

     err = mthca_SW2HW_MPT(dev, mailbox,
-                          key & (dev->limits.num_mpts - 1),
-                          &status);
+                          key & (dev->limits.num_mpts - 1));
     if (err) {
         mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
         goto err_out_mailbox_free;
     }
-    if (status) {
-        mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
-                   status);
-        err = -EINVAL;
-        goto err_out_mailbox_free;
-    }

     mthca_free_mailbox(dev, mailbox);
     return 0;
...
drivers/infiniband/hw/mthca/mthca_provider.c
...
@@ -63,8 +63,6 @@ static int mthca_query_device(struct ib_device *ibdev,
     int err = -ENOMEM;
     struct mthca_dev *mdev = to_mdev(ibdev);
-    u8 status;
-
     in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
     out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
     if (!in_mad || !out_mad)
...
@@ -78,14 +76,9 @@ static int mthca_query_device(struct ib_device *ibdev,
     in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

     err = mthca_MAD_IFC(mdev, 1, 1,
-                        1, NULL, NULL, in_mad, out_mad, &status);
+                        1, NULL, NULL, in_mad, out_mad);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }

     props->device_cap_flags = mdev->device_cap_flags;
     props->vendor_id        = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
...
@@ -141,7 +134,6 @@ static int mthca_query_port(struct ib_device *ibdev,
     struct ib_smp *in_mad  = NULL;
     struct ib_smp *out_mad = NULL;
     int err = -ENOMEM;
-    u8 status;

     in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
     out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
...
@@ -155,14 +147,9 @@ static int mthca_query_port(struct ib_device *ibdev,
     in_mad->attr_mod = cpu_to_be32(port);

     err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
-                        port, NULL, NULL, in_mad, out_mad, &status);
+                        port, NULL, NULL, in_mad, out_mad);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }

     props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
     props->lmc = out_mad->data[34] & 0x7;
...
@@ -214,7 +201,6 @@ static int mthca_modify_port(struct ib_device *ibdev,
     struct mthca_set_ib_param set_ib;
     struct ib_port_attr attr;
     int err;
-    u8 status;

     if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
         return -ERESTARTSYS;
...
@@ -229,14 +215,9 @@ static int mthca_modify_port(struct ib_device *ibdev,
     set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
         ~props->clr_port_cap_mask;

-    err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
+    err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }
 out:
     mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
     return err;
...
@@ -248,7 +229,6 @@ static int mthca_query_pkey(struct ib_device *ibdev,
     struct ib_smp *in_mad  = NULL;
     struct ib_smp *out_mad = NULL;
     int err = -ENOMEM;
-    u8 status;

     in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
     out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
...
@@ -260,14 +240,9 @@ static int mthca_query_pkey(struct ib_device *ibdev,
     in_mad->attr_mod = cpu_to_be32(index / 32);

     err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
-                        port, NULL, NULL, in_mad, out_mad, &status);
+                        port, NULL, NULL, in_mad, out_mad);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }

     *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
...
@@ -283,7 +258,6 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
     struct ib_smp *in_mad  = NULL;
     struct ib_smp *out_mad = NULL;
     int err = -ENOMEM;
-    u8 status;

     in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
     out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
...
@@ -295,14 +269,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
     in_mad->attr_mod = cpu_to_be32(port);

     err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
-                        port, NULL, NULL, in_mad, out_mad, &status);
+                        port, NULL, NULL, in_mad, out_mad);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }

     memcpy(gid->raw, out_mad->data + 8, 8);
...
@@ -311,14 +280,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
     in_mad->attr_mod = cpu_to_be32(index / 8);

     err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
-                        port, NULL, NULL, in_mad, out_mad, &status);
+                        port, NULL, NULL, in_mad, out_mad);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }

     memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
...
@@ -800,7 +764,6 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
     struct mthca_cq *cq = to_mcq(ibcq);
     struct mthca_resize_cq ucmd;
     u32 lkey;
-    u8 status;
     int ret;

     if (entries < 1 || entries > dev->limits.max_cqes)
...
@@ -827,9 +790,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         lkey = ucmd.lkey;
     }

-    ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
-    if (status)
-        ret = -EINVAL;
+    ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

     if (ret) {
         if (cq->resize_buf) {
...
@@ -1161,7 +1122,6 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
 {
     struct ib_fmr *fmr;
     int err;
-    u8 status;
     struct mthca_dev *mdev = NULL;

     list_for_each_entry(fmr, fmr_list, list) {
...
@@ -1182,12 +1142,8 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
     list_for_each_entry(fmr, fmr_list, list)
         mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

-    err = mthca_SYNC_TPT(mdev, &status);
-    if (err)
-        return err;
-    if (status)
-        return -EINVAL;
-    return 0;
+    err = mthca_SYNC_TPT(mdev);
+    return err;
 }

 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
...
@@ -1253,7 +1209,6 @@ static int mthca_init_node_data(struct mthca_dev *dev)
     struct ib_smp *in_mad  = NULL;
     struct ib_smp *out_mad = NULL;
     int err = -ENOMEM;
-    u8 status;

     in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
     out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
...
@@ -1264,28 +1219,18 @@ static int mthca_init_node_data(struct mthca_dev *dev)
     in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

     err = mthca_MAD_IFC(dev, 1, 1,
-                        1, NULL, NULL, in_mad, out_mad, &status);
+                        1, NULL, NULL, in_mad, out_mad);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }

     memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

     in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

     err = mthca_MAD_IFC(dev, 1, 1,
-                        1, NULL, NULL, in_mad, out_mad, &status);
+                        1, NULL, NULL, in_mad, out_mad);
     if (err)
         goto out;
-    if (status) {
-        err = -EINVAL;
-        goto out;
-    }

     if (mthca_is_memfree(dev))
         dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
...
drivers/infiniband/hw/mthca/mthca_qp.c
...
@@ -308,7 +308,6 @@ static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
 static void init_port(struct mthca_dev *dev, int port)
 {
     int err;
-    u8 status;
     struct mthca_init_ib_param param;

     memset(&param, 0, sizeof param);
...
@@ -319,11 +318,9 @@ static void init_port(struct mthca_dev *dev, int port)
     param.gid_cap  = dev->limits.gid_table_len;
     param.pkey_cap = dev->limits.pkey_table_len;

-    err = mthca_INIT_IB(dev, &param, port, &status);
+    err = mthca_INIT_IB(dev, &param, port);
     if (err)
         mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
-    if (status)
-        mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }

 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
...
@@ -433,7 +430,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
     struct mthca_qp_param *qp_param;
     struct mthca_qp_context *context;
     int mthca_state;
-    u8 status;

     mutex_lock(&qp->mutex);
...
@@ -448,12 +444,9 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
         goto out;
     }

-    err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
-    if (err)
-        goto out_mailbox;
-    if (status) {
-        mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
-        err = -EINVAL;
+    err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
+    if (err) {
+        mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
         goto out_mailbox;
     }
...
@@ -555,7 +548,6 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
     struct mthca_qp_param *qp_param;
     struct mthca_qp_context *qp_context;
     u32 sqd_event = 0;
-    u8 status;
     int err = -EINVAL;

     mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
...
@@ -781,13 +773,10 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
         sqd_event = 1 << 31;

     err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
-                          mailbox, sqd_event, &status);
-    if (err)
-        goto out_mailbox;
-    if (status) {
-        mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
-                   cur_state, new_state, status);
-        err = -EINVAL;
+                          mailbox, sqd_event);
+    if (err) {
+        mthca_warn(dev, "modify QP %d->%d returned %d.\n",
+                   cur_state, new_state, err);
         goto out_mailbox;
     }
...
@@ -817,7 +806,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
         cur_state != IB_QPS_ERR &&
         (new_state == IB_QPS_RESET ||
          new_state == IB_QPS_ERR))
-        mthca_CLOSE_IB(dev, qp->port, &status);
+        mthca_CLOSE_IB(dev, qp->port);
     }

 /*
...
@@ -1429,7 +1418,6 @@ static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
 void mthca_free_qp(struct mthca_dev *dev,
                    struct mthca_qp *qp)
 {
-    u8 status;
     struct mthca_cq *send_cq;
     struct mthca_cq *recv_cq;
...
@@ -1454,7 +1442,7 @@ void mthca_free_qp(struct mthca_dev *dev,
     if (qp->state != IB_QPS_RESET)
         mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
-                        NULL, 0, &status);
+                        NULL, 0);

     /*
      * If this is a userspace QP, the buffers, MR, CQs and so on
...
@@ -2263,7 +2251,6 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
 int mthca_init_qp_table(struct mthca_dev *dev)
 {
     int err;
-    u8 status;
     int i;

     spin_lock_init(&dev->qp_table.lock);
...
@@ -2290,15 +2277,10 @@ int mthca_init_qp_table(struct mthca_dev *dev)
     for (i = 0; i < 2; ++i) {
         err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
-                                    dev->qp_table.sqp_start + i * 2,
-                                    &status);
-        if (err)
-            goto err_out;
-        if (status) {
+                                    dev->qp_table.sqp_start + i * 2);
+        if (err) {
             mthca_warn(dev, "CONF_SPECIAL_QP returned "
-                       "status %02x, aborting.\n", status);
-            err = -EINVAL;
+                       "%d, aborting.\n", err);
             goto err_out;
         }
     }
...
@@ -2306,7 +2288,7 @@ int mthca_init_qp_table(struct mthca_dev *dev)
 err_out:
     for (i = 0; i < 2; ++i)
-        mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
+        mthca_CONF_SPECIAL_QP(dev, i, 0);

     mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
     mthca_alloc_cleanup(&dev->qp_table.alloc);
...
@@ -2317,10 +2299,9 @@ int mthca_init_qp_table(struct mthca_dev *dev)
 void mthca_cleanup_qp_table(struct mthca_dev *dev)
 {
     int i;
-    u8 status;

     for (i = 0; i < 2; ++i)
-        mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
+        mthca_CONF_SPECIAL_QP(dev, i, 0);

     mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
     mthca_alloc_cleanup(&dev->qp_table.alloc);
...
drivers/infiniband/hw/mthca/mthca_reset.c
...
@@ -113,7 +113,7 @@ int mthca_reset(struct mthca_dev *mdev)
     }

     hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
-    hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
+    hca_pcie_cap = pci_pcie_cap(mdev->pdev);

     if (bridge) {
         bridge_header = kmalloc(256, GFP_KERNEL);
...
drivers/infiniband/hw/mthca/mthca_srq.c
...
@@ -200,7 +200,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
                     struct ib_srq_attr *attr, struct mthca_srq *srq)
 {
     struct mthca_mailbox *mailbox;
-    u8 status;
     int ds;
     int err;
...
@@ -266,18 +265,12 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
     else
         mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

-    err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+    err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

     if (err) {
         mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
         goto err_out_free_buf;
     }
-    if (status) {
-        mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
-                   status);
-        err = -EINVAL;
-        goto err_out_free_buf;
-    }

     spin_lock_irq(&dev->srq_table.lock);
     if (mthca_array_set(&dev->srq_table.srq,
...
@@ -299,11 +292,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
     return 0;

 err_out_free_srq:
-    err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+    err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
     if (err)
         mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
-    else if (status)
-        mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

 err_out_free_buf:
     if (!pd->ibpd.uobject)
...
@@ -340,7 +331,6 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 {
     struct mthca_mailbox *mailbox;
     int err;
-    u8 status;

     mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
     if (IS_ERR(mailbox)) {
...
@@ -348,11 +338,9 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
         return;
     }

-    err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+    err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
     if (err)
         mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
-    else if (status)
-        mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

     spin_lock_irq(&dev->srq_table.lock);
     mthca_array_clear(&dev->srq_table.srq,
...
@@ -378,8 +366,7 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 {
     struct mthca_dev *dev = to_mdev(ibsrq->device);
     struct mthca_srq *srq = to_msrq(ibsrq);
-    int ret;
-    u8 status;
+    int ret = 0;

     /* We don't support resizing SRQs (yet?) */
     if (attr_mask & IB_SRQ_MAX_WR)
...
@@ -391,16 +378,11 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
             return -EINVAL;

         mutex_lock(&srq->mutex);
-        ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+        ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
         mutex_unlock(&srq->mutex);
-
-        if (ret)
-            return ret;
-        if (status)
-            return -EINVAL;
     }

-    return 0;
+    return ret;
 }

 int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
...
@@ -410,14 +392,13 @@ int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
     struct mthca_mailbox *mailbox;
     struct mthca_arbel_srq_context *arbel_ctx;
     struct mthca_tavor_srq_context *tavor_ctx;
-    u8 status;
     int err;

     mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
     if (IS_ERR(mailbox))
         return PTR_ERR(mailbox);

-    err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+    err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
     if (err)
         goto out;
...
drivers/infiniband/hw/nes/nes_verbs.c
...
@@ -604,16 +604,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
 }

-/**
- * nes_modify_port
- */
-static int nes_modify_port(struct ib_device *ibdev, u8 port,
-                           int port_modify_mask, struct ib_port_modify *props)
-{
-    return 0;
-}
-
 /**
  * nes_query_pkey
  */
...
@@ -3882,7 +3872,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
     nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
     nesibdev->ibdev.query_device = nes_query_device;
     nesibdev->ibdev.query_port = nes_query_port;
-    nesibdev->ibdev.modify_port = nes_modify_port;
     nesibdev->ibdev.query_pkey = nes_query_pkey;
     nesibdev->ibdev.query_gid = nes_query_gid;
     nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
...
drivers/infiniband/hw/qib/qib.h
...
@@ -1012,6 +1012,8 @@ struct qib_devdata {
     u8 psxmitwait_supported;
     /* cycle length of PS* counters in HW (in picoseconds) */
     u16 psxmitwait_check_rate;
+    /* high volume overflow errors deferred to tasklet */
+    struct tasklet_struct error_tasklet;
 };

 /* hol_state values */
...
@@ -1433,6 +1435,7 @@ extern struct mutex qib_mutex;
 struct qib_hwerror_msgs {
     u64 mask;
     const char *msg;
+    size_t sz;
 };

 #define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
...
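The new sz member exists so the 7322 error decoder can copy each message with a precomputed length instead of re-measuring it at interrupt time. The key C detail: applying sizeof to the string literal produced by the #fldname stringizing operator yields the string's length including the terminating NUL, evaluated at compile time. A standalone sketch of the idiom (msg_entry and MSG_AUTO are illustrative names, not the driver's):

    #include <stdio.h>
    #include <string.h>

    struct msg_entry {
        unsigned long long mask;
        const char *msg;
        size_t sz;
    };

    /* #fldname becomes a string literal; sizeof(#fldname) is its
     * length plus the NUL, computed by the compiler. */
    #define MSG_AUTO(bit, fldname) \
        { .mask = (1ULL << (bit)), .msg = #fldname, .sz = sizeof(#fldname) }

    static const struct msg_entry table[] = {
        MSG_AUTO(0, RcvEgrFullErr),
        MSG_AUTO(1, RcvHdrFullErr),
        { .mask = 0, .sz = 0 }          /* sentinel, as in the driver */
    };

    int main(void)
    {
        /* sizeof("RcvEgrFullErr") == strlen("RcvEgrFullErr") + 1 */
        printf("%s: sz=%zu strlen=%zu\n",
               table[0].msg, table[0].sz, strlen(table[0].msg));
        return 0;
    }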
drivers/infiniband/hw/qib/qib_file_ops.c
...
@@ -1527,6 +1527,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
     struct qib_filedata *fd = fp->private_data;
     const struct qib_ctxtdata *rcd = fd->rcd;
     const struct qib_devdata *dd = rcd->dd;
+    unsigned int weight;

     if (dd->flags & QIB_HAS_SEND_DMA) {
         fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
...
@@ -1545,8 +1546,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
      * it just means that sooner or later we don't recommend
      * a cpu, and let the scheduler do it's best.
      */
-    if (!ret && cpus_weight(current->cpus_allowed) >=
-        qib_cpulist_count) {
+    weight = cpumask_weight(tsk_cpus_allowed(current));
+    if (!ret && weight >= qib_cpulist_count) {
         int cpu;
         cpu = find_first_zero_bit(qib_cpulist,
                                   qib_cpulist_count);
...
@@ -1554,13 +1555,13 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
             __set_bit(cpu, qib_cpulist);
             fd->rec_cpu_num = cpu;
         }
-    } else if (cpus_weight(current->cpus_allowed) == 1 &&
-               test_bit(first_cpu(current->cpus_allowed),
+    } else if (weight == 1 &&
+               test_bit(cpumask_first(tsk_cpus_allowed(current)),
                         qib_cpulist))
         qib_devinfo(dd->pcidev, "%s PID %u affinity "
                     "set to cpu %d; already allocated\n",
                     current->comm, current->pid,
-                    first_cpu(current->cpus_allowed));
+                    cpumask_first(tsk_cpus_allowed(current)));
     }

     mutex_unlock(&qib_mutex);
...
@@ -1904,8 +1905,9 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
     struct qib_ctxtdata *rcd;
     unsigned ctxt;
     int ret = 0;
+    unsigned long flags;

-    spin_lock(&ppd->dd->uctxt_lock);
+    spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
     for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
          ctxt++) {
         rcd = ppd->dd->rcd[ctxt];
...
@@ -1924,7 +1926,7 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
         ret = 1;
         break;
     }
-    spin_unlock(&ppd->dd->uctxt_lock);
+    spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);

     return ret;
 }
...
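The qib_assign_ctxt change swaps the old open-coded cpus_allowed accessors for the cpumask API and computes the task's allowed-CPU weight once into a local instead of recomputing it in every branch. A rough user-space analogue using the POSIX affinity calls, where CPU_COUNT() stands in for cpumask_weight(tsk_cpus_allowed(current)):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t set;
        unsigned int weight;

        /* pid 0 means the calling thread */
        if (sched_getaffinity(0, sizeof(set), &set))
            return 1;

        weight = CPU_COUNT(&set);       /* cached once, like the new code */
        if (weight == 1)
            printf("pinned to a single CPU\n");
        else
            printf("may run on %u CPUs\n", weight);
        return 0;
    }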
drivers/infiniband/hw/qib/qib_iba7220.c
...
@@ -2434,6 +2434,7 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
     int lsb, ret = 0, setforce = 0;
     u16 lcmd, licmd;
     unsigned long flags;
+    u32 tmp = 0;

     switch (which) {
     case QIB_IB_CFG_LIDLMC:
...
@@ -2467,9 +2468,6 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
         maskr = IBA7220_IBC_WIDTH_MASK;
         lsb = IBA7220_IBC_WIDTH_SHIFT;
         setforce = 1;
-        spin_lock_irqsave(&ppd->lflags_lock, flags);
-        ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
-        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         break;

     case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
...
@@ -2643,6 +2641,28 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
             goto bail;
         }
         qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+        maskr = IBA7220_IBC_WIDTH_MASK;
+        lsb = IBA7220_IBC_WIDTH_SHIFT;
+        tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
+        /* If the width active on the chip does not match the
+         * width in the shadow register, write the new active
+         * width to the chip.
+         * We don't have to worry about speed as the speed is taken
+         * care of by set_7220_ibspeed_fast called by ib_updown.
+         */
+        if (ppd->link_width_enabled - 1 != tmp) {
+            ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+            ppd->cpspec->ibcddrctrl |=
+                (((u64)(ppd->link_width_enabled - 1) & maskr) << lsb);
+            qib_write_kreg(dd, kr_ibcddrctrl,
+                           ppd->cpspec->ibcddrctrl);
+            qib_write_kreg(dd, kr_scratch, 0);
+            spin_lock_irqsave(&ppd->lflags_lock, flags);
+            ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+            spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+        }
         goto bail;

     case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
...
drivers/infiniband/hw/qib/qib_iba7322.c
...
@@ -114,6 +114,10 @@ static ushort qib_singleport;
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

+static ushort qib_krcvq01_no_msi;
+module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
+MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
+
 /*
  * Receive header queue sizes
  */
...
@@ -397,7 +401,6 @@ MODULE_PARM_DESC(txselect, \
 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
...
@@ -1107,9 +1110,9 @@ static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
-    .msg = #fldname }
+    .msg = #fldname , .sz = sizeof(#fldname) }
 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
-    fldname##Mask##_##port), .msg = #fldname }
+    fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
     HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
     HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
...
@@ -1127,14 +1130,16 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
     HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
     HWE_AUTO(statusValidNoEop),
     HWE_AUTO(LATriggered),
-    { .mask = 0 }
+    { .mask = 0, .sz = 0 }
 };

 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
-    .msg = #fldname }
+    .msg = #fldname , .sz = sizeof(#fldname) }
 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
-    .msg = #fldname }
+    .msg = #fldname , .sz = sizeof(#fldname) }
 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+    E_AUTO(RcvEgrFullErr),
+    E_AUTO(RcvHdrFullErr),
     E_AUTO(ResetNegated),
     E_AUTO(HardwareErr),
     E_AUTO(InvalidAddrErr),
...
@@ -1147,9 +1152,7 @@ static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
     E_AUTO(SendSpecialTriggerErr),
     E_AUTO(SDmaWrongPortErr),
     E_AUTO(SDmaBufMaskDuplicateErr),
-    E_AUTO(RcvHdrFullErr),
-    E_AUTO(RcvEgrFullErr),
-    { .mask = 0 }
+    { .mask = 0, .sz = 0 }
 };

 static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
...
@@ -1159,7 +1162,8 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
     /*
      * SDmaHaltErr is not really an error, make it clearer;
      */
-    {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+    {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
+        .sz = 11},
     E_P_AUTO(SDmaDescAddrMisalignErr),
     E_P_AUTO(SDmaUnexpDataErr),
     E_P_AUTO(SDmaMissingDwErr),
...
@@ -1195,7 +1199,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
     E_P_AUTO(RcvICRCErr),
     E_P_AUTO(RcvVCRCErr),
     E_P_AUTO(RcvFormatErr),
-    { .mask = 0 }
+    { .mask = 0, .sz = 0 }
 };

 /*
...
@@ -1203,17 +1207,17 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
  * context
  */
 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
-    .msg = #fldname }
+    .msg = #fldname , .sz = sizeof(#fldname) }
 /* Below generates "auto-message" for interrupts specific to a port */
 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
     SYM_LSB(IntMask, fldname##Mask##_0), \
     SYM_LSB(IntMask, fldname##Mask##_1)), \
-    .msg = #fldname "_P" }
+    .msg = #fldname "_P" , .sz = sizeof(#fldname "_P") }
 /* For some reason, the SerDesTrimDone bits are reversed */
 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
     SYM_LSB(IntMask, fldname##Mask##_1), \
     SYM_LSB(IntMask, fldname##Mask##_0)), \
-    .msg = #fldname "_P" }
+    .msg = #fldname "_P" , .sz = sizeof(#fldname "_P") }
 /*
  * Below generates "auto-message" for interrupts specific to a context,
  * with ctxt-number appended
...
@@ -1221,7 +1225,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
     SYM_LSB(IntMask, fldname##0IntMask), \
     SYM_LSB(IntMask, fldname##17IntMask)), \
-    .msg = #fldname "_C"}
+    .msg = #fldname "_C" , .sz = sizeof(#fldname "_C") }

 static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
     INTR_AUTO_P(SDmaInt),
...
@@ -1235,11 +1239,12 @@ static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
     INTR_AUTO_P(SendDoneInt),
     INTR_AUTO(SendBufAvailInt),
     INTR_AUTO_C(RcvAvail),
-    { .mask = 0 }
+    { .mask = 0, .sz = 0 }
 };

 #define TXSYMPTOM_AUTO_P(fldname) \
-    { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+    { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
+    .msg = #fldname, .sz = sizeof(#fldname) }
 static const struct qib_hwerror_msgs hdrchk_msgs[] = {
     TXSYMPTOM_AUTO_P(NonKeyPacket),
     TXSYMPTOM_AUTO_P(GRHFail),
...
@@ -1248,7 +1253,7 @@ static const struct qib_hwerror_msgs hdrchk_msgs[] = {
     TXSYMPTOM_AUTO_P(SLIDFail),
     TXSYMPTOM_AUTO_P(RawIPV6),
     TXSYMPTOM_AUTO_P(PacketTooSmall),
-    { .mask = 0 }
+    { .mask = 0, .sz = 0 }
 };

 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
...
@@ -1293,7 +1298,7 @@ static void err_decode(char *msg, size_t len, u64 errs,
     u64 these, lmask;
     int took, multi, n = 0;

-    while (msp && msp->mask) {
+    while (errs && msp && msp->mask) {
         multi = (msp->mask & (msp->mask - 1));
         while (errs & msp->mask) {
             these = (errs & msp->mask);
...
@@ -1304,9 +1309,14 @@ static void err_decode(char *msg, size_t len, u64 errs,
                 *msg++ = ',';
                 len--;
             }
-            took = scnprintf(msg, len, "%s", msp->msg);
+            BUG_ON(!msp->sz);
+            /* msp->sz counts the nul */
+            took = min_t(size_t, msp->sz - (size_t)1, len);
+            memcpy(msg, msp->msg, took);
             len -= took;
             msg += took;
+            if (len)
+                *msg = '\0';
         }
         errs &= ~lmask;
         if (len && multi) {
...
@@ -1644,6 +1654,14 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
     return;
 }

+static void qib_error_tasklet(unsigned long data)
+{
+    struct qib_devdata *dd = (struct qib_devdata *)data;
+
+    handle_7322_errors(dd);
+    qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
 static void reenable_chase(unsigned long opaque)
 {
     struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
...
@@ -2725,8 +2743,10 @@ static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
         unknown_7322_ibits(dd, istat);
     if (istat & QIB_I_GPIO)
         unknown_7322_gpio_intr(dd);
-    if (istat & QIB_I_C_ERROR)
-        handle_7322_errors(dd);
+    if (istat & QIB_I_C_ERROR) {
+        qib_write_kreg(dd, kr_errmask, 0ULL);
+        tasklet_schedule(&dd->error_tasklet);
+    }
     if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
         handle_7322_p_errors(dd->rcd[0]->ppd);
     if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
...
@@ -3125,6 +3145,8 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
             arg = dd->rcd[ctxt];
             if (!arg)
                 continue;
+            if (qib_krcvq01_no_msi && ctxt < 2)
+                continue;
             lsb = QIB_I_RCVAVAIL_LSB + ctxt;
             handler = qib_7322pintr;
             name = QIB_DRV_NAME " (kctx)";
...
@@ -3159,6 +3181,8 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
     for (i = 0; i < ARRAY_SIZE(redirect); i++)
         qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
     dd->cspec->main_int_mask = mask;
+    tasklet_init(&dd->error_tasklet, qib_error_tasklet,
+        (unsigned long)dd);
 bail:
     ;
 }
...
@@ -6788,6 +6812,10 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
             (i >= ARRAY_SIZE(irq_table) &&
              dd->rcd[i - ARRAY_SIZE(irq_table)]))
             actual_cnt++;
+    /* reduce by ctxt's < 2 */
+    if (qib_krcvq01_no_msi)
+        actual_cnt -= dd->num_pports;
+
     tabsize = actual_cnt;
     dd->cspec->msix_entries = kmalloc(tabsize *
             sizeof(struct msix_entry), GFP_KERNEL);
...
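Two threads run through the 7322 hunks above: error-interrupt handling moves into a tasklet (mask kr_errmask in the hard interrupt, schedule the tasklet, re-enable from the tasklet), and err_decode() stops formatting fixed messages through scnprintf, copying at most sz - 1 bytes with an explicit terminator instead. A self-contained sketch of that bounded copy; copy_msg is an illustrative name, and min_t is mimicked with a plain comparison:

    #include <stdio.h>
    #include <string.h>

    /* Copy at most sz - 1 bytes of src (sz includes the NUL, as with the
     * precomputed sizeof(#fldname) values) into msg, terminating if room
     * remains. Returns the number of bytes copied. */
    static size_t copy_msg(char *msg, size_t len, const char *src, size_t sz)
    {
        size_t took = (sz - 1 < len) ? sz - 1 : len;

        memcpy(msg, src, took);
        if (len > took)
            msg[took] = '\0';   /* terminate when space remains */
        return took;
    }

    int main(void)
    {
        char buf[8];
        size_t took = copy_msg(buf, sizeof(buf) - 1, "HardwareErr",
                               sizeof("HardwareErr"));

        buf[took] = '\0';       /* caller reserved the final byte */
        printf("copied %zu bytes: %s\n", took, buf);
        return 0;
    }

The point of the precomputed length is that the interrupt path never re-scans the message string, and the copy can never run past the destination even when the message is longer than the remaining buffer.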
drivers/infiniband/hw/qib/qib_mad.c
...
@@ -1125,22 +1125,22 @@ static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
     return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
 }

-static int pma_get_classportinfo(struct ib_perf *pmp,
+static int pma_get_classportinfo(struct ib_pma_mad *pmp,
                                  struct ib_device *ibdev)
 {
-    struct ib_pma_classportinfo *p =
-        (struct ib_pma_classportinfo *)pmp->data;
+    struct ib_class_port_info *p =
+        (struct ib_class_port_info *)pmp->data;
     struct qib_devdata *dd = dd_from_ibdev(ibdev);

     memset(pmp->data, 0, sizeof(pmp->data));

-    if (pmp->attr_mod != 0)
-        pmp->status |= IB_SMP_INVALID_FIELD;
+    if (pmp->mad_hdr.attr_mod != 0)
+        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

     /* Note that AllPortSelect is not valid */
     p->base_version = 1;
     p->class_version = 1;
-    p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+    p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
     /*
      * Set the most significant bit of CM2 to indicate support for
      * congestion statistics
...
@@ -1154,7 +1154,7 @@ static int pma_get_classportinfo(struct ib_pma_mad *pmp,
     return reply((struct ib_smp *) pmp);
 }

-static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
                                       struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portsamplescontrol *p =
...
@@ -1169,8 +1169,8 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
     memset(pmp->data, 0, sizeof(pmp->data));
     p->port_select = port_select;
-    if (pmp->attr_mod != 0 || port_select != port) {
-        pmp->status |= IB_SMP_INVALID_FIELD;
+    if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
         goto bail;
     }
     spin_lock_irqsave(&ibp->lock, flags);
...
@@ -1192,7 +1192,7 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
     return reply((struct ib_smp *) pmp);
 }

-static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
                                       struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portsamplescontrol *p =
...
@@ -1205,8 +1205,8 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
     u8 status, xmit_flags;
     int ret;

-    if (pmp->attr_mod != 0 || p->port_select != port) {
-        pmp->status |= IB_SMP_INVALID_FIELD;
+    if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
         ret = reply((struct ib_smp *) pmp);
         goto bail;
     }
...
@@ -1321,7 +1321,7 @@ static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
     return ret;
 }

-static int pma_get_portsamplesresult(struct ib_perf *pmp,
+static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portsamplesresult *p =
...
@@ -1360,7 +1360,7 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
     return reply((struct ib_smp *) pmp);
 }

-static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
                                          struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portsamplesresult_ext *p =
...
@@ -1402,7 +1402,7 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
     return reply((struct ib_smp *) pmp);
 }

-static int pma_get_portcounters(struct ib_perf *pmp,
+static int pma_get_portcounters(struct ib_pma_mad *pmp,
                                 struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
...
@@ -1436,8 +1436,8 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp,
     memset(pmp->data, 0, sizeof(pmp->data));
     p->port_select = port_select;
-    if (pmp->attr_mod != 0 || port_select != port)
-        pmp->status |= IB_SMP_INVALID_FIELD;
+    if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
+        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

     if (cntrs.symbol_error_counter > 0xFFFFUL)
         p->symbol_error_counter = cpu_to_be16(0xFFFF);
...
@@ -1472,7 +1472,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp,
         cntrs.local_link_integrity_errors = 0xFUL;
     if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
         cntrs.excessive_buffer_overrun_errors = 0xFUL;
-    p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+    p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
         cntrs.excessive_buffer_overrun_errors;
     if (cntrs.vl15_dropped > 0xFFFFUL)
         p->vl15_dropped = cpu_to_be16(0xFFFF);
...
@@ -1500,7 +1500,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp,
     return reply((struct ib_smp *) pmp);
 }

-static int pma_get_portcounters_cong(struct ib_perf *pmp,
+static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
 {
     /* Congestion PMA packets start at offset 24 not 64 */
...
@@ -1510,7 +1510,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
     struct qib_ibport *ibp = to_iport(ibdev, port);
     struct qib_pportdata *ppd = ppd_from_ibp(ibp);
     struct qib_devdata *dd = dd_from_ppd(ppd);
-    u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+    u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
     u64 xmit_wait_counter;
     unsigned long flags;
...
@@ -1519,9 +1519,9 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
      * SET method ends up calling this anyway.
      */
     if (!dd->psxmitwait_supported)
-        pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
     if (port_select != port)
-        pmp->status |= IB_SMP_INVALID_FIELD;
+        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

     qib_get_counters(ppd, &cntrs);
     spin_lock_irqsave(&ppd->ibport_data.lock, flags);
...
@@ -1603,7 +1603,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
         cntrs.local_link_integrity_errors = 0xFUL;
     if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
         cntrs.excessive_buffer_overrun_errors = 0xFUL;
-    p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+    p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
         cntrs.excessive_buffer_overrun_errors;
     if (cntrs.vl15_dropped > 0xFFFFUL)
         p->vl15_dropped = cpu_to_be16(0xFFFF);
...
@@ -1613,7 +1613,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
     return reply((struct ib_smp *) pmp);
 }

-static int pma_get_portcounters_ext(struct ib_perf *pmp,
+static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portcounters_ext *p =
...
@@ -1626,8 +1626,8 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
     memset(pmp->data, 0, sizeof(pmp->data));
     p->port_select = port_select;
-    if (pmp->attr_mod != 0 || port_select != port) {
-        pmp->status |= IB_SMP_INVALID_FIELD;
+    if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
         goto bail;
     }
...
@@ -1652,7 +1652,7 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
     return reply((struct ib_smp *) pmp);
 }

-static int pma_set_portcounters(struct ib_perf *pmp,
+static int pma_set_portcounters(struct ib_pma_mad *pmp,
                                 struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
...
@@ -1715,14 +1715,14 @@ static int pma_set_portcounters(struct ib_pma_mad *pmp,
     return pma_get_portcounters(pmp, ibdev, port);
 }

-static int pma_set_portcounters_cong(struct ib_perf *pmp,
+static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
 {
     struct qib_ibport *ibp = to_iport(ibdev, port);
     struct qib_pportdata *ppd = ppd_from_ibp(ibp);
     struct qib_devdata *dd = dd_from_ppd(ppd);
     struct qib_verbs_counters cntrs;
-    u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+    u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
     int ret = 0;
     unsigned long flags;
...
@@ -1766,7 +1766,7 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
     return ret;
 }

-static int pma_set_portcounters_ext(struct ib_perf *pmp,
+static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
 {
     struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
...
@@ -1959,19 +1959,19 @@ static int process_perf(struct ib_device *ibdev, u8 port,
                         struct ib_mad *in_mad,
                         struct ib_mad *out_mad)
 {
-    struct ib_perf *pmp = (struct ib_perf *)out_mad;
+    struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
     int ret;

     *out_mad = *in_mad;
-    if (pmp->class_version != 1) {
-        pmp->status |= IB_SMP_UNSUP_VERSION;
+    if (pmp->mad_hdr.class_version != 1) {
+        pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
         ret = reply((struct ib_smp *) pmp);
         goto bail;
     }

-    switch (pmp->method) {
+    switch (pmp->mad_hdr.method) {
     case IB_MGMT_METHOD_GET:
-        switch (pmp->attr_id) {
+        switch (pmp->mad_hdr.attr_id) {
         case IB_PMA_CLASS_PORT_INFO:
             ret = pma_get_classportinfo(pmp, ibdev);
             goto bail;
...
@@ -1994,13 +1994,13 @@ static int process_perf(struct ib_device *ibdev, u8 port,
             ret = pma_get_portcounters_cong(pmp, ibdev, port);
             goto bail;
         default:
-            pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+            pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
             ret = reply((struct ib_smp *) pmp);
             goto bail;
         }

     case IB_MGMT_METHOD_SET:
-        switch (pmp->attr_id) {
+        switch (pmp->mad_hdr.attr_id) {
         case IB_PMA_PORT_SAMPLES_CONTROL:
             ret = pma_set_portsamplescontrol(pmp, ibdev, port);
             goto bail;
...
@@ -2014,7 +2014,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
             ret = pma_set_portcounters_cong(pmp, ibdev, port);
             goto bail;
         default:
-            pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+            pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
             ret = reply((struct ib_smp *) pmp);
             goto bail;
         }
...
@@ -2030,7 +2030,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
         goto bail;

     default:
-        pmp->status |= IB_SMP_UNSUP_METHOD;
+        pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
         ret = reply((struct ib_smp *) pmp);
     }
...
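Mechanically, almost every hunk in qib_mad.c is the same rename: the driver-private struct ib_perf is replaced by the shared struct ib_pma_mad, which nests the common MAD header as a named mad_hdr member rather than inlining its fields, so pmp->status becomes pmp->mad_hdr.status, pmp->attr_mod becomes pmp->mad_hdr.attr_mod, and so on. A rough sketch of the shape; the field layout below is illustrative, not the exact <rdma/ib_pma.h> definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the common MAD header. */
    struct mad_hdr_sketch {
        uint8_t  base_version;
        uint8_t  mgmt_class;
        uint8_t  class_version;
        uint8_t  method;
        uint16_t status;
        /* ... remaining common header fields ... */
    };

    struct pma_mad_sketch {
        struct mad_hdr_sketch mad_hdr;  /* was: fields inlined in ib_perf */
        uint8_t reserved[40];
        uint8_t data[192];
    };

    int main(void)
    {
        struct pma_mad_sketch pmp = { .mad_hdr.status = 0 };

        pmp.mad_hdr.status |= 0x0004;   /* e.g. an UNSUP_VERSION-style bit */
        printf("status=0x%04x\n", pmp.mad_hdr.status);
        return 0;
    }

Sharing one header type is also what lets qib_mad.h below delete its private copies of the PMA structs and attribute-ID defines in favor of the common header file.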
drivers/infiniband/hw/qib/qib_mad.h
...
@@ -32,6 +32,8 @@
  * SOFTWARE.
  */

+#include <rdma/ib_pma.h>
+
 #define IB_SMP_UNSUP_VERSION    cpu_to_be16(0x0004)
 #define IB_SMP_UNSUP_METHOD     cpu_to_be16(0x0008)
 #define IB_SMP_UNSUP_METH_ATTR  cpu_to_be16(0x000C)
...
@@ -180,109 +182,8 @@ struct ib_vl_weight_elem {
 #define IB_VLARB_HIGHPRI_0_31   3
 #define IB_VLARB_HIGHPRI_32_63  4

-/*
- * PMA class portinfo capability mask bits
- */
-#define IB_PMA_CLASS_CAP_ALLPORTSELECT  cpu_to_be16(1 << 8)
-#define IB_PMA_CLASS_CAP_EXT_WIDTH      cpu_to_be16(1 << 9)
-#define IB_PMA_CLASS_CAP_XMIT_WAIT      cpu_to_be16(1 << 12)
-
-#define IB_PMA_CLASS_PORT_INFO          cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL     cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT      cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS            cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT        cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT  cpu_to_be16(0x001E)
-#define IB_PMA_PORT_COUNTERS_CONG       cpu_to_be16(0xFF00)
-
-struct ib_perf {
-    u8 base_version;
-    u8 mgmt_class;
-    u8 class_version;
-    u8 method;
-    __be16 status;
-    __be16 unused;
-    __be64 tid;
-    __be16 attr_id;
-    __be16 resv;
-    __be32 attr_mod;
-    u8 reserved[40];
-    u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
-    u8 base_version;
-    u8 class_version;
-    __be16 cap_mask;
-    u8 reserved[3];
-    u8 resp_time_value;     /* only lower 5 bits */
-    union ib_gid redirect_gid;
-    __be32 redirect_tc_sl_fl;       /* 8, 4, 20 bits respectively */
-    __be16 redirect_lid;
-    __be16 redirect_pkey;
-    __be32 redirect_qp;     /* only lower 24 bits */
-    __be32 redirect_qkey;
-    union ib_gid trap_gid;
-    __be32 trap_tc_sl_fl;   /* 8, 4, 20 bits respectively */
-    __be16 trap_lid;
-    __be16 trap_pkey;
-    __be32 trap_hl_qp;      /* 8, 24 bits respectively */
-    __be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
-    u8 opcode;
-    u8 port_select;
-    u8 tick;
-    u8 counter_width;       /* only lower 3 bits */
-    __be32 counter_mask0_9; /* 2, 10 * 3, bits */
-    __be16 counter_mask10_14;       /* 1, 5 * 3, bits */
-    u8 sample_mechanisms;
-    u8 sample_status;       /* only lower 2 bits */
-    __be64 option_mask;
-    __be64 vendor_mask;
-    __be32 sample_start;
-    __be32 sample_interval;
-    __be16 tag;
-    __be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
-    __be16 tag;
-    __be16 sample_status;   /* only lower 2 bits */
-    __be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
-    __be16 tag;
-    __be16 sample_status;   /* only lower 2 bits */
-    __be32 extended_width;  /* only upper 2 bits */
-    __be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
-    u8 reserved;
-    u8 port_select;
-    __be16 counter_select;
-    __be16 symbol_error_counter;
-    u8 link_error_recovery_counter;
-    u8 link_downed_counter;
-    __be16 port_rcv_errors;
-    __be16 port_rcv_remphys_errors;
-    __be16 port_rcv_switch_relay_errors;
-    __be16 port_xmit_discards;
-    u8 port_xmit_constraint_errors;
-    u8 port_rcv_constraint_errors;
-    u8 reserved1;
-    u8 lli_ebor_errors;     /* 4, 4, bits */
-    __be16 reserved2;
-    __be16 vl15_dropped;
-    __be32 port_xmit_data;
-    __be32 port_rcv_data;
-    __be32 port_xmit_packets;
-    __be32 port_rcv_packets;
-} __attribute__ ((packed));
-
 struct ib_pma_portcounters_cong {
     u8 reserved;
     u8 reserved1;
...
@@ -297,7 +198,7 @@ struct ib_pma_portcounters_cong {
     u8 port_xmit_constraint_errors;
     u8 port_rcv_constraint_errors;
     u8 reserved2;
-    u8 lli_ebor_errors;     /* 4, 4, bits */
+    u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
     __be16 reserved3;
     __be16 vl15_dropped;
     __be64 port_xmit_data;
...
@@ -316,49 +217,11 @@ struct ib_pma_portcounters_cong {
 /* number of 4nsec cycles equaling 2secs */
 #define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC

-#define IB_PMA_SEL_SYMBOL_ERROR                 cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY          cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED                  cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS              cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS      cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS           cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS  cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS    cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED            cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA               cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA                cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS            cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS             cpu_to_be16(0x8000)
-
 #define IB_PMA_SEL_CONG_ALL                     0x01
 #define IB_PMA_SEL_CONG_PORT_DATA               0x02
 #define IB_PMA_SEL_CONG_XMIT                    0x04
 #define IB_PMA_SEL_CONG_ROUTING                 0x08

-struct ib_pma_portcounters_ext {
-    u8 reserved;
-    u8 port_select;
-    __be16 counter_select;
-    __be32 reserved1;
-    __be64 port_xmit_data;
-    __be64 port_rcv_data;
-    __be64 port_xmit_packets;
-    __be64 port_rcv_packets;
-    __be64 port_unicast_xmit_packets;
-    __be64 port_unicast_rcv_packets;
-    __be64 port_multicast_xmit_packets;
-    __be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA              cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA               cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS           cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS            cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS       cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS        cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS     cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS      cpu_to_be16(0x0080)
-
 /*
  * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
  * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
...
drivers/infiniband/hw/qib/qib_pcie.c
...
...
@@ -255,7 +255,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
u16
linkstat
,
speed
;
int
pos
=
0
,
pose
,
ret
=
1
;
pose
=
pci_
find_capability
(
dd
->
pcidev
,
PCI_CAP_ID_EXP
);
pose
=
pci_
pcie_cap
(
dd
->
pcidev
);
if
(
!
pose
)
{
qib_dev_err
(
dd
,
"Can't find PCI Express capability!
\n
"
);
/* set up something... */
...
...
@@ -509,7 +509,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
qib_devinfo
(
dd
->
pcidev
,
"Parent not root
\n
"
);
return
1
;
}
ppos
=
pci_
find_capability
(
parent
,
PCI_CAP_ID_EXP
);
ppos
=
pci_
pcie_cap
(
parent
);
if
(
!
ppos
)
return
1
;
if
(
parent
->
vendor
!=
0x8086
)
...
...
@@ -578,14 +578,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
		qib_devinfo(dd->pcidev, "Parent not root\n");
		goto bail;
	}

-	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	ppos = pci_pcie_cap(parent);
	if (ppos) {
		pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
		pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
	} else
		goto bail;

	/* Find out supported and configured values for endpoint (us) */
-	epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+	epos = pci_pcie_cap(dd->pcidev);
	if (epos) {
		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
...
...
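Both hunks above make the same substitution: the open-coded pci_find_capability(..., PCI_CAP_ID_EXP) walk is replaced by pci_pcie_cap(), which returns the PCI Express capability offset cached by the PCI core at enumeration time. A minimal sketch of the resulting pattern (example_read_devctl is an illustrative name, not part of the commit):

	#include <linux/pci.h>

	/* Illustrative helper: read the PCIe Device Control register via
	 * the cached capability offset instead of re-walking config space. */
	static int example_read_devctl(struct pci_dev *pdev, u16 *devctl)
	{
		int pos = pci_pcie_cap(pdev);	/* 0 if the device is not PCIe */

		if (!pos)
			return -ENODEV;
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, devctl);
		return 0;
	}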
drivers/infiniband/hw/qib/qib_sysfs.c
...
...
@@ -507,6 +507,18 @@ static ssize_t show_nctxts(struct device *device,
			dd->first_user_ctxt);
}

+static ssize_t show_nfreectxts(struct device *device,
+			       struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+
+	/* Return the number of free user ports (contexts) available. */
+	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+		dd->first_user_ctxt - (u32)qib_stats.sps_ctxts);
+}
+
static ssize_t show_serial(struct device *device,
			   struct device_attribute *attr, char *buf)
{
...
...
@@ -604,6 +616,7 @@ static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
...
...
@@ -617,6 +630,7 @@ static struct device_attribute *qib_attributes[] = {
	&dev_attr_board_id,
	&dev_attr_version,
	&dev_attr_nctxts,
+	&dev_attr_nfreectxts,
	&dev_attr_serial,
	&dev_attr_boardversion,
	&dev_attr_logged_errors,
...
...
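With the attribute wired into qib_attributes[], the free-context count becomes visible from userspace. A hedged userspace sketch (the sysfs path assumes the first qib HCA registers as qib0):

	#include <stdio.h>

	/* Illustrative only: poll the new nfreectxts attribute. */
	int main(void)
	{
		unsigned int nfree;
		FILE *f = fopen("/sys/class/infiniband/qib0/nfreectxts", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &nfree) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("free user contexts: %u\n", nfree);
		return 0;
	}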
drivers/infiniband/ulp/srp/ib_srp.c
...
...
@@ -2127,6 +2127,8 @@ static ssize_t srp_create_target(struct device *dev,
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
+	target_host->max_channel = 0;
+	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
...
...
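The max_cmd_len assignment uses the classic null-pointer sizeof idiom: sizeof is computed at compile time from the type alone, so the fake pointer is never dereferenced and the expression simply yields the size of the cdb member. A standalone illustration of the trick (struct example_cmd is made up for the demo):

	#include <stdio.h>

	struct example_cmd {
		unsigned char opcode;
		unsigned char cdb[16];	/* command descriptor block */
	};

	int main(void)
	{
		/* sizeof inspects only the type; no object is accessed. */
		size_t len = sizeof ((struct example_cmd *) (void *) 0L)->cdb;

		printf("cdb is %zu bytes\n", len);	/* prints 16 */
		return 0;
	}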
drivers/net/mlx4/en_ethtool.c
...
...
@@ -104,7 +104,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
	int err = 0;
	u64 config = 0;

-	if (!priv->mdev->dev->caps.wol) {
+	if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
		wol->supported = 0;
		wol->wolopts = 0;
		return;
...
...
@@ -134,7 +134,7 @@ static int mlx4_en_set_wol(struct net_device *netdev,
	u64 config = 0;
	int err = 0;

-	if (!priv->mdev->dev->caps.wol)
+	if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
		return -EOPNOTSUPP;

	if (wol->supported & ~WAKE_MAGIC)
...
...
@@ -170,7 +170,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
		return NUM_ALL_STATS +
			(priv->tx_ring_num + priv->rx_ring_num) * 2;
	case ETH_SS_TEST:
-		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
+		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
+					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	default:
		return -EOPNOTSUPP;
	}
...
...
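The ETH_SS_TEST branch leans on operator precedence: logical-not binds tighter than multiplication, so !(flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2 subtracts exactly two self-tests when the loopback bit is clear and nothing when it is set. A tiny standalone check of that reading (the constant 5 is illustrative, not the driver's value):

	#include <assert.h>

	#define NUM_SELF_TEST 5	/* illustrative */

	static int sset_count(unsigned long long flags, unsigned long long bit)
	{
		/* !(flags & bit) is 0 or 1, scaled by 2, then subtracted */
		return NUM_SELF_TEST - !(flags & bit) * 2;
	}

	int main(void)
	{
		assert(sset_count(1ULL << 32, 1ULL << 32) == 5);	/* flag set */
		assert(sset_count(0, 1ULL << 32) == 3);			/* flag clear */
		return 0;
	}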
@@ -220,7 +221,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
	case ETH_SS_TEST:
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
-		if (priv->mdev->dev->caps.loopback_support)
+		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;
...
...
drivers/net/mlx4/en_main.c
...
...
@@ -106,7 +106,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
	params->tcp_rss = tcp_rss;
	params->udp_rss = udp_rss;
-	if (params->udp_rss && !mdev->dev->caps.udp_rss) {
+	if (params->udp_rss && !(mdev->dev->caps.flags
+					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
		params->udp_rss = 0;
	}
...
...
drivers/net/mlx4/en_netdev.c
...
...
@@ -239,7 +239,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
-		if (!mdev->dev->caps.vep_uc_steering)
+		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 1);
		else
...
...
@@ -285,7 +286,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
-		if (!mdev->dev->caps.vep_uc_steering)
+		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
...
...
drivers/net/mlx4/en_port.c
...
...
@@ -119,9 +119,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
-	u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
+	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+						MCAST_DIRECT : MCAST_DEFAULT;

-	if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+	    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
...
...
drivers/net/mlx4/en_selftest.c
...
...
@@ -159,7 +159,8 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
			goto retry_tx;
	}

-	if (priv->mdev->dev->caps.loopback_support){
+	if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
		buf[3] = mlx4_en_test_registers(priv);
		buf[4] = mlx4_en_test_loopback(priv);
	}
...
...
drivers/net/mlx4/fw.c
...
...
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (defa
} \
} while (0)
-static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
+static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
...
...
@@ -99,13 +99,19 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
-		[30] = "IBoE support"
+		[30] = "IBoE support",
+		[32] = "Unicast loopback support",
+		[38] = "Wake On LAN support",
+		[40] = "UDP RSS support",
+		[41] = "Unicast VEP steering support",
+		[42] = "Multicast VEP steering support",
+		[48] = "Counters support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
-		if (fname[i] && (flags & (1 << i)))
+		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
...
...
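Widening the DEV_CAP flags word from u32 to u64 is why the loop body changes from 1 << i to 1LL << i: once i reaches 32, shifting a plain int is undefined behavior, while the LL suffix forces the shift into 64-bit arithmetic. A small demonstration (bit 40 stands in for the UDP RSS flag):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t flags = UINT64_C(1) << 40;

		/* (1 << 40) would overflow a 32-bit int; 1LL does not. */
		printf("bit 40 set: %d\n", (flags & (1LL << 40)) != 0);
		return 0;
	}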
@@ -142,7 +148,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
-	u32 field32;
+	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
...
...
@@ -180,8 +186,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
...
...
@@ -199,6 +204,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
...
...
@@ -272,14 +278,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
-	dev_cap->udp_rss = field & 0x1;
-	dev_cap->vep_uc_steering = field & 0x2;
-	dev_cap->vep_mc_steering = field & 0x4;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
-	dev_cap->loopback_support = field & 0x1;
-	dev_cap->wol = field & 0x40;
-	MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
...
...
@@ -356,6 +357,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
+	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+		MLX4_GET(dev_cap->max_counters, outbox,
+			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
...
...
@@ -449,6 +453,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
+	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);

	dump_dev_cap_flags(dev, dev_cap->flags);
...
...
@@ -781,6 +786,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

+	/* enable counters */
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
...
...
@@ -801,7 +810,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
	MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-	if (dev->caps.vep_mc_steering)
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
		MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
...
...
drivers/net/mlx4/fw.h
...
...
@@ -78,12 +78,7 @@ struct mlx4_dev_cap {
	u16 wavelength[MLX4_MAX_PORTS + 1];
	u64 trans_code[MLX4_MAX_PORTS + 1];
	u16 stat_rate_support;
-	int udp_rss;
-	int loopback_support;
-	int vep_uc_steering;
-	int vep_mc_steering;
-	int wol;
-	u32 flags;
+	u64 flags;
	int reserved_uars;
	int uar_size;
	int min_page_sz;
...
...
@@ -116,6 +111,7 @@ struct mlx4_dev_cap {
	u8  supported_port_types[MLX4_MAX_PORTS + 1];
	u8  log_max_macs[MLX4_MAX_PORTS + 1];
	u8  log_max_vlans[MLX4_MAX_PORTS + 1];
+	u32 max_counters;
};

struct mlx4_adapter {
...
...
drivers/net/mlx4/main.c
...
...
@@ -143,6 +143,7 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
			dev->caps.port_mask |= 1 << (i - 1);
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
...
...
@@ -226,11 +227,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	dev->caps.bmme_flags	    = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	    = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
-	dev->caps.udp_rss	    = dev_cap->udp_rss;
-	dev->caps.loopback_support  = dev_cap->loopback_support;
-	dev->caps.vep_uc_steering   = dev_cap->vep_uc_steering;
-	dev->caps.vep_mc_steering   = dev_cap->vep_mc_steering;
-	dev->caps.wol		    = dev_cap->wol;
	dev->caps.max_gso_sz	    = dev_cap->max_gso_sz;
	dev->caps.log_num_macs	    = log_num_mac;
...
...
@@ -262,6 +258,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	mlx4_set_port_mask(dev);

+	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
...
...
@@ -839,6 +837,45 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
	return err;
}

+static int mlx4_init_counters_table(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int nent;
+
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+		return -ENOENT;
+
+	nent = dev->caps.max_counters;
+	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+}
+
+static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
+{
+	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
+}
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+		return -ENOENT;
+
+	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
+	if (*idx == -1)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+{
+	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+	return;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_free);
+
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
...
...
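mlx4_counter_alloc() and mlx4_counter_free() expose the new bitmap-backed allocator of flow-counter indices to consumers such as mlx4_ib. A hedged sketch of a kernel-context caller (the function name is illustrative, not part of this commit):

	/* Illustrative caller: grab a counter index, use it, release it. */
	static int example_use_counter(struct mlx4_dev *dev)
	{
		u32 idx;
		int err = mlx4_counter_alloc(dev, &idx);

		if (err)	/* -ENOENT when the HCA lacks the capability */
			return err;
		/* ... program idx into a QP path as its counter_index,
		 * cf. MLX4_QP_OPTPAR_COUNTER_INDEX further down ... */
		mlx4_counter_free(dev, idx);
		return 0;
	}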
@@ -943,6 +980,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
		goto err_qp_table_free;
	}

+	err = mlx4_init_counters_table(dev);
+	if (err && err != -ENOENT) {
+		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+		goto err_counters_table_free;
+	}
+
	for (port = 1; port <= dev->caps.num_ports; port++) {
		enum mlx4_port_type port_type = 0;
		mlx4_SENSE_PORT(dev, port, &port_type);
...
...
@@ -969,6 +1012,9 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

+err_counters_table_free:
+	mlx4_cleanup_counters_table(dev);
+
err_qp_table_free:
	mlx4_cleanup_qp_table(dev);
...
...
@@ -1299,6 +1345,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

+	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
...
...
@@ -1359,6 +1406,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
		mlx4_CLOSE_PORT(dev, p);
	}

+	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
...
...
drivers/net/mlx4/mcg.c
...
...
@@ -559,7 +559,8 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
-	u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
+	u8 op_mod = (prot == MLX4_PROT_ETH) ?
+		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
...
...
@@ -834,7 +835,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

-	if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+	if (prot == MLX4_PROT_ETH &&
+	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (prot == MLX4_PROT_ETH)
...
...
@@ -853,7 +855,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

-	if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+	if (prot == MLX4_PROT_ETH &&
+	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (prot == MLX4_PROT_ETH) {
...
...
@@ -867,7 +870,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;
...
...
@@ -877,7 +880,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;
...
...
@@ -887,7 +890,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;
...
...
@@ -897,7 +900,7 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
...
...
drivers/net/mlx4/mlx4.h
...
...
@@ -48,8 +48,8 @@
#include <linux/mlx4/doorbell.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION	"0.01"
-#define DRV_RELDATE	"May 1, 2007"
+#define DRV_VERSION	"1.0"
+#define DRV_RELDATE	"July 14, 2011"
enum {
	MLX4_HCR_BASE		= 0x80680,
...
...
@@ -342,6 +342,7 @@ struct mlx4_priv {
	struct mlx4_srq_table	srq_table;
	struct mlx4_qp_table	qp_table;
	struct mlx4_mcg_table	mcg_table;
+	struct mlx4_bitmap	counters_bitmap;

	struct mlx4_catas_err	catas_err;
...
...
drivers/net/mlx4/port.c
...
...
@@ -146,7 +146,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
	int i, err = 0;
	int free = -1;

-	if (dev->caps.vep_uc_steering) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
		if (!err) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
...
...
@@ -203,7 +203,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
		goto out;
	}

-	if (!dev->caps.vep_uc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
		*qpn = info->base_qpn + free;
	++table->total;
out:
...
...
@@ -243,7 +243,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
	int index = qpn - info->base_qpn;
	struct mlx4_mac_entry *entry;

-	if (dev->caps.vep_uc_steering) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (entry) {
			mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
...
...
@@ -274,7 +274,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
	struct mlx4_mac_entry *entry;
	int err;

-	if (dev->caps.vep_uc_steering) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (!entry)
			return -EINVAL;
...
...
include/linux/mlx4/cmd.h
...
...
@@ -123,6 +123,9 @@ enum {
	/* debug commands */
	MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
	MLX4_CMD_SET_DEBUG_MSG	 = 0x2b,

+	/* statistics commands */
+	MLX4_CMD_QUERY_IF_STAT	 = 0X54,
};

enum {
...
...
include/linux/mlx4/device.h
...
...
@@ -58,22 +58,28 @@ enum {
};

enum {
-	MLX4_DEV_CAP_FLAG_RC		= 1 <<  0,
-	MLX4_DEV_CAP_FLAG_UC		= 1 <<  1,
-	MLX4_DEV_CAP_FLAG_UD		= 1 <<  2,
-	MLX4_DEV_CAP_FLAG_SRQ		= 1 <<  6,
-	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 <<  7,
-	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 <<  8,
-	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 <<  9,
-	MLX4_DEV_CAP_FLAG_DPDP		= 1 << 12,
-	MLX4_DEV_CAP_FLAG_BLH		= 1 << 15,
-	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
-	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
-	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
-	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1 << 19,
-	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1 << 20,
-	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1 << 21,
-	MLX4_DEV_CAP_FLAG_IBOE		= 1 << 30
+	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
+	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
+	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2,
+	MLX4_DEV_CAP_FLAG_SRQ		= 1LL <<  6,
+	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL <<  7,
+	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
+	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
+	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
+	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
+	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
+	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
+	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
+	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
+	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
+	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
+	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
+	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
+	MLX4_DEV_CAP_FLAG_WOL		= 1LL << 38,
+	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
+	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
+	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
+	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48
};

enum {
...
...
@@ -253,15 +259,10 @@ struct mlx4_caps {
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
-	u32			flags;
+	u64			flags;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
-	int			udp_rss;
-	int			loopback_support;
-	int			vep_uc_steering;
-	int			vep_mc_steering;
-	int			wol;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int			reserved_qps_cnt[MLX4_NUM_QP_REGION];
...
...
@@ -274,6 +275,7 @@ struct mlx4_caps {
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u32			port_mask;
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
+	u32			max_counters;
};

struct mlx4_buf_list {
...
...
@@ -438,6 +440,17 @@ union mlx4_ext_av {
	struct mlx4_eth_av	eth;
};

+struct mlx4_counter {
+	u8	reserved1[3];
+	u8	counter_mode;
+	__be32	num_ifc;
+	u32	reserved2[2];
+	__be64	rx_frames;
+	__be64	rx_bytes;
+	__be64	tx_frames;
+	__be64	tx_bytes;
+};
+
struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;
...
...
@@ -568,4 +581,7 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);

#endif /* MLX4_DEVICE_H */
include/linux/mlx4/qp.h
...
...
@@ -54,7 +54,8 @@ enum mlx4_qp_optpar {
	MLX4_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX4_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX4_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
-	MLX4_QP_OPTPAR_SCHED_QUEUE		= 1 << 16
+	MLX4_QP_OPTPAR_SCHED_QUEUE		= 1 << 16,
+	MLX4_QP_OPTPAR_COUNTER_INDEX		= 1 << 20
};

enum mlx4_qp_state {
...
...
@@ -99,7 +100,7 @@ struct mlx4_qp_path {
	u8			fl;
	u8			reserved1[2];
	u8			pkey_index;
-	u8			reserved2;
+	u8			counter_index;
	u8			grh_mylmc;
	__be16			rlid;
	u8			ackto;
...
...
@@ -111,8 +112,7 @@ struct mlx4_qp_path {
	u8			sched_queue;
	u8			vlan_index;
	u8			reserved3[2];
-	u8			counter_index;
-	u8			reserved4;
+	u8			reserved4[2];
	u8			dmac[6];
};
...
...
include/rdma/ib_pma.h
new file mode 100644
/*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#if !defined(IB_PMA_H)
#define IB_PMA_H
#include <rdma/ib_mad.h>
/*
* PMA class portinfo capability mask bits
*/
#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
struct ib_pma_mad {
	struct ib_mad_hdr mad_hdr;
	u8 reserved[40];
	u8 data[192];
} __packed;

struct ib_pma_portsamplescontrol {
	u8 opcode;
	u8 port_select;
	u8 tick;
	u8 counter_width;		/* resv: 7:3, counter width: 2:0 */
	__be32 counter_mask0_9;		/* 2, 10 3-bit fields */
	__be16 counter_mask10_14;	/* 1, 5 3-bit fields */
	u8 sample_mechanisms;
	u8 sample_status;		/* only lower 2 bits */
	__be64 option_mask;
	__be64 vendor_mask;
	__be32 sample_start;
	__be32 sample_interval;
	__be16 tag;
	__be16 counter_select[15];
	__be32 reserved1;
	__be64 samples_only_option_mask;
	__be32 reserved2[28];
};

struct ib_pma_portsamplesresult {
	__be16 tag;
	__be16 sample_status;		/* only lower 2 bits */
	__be32 counter[15];
};

struct ib_pma_portsamplesresult_ext {
	__be16 tag;
	__be16 sample_status;		/* only lower 2 bits */
	__be32 extended_width;		/* only upper 2 bits */
	__be64 counter[15];
};

struct ib_pma_portcounters {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;
	__be16 symbol_error_counter;
	u8 link_error_recovery_counter;
	u8 link_downed_counter;
	__be16 port_rcv_errors;
	__be16 port_rcv_remphys_errors;
	__be16 port_rcv_switch_relay_errors;
	__be16 port_xmit_discards;
	u8 port_xmit_constraint_errors;
	u8 port_rcv_constraint_errors;
	u8 reserved1;
	u8 link_overrun_errors;	/* LocalLink: 7:4, BufferOverrun: 3:0 */
	__be16 reserved2;
	__be16 vl15_dropped;
	__be32 port_xmit_data;
	__be32 port_rcv_data;
	__be32 port_xmit_packets;
	__be32 port_rcv_packets;
	__be32 port_xmit_wait;
} __packed;
#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
struct ib_pma_portcounters_ext {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;
	__be32 reserved1;
	__be64 port_xmit_data;
	__be64 port_rcv_data;
	__be64 port_xmit_packets;
	__be64 port_rcv_packets;
	__be64 port_unicast_xmit_packets;
	__be64 port_unicast_rcv_packets;
	__be64 port_multicast_xmit_packets;
	__be64 port_multicast_rcv_packets;
} __packed;
#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
#endif /* IB_PMA_H */
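All fields in the new header are big-endian on the wire, and per the IBA spec the PortXmitData/PortRcvData counters are in units of 4-byte words, not octets. A hedged pair of kernel-context decoding helpers (the names are illustrative, not part of the header):

	/* link_overrun_errors packs LocalLinkIntegrity in bits 7:4 and
	 * ExcessiveBufferOverrun in bits 3:0. */
	static inline u8 pma_local_link_errors(const struct ib_pma_portcounters *pc)
	{
		return pc->link_overrun_errors >> 4;
	}

	/* Data counters count 4-byte words; scale to octets. */
	static inline u64 pma_xmit_octets(const struct ib_pma_portcounters_ext *pc)
	{
		return be64_to_cpu(pc->port_xmit_data) * 4;
	}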
include/rdma/ib_verbs.h
...
...
@@ -350,7 +350,8 @@ enum ib_event_type {
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
-	IB_EVENT_CLIENT_REREGISTER
+	IB_EVENT_CLIENT_REREGISTER,
+	IB_EVENT_GID_CHANGE,
};

struct ib_event {
...
...