openeuler / Kernel
Commit 06a91a02
Author: Roland Dreier
Date:   Aug 07, 2008

Merge branches 'cma', 'cxgb3', 'ipath', 'ipoib', 'mad' and 'mlx4' into for-linus

Parents: 3f446754 be43324d 70117b9e e0819816 cd55ef5a 6e0d733d

13 changed files with 82 additions and 89 deletions (+82 -89):
drivers/infiniband/core/mad_rmpp.c            +1   -1
drivers/infiniband/hw/cxgb3/cxio_hal.c        +3   -3
drivers/infiniband/hw/cxgb3/iwch_provider.c   +3  -25
drivers/infiniband/hw/cxgb3/iwch_provider.h   +7   -0
drivers/infiniband/hw/cxgb3/iwch_qp.c         +8  -17
drivers/infiniband/hw/ipath/ipath_driver.c    +3   -2
drivers/infiniband/hw/ipath/ipath_iba7220.c   +4   -3
drivers/infiniband/hw/ipath/ipath_intr.c      +8   -4
drivers/infiniband/hw/ipath/ipath_verbs.c     +3   -3
drivers/infiniband/hw/mlx4/cq.c              +16  -17
drivers/infiniband/hw/mlx4/qp.c               +1   -1
drivers/infiniband/ulp/ipoib/ipoib_cm.c       +1   -1
include/linux/mlx4/cq.h                      +24  -12
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -133,7 +133,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
         msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                  recv_wc->wc->pkey_index, 1, hdr_len,
                                  0, GFP_KERNEL);
-        if (!msg)
+        if (IS_ERR(msg))
                 return;
 
         format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
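Note (not part of the commit): ib_create_send_mad() reports failure through an ERR_PTR()-encoded pointer rather than NULL, which is why the check above becomes IS_ERR(msg). Below is a minimal userspace sketch of that error-pointer convention; err_ptr/is_err/ptr_err and create_send_buf are simplified stand-ins for the kernel helpers, not the real API.

    /*
     * Error-pointer sketch: failures are encoded as "pointers" in the last
     * page of the address space, so a NULL check alone misses them.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static void *err_ptr(long err)        { return (void *)err; }
    static int   is_err(const void *p)    { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
    static long  ptr_err(const void *p)   { return (long)(intptr_t)p; }

    static void *create_send_buf(int fail)
    {
        static int buf;                     /* stands in for a real allocation */
        return fail ? err_ptr(-ENOMEM) : &buf;
    }

    int main(void)
    {
        void *msg = create_send_buf(1);

        if (!msg)
            printf("NULL check: no error seen\n");   /* never triggers */
        if (is_err(msg))
            printf("IS_ERR check: error %ld\n", ptr_err(msg));
        return 0;
    }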
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -725,9 +725,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
                                       V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
                 BUG_ON(page_size >= 28);
                 tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
-                                F_TPT_MW_BIND_ENABLE |
-                                V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
-                                V_TPT_PAGE_SIZE(page_size));
+                                ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
+                                V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
+                                V_TPT_PAGE_SIZE(page_size));
         tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
                             cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
         tpt.len = cpu_to_be32(len);
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1187,28 +1187,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
         return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
 }
 
-static int fw_supports_fastreg(struct iwch_dev *iwch_dev)
-{
-        struct ethtool_drvinfo info;
-        struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-        char *cp, *next;
-        unsigned fw_maj, fw_min;
-
-        rtnl_lock();
-        lldev->ethtool_ops->get_drvinfo(lldev, &info);
-        rtnl_unlock();
-
-        next = info.fw_version + 1;
-        cp = strsep(&next, ".");
-        sscanf(cp, "%i", &fw_maj);
-        cp = strsep(&next, ".");
-        sscanf(cp, "%i", &fw_min);
-
-        PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min);
-
-        return fw_maj > 6 || (fw_maj == 6 && fw_min > 0);
-}
-
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
@@ -1325,12 +1303,12 @@ int iwch_register_device(struct iwch_dev *dev)
         memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
         memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
         dev->ibdev.owner = THIS_MODULE;
-        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+                                IB_DEVICE_MEM_WINDOW |
+                                IB_DEVICE_MEM_MGT_EXTENSIONS;
 
         /* cxgb3 supports STag 0. */
         dev->ibdev.local_dma_lkey = 0;
-        if (fw_supports_fastreg(dev))
-                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 
         dev->ibdev.uverbs_cmd_mask =
                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -293,9 +293,16 @@ static inline u32 iwch_ib_to_tpt_access(int acc)
         return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
                (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
                (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
+               (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
                TPT_LOCAL_READ;
 }
 
+static inline u32 iwch_ib_to_tpt_bind_access(int acc)
+{
+        return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
+               (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
+}
+
 enum iwch_mmid_state {
         IWCH_STAG_STATE_VALID,
         IWCH_STAG_STATE_INVALID
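Note (illustration only, with made-up bit values rather than the driver's TPT_* constants): the new iwch_ib_to_tpt_bind_access() follows the same ternary-OR pattern as iwch_ib_to_tpt_access() above, but forwards only the remote read/write permissions that matter for a memory-window bind.

    /* Sketch of mapping request flags to hardware permission bits. */
    #include <stdio.h>

    enum { ACCESS_LOCAL_WRITE = 1 << 0, ACCESS_REMOTE_WRITE = 1 << 1,
           ACCESS_REMOTE_READ = 1 << 2, ACCESS_MW_BIND = 1 << 3 };

    enum { PERM_LOCAL_READ = 1 << 0, PERM_LOCAL_WRITE = 1 << 1,
           PERM_REMOTE_READ = 1 << 2, PERM_REMOTE_WRITE = 1 << 3,
           PERM_MW_BIND = 1 << 4 };

    static unsigned int to_bind_perms(int acc)
    {
        return (acc & ACCESS_REMOTE_WRITE ? PERM_REMOTE_WRITE : 0) |
               (acc & ACCESS_REMOTE_READ  ? PERM_REMOTE_READ  : 0);
    }

    int main(void)
    {
        /* MW_BIND and local-write requests are intentionally dropped here. */
        printf("bind perms = 0x%x\n",
               to_bind_perms(ACCESS_REMOTE_WRITE | ACCESS_MW_BIND));
        return 0;
    }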
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -565,7 +565,7 @@ int iwch_bind_mw(struct ib_qp *qp,
         wqe->bind.type = TPT_VATO;
 
         /* TBD: check perms */
-        wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags);
+        wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
         wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
         wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
         wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
@@ -879,20 +879,13 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
                            (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
                            (qhp->attr.mpa_attr.crc_enabled << 2);
 
-        /*
-         * XXX - The IWCM doesn't quite handle getting these
-         * attrs set before going into RTS.  For now, just turn
-         * them on always...
-         */
-#if 0
-        init_attr.qpcaps = qhp->attr.enableRdmaRead |
-                (qhp->attr.enableRdmaWrite << 1) |
-                (qhp->attr.enableBind << 2) |
-                (qhp->attr.enable_stag0_fastreg << 3) |
-                (qhp->attr.enable_stag0_fastreg << 4);
-#else
-        init_attr.qpcaps = 0x1f;
-#endif
+        init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
+                           uP_RI_QP_RDMA_WRITE_ENABLE |
+                           uP_RI_QP_BIND_ENABLE;
+        if (!qhp->ibqp.uobject)
+                init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
+                                    uP_RI_QP_FAST_REGISTER_ENABLE;
+
         init_attr.tcp_emss = qhp->ep->emss;
         init_attr.ord = qhp->attr.max_ord;
         init_attr.ird = qhp->attr.max_ird;
@@ -900,8 +893,6 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
         init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
         init_attr.rqe_count = iwch_rqes_posted(qhp);
         init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
-        if (!qhp->ibqp.uobject)
-                init_attr.flags |= PRIV_QP;
         if (peer2peer) {
                 init_attr.rtr_type = RTR_READ;
                 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
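Note (illustration only, with made-up flag values rather than the firmware's uP_RI_QP_* bits): the rdma_init() change replaces the hard-coded 0x1f capability mask with named flags, and grants the privileged STag-0 and fast-register capabilities only to kernel-owned QPs, i.e. those with no userspace object attached.

    /* Sketch of building a QP capability mask conditionally. */
    #include <stdio.h>
    #include <stdbool.h>

    enum { CAP_RDMA_READ = 1 << 0, CAP_RDMA_WRITE = 1 << 1, CAP_BIND = 1 << 2,
           CAP_STAG0 = 1 << 3, CAP_FAST_REGISTER = 1 << 4 };

    static unsigned int qp_caps(bool kernel_qp)
    {
        unsigned int caps = CAP_RDMA_READ | CAP_RDMA_WRITE | CAP_BIND;

        if (kernel_qp)                /* !qhp->ibqp.uobject in the driver */
            caps |= CAP_STAG0 | CAP_FAST_REGISTER;
        return caps;
    }

    int main(void)
    {
        printf("user QP caps   = 0x%02x\n", qp_caps(false));
        /* With these made-up values the kernel-QP mask happens to come out
         * as 0x1f, the magic number the old code used unconditionally. */
        printf("kernel QP caps = 0x%02x\n", qp_caps(true));
        return 0;
    }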
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1259,7 +1259,7 @@ void ipath_kreceive(struct ipath_portdata *pd)
                  */
                 ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
                            " %x, len %x hdrq+%x rhf: %Lx\n",
-                           etail, tlen, l,
+                           etail, tlen, l, (unsigned long long)
                            le64_to_cpu(*(__le64 *) rhf_addr));
                 if (ipath_debug & __IPATH_ERRPKTDBG) {
                         u32 j, *d, dw = rsize - 2;
@@ -1457,7 +1457,8 @@ static void ipath_reset_availshadow(struct ipath_devdata *dd)
                         0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
                 if (oldval != dd->ipath_pioavailshadow[i])
                         ipath_dbg("shadow[%d] was %Lx, now %lx\n",
-                                i, oldval, dd->ipath_pioavailshadow[i]);
+                                i, (unsigned long long) oldval,
+                                dd->ipath_pioavailshadow[i]);
         }
         spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
 }
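Note (not part of the commit): this and the following ipath hunks are printk-format fixes. A %Lx/%llx conversion expects an unsigned long long argument, while the kernel's u64 may be a plain unsigned long on some 64-bit architectures, so printing a u64 portably needs an explicit cast. A standalone illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t rhf = 0xdeadbeefcafef00dULL;   /* stands in for a u64 register value */

        /* The cast keeps the argument type matching %llx on every platform. */
        printf("rhf: %llx\n", (unsigned long long) rhf);
        return 0;
    }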
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -1032,7 +1032,7 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
         ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
                    (unsigned long long)
                    ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
-                   prev_val);
+                   (unsigned long long) prev_val);
 
         guid = be64_to_cpu(dd->ipath_guid);
@@ -1042,7 +1042,8 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
                 ipath_dbg("No GUID for heartbeat, faking %llx\n",
                         (unsigned long long) guid);
         } else
-                ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid);
+                ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
+                        (unsigned long long) guid);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
         return ret;
 }
@@ -2505,7 +2506,7 @@ static void autoneg_work(struct work_struct *work)
         if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
                 ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
                           ipath_ib_state(dd, dd->ipath_lastibcstat),
-                          jiffies_to_msecs(jiffies)-startms);
+                          (unsigned long long) jiffies_to_msecs(jiffies)-startms);
                 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
                 if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
                         dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -356,9 +356,10 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
                         dd->ipath_cregs->cr_iblinkerrrecovcnt);
                 if (linkrecov != dd->ipath_lastlinkrecov) {
                         ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
-                                ibcs, ib_linkstate(dd, ibcs),
-                                ipath_ibcstatus_str[ltstate], linkrecov);
+                                (unsigned long long) ibcs, ib_linkstate(dd, ibcs),
+                                ipath_ibcstatus_str[ltstate],
+                                (unsigned long long) linkrecov);
                         /* and no more until active again */
                         dd->ipath_lastlinkrecov = 0;
                         ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
@@ -1118,9 +1119,11 @@ irqreturn_t ipath_intr(int irq, void *data)
         if (unlikely(istat & ~dd->ipath_i_bitsextant))
                 ipath_dev_err(dd,
                               "interrupt with unknown interrupts %Lx set\n",
+                              (unsigned long long)
                               istat & ~dd->ipath_i_bitsextant);
         else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
-                ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
+                ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
+                        (unsigned long long) istat);
 
         if (istat & INFINIPATH_I_ERROR) {
                 ipath_stats.sps_errints++;
@@ -1128,7 +1131,8 @@ irqreturn_t ipath_intr(int irq, void *data)
                                    dd->ipath_kregs->kr_errorstatus);
                 if (!estat)
                         dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
-                                 "but no error bits set!\n", istat);
+                                 "but no error bits set!\n",
+                                 (unsigned long long) istat);
                 else if (estat == -1LL)
                         /*
                          * should we try clearing all, or hope next read
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1021,7 +1021,7 @@ static void sdma_complete(void *cookie, int status)
         struct ipath_verbs_txreq *tx = cookie;
         struct ipath_qp *qp = tx->qp;
         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-        unsigned int flags;
+        unsigned long flags;
         enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
                 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
@@ -1051,7 +1051,7 @@ static void sdma_complete(void *cookie, int status)
 
 static void decrement_dma_busy(struct ipath_qp *qp)
 {
-        unsigned int flags;
+        unsigned long flags;
 
         if (atomic_dec_and_test(&qp->s_dma_busy)) {
                 spin_lock_irqsave(&qp->s_lock, flags);
@@ -1221,7 +1221,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
         unsigned flush_wc;
         u32 control;
         int ret;
-        unsigned int flags;
+        unsigned long flags;
 
         piobuf = ipath_getpiobuf(dd, plen, NULL);
         if (unlikely(piobuf == NULL)) {
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -515,17 +515,17 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
         wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
-static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
 {
-        return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-                                      MLX4_CQE_IPOIB_STATUS_IPV4F |
-                                      MLX4_CQE_IPOIB_STATUS_IPV4OPT |
-                                      MLX4_CQE_IPOIB_STATUS_IPV6 |
-                                      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
-                cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-                            MLX4_CQE_IPOIB_STATUS_IPOK)) &&
-                (status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
-                                      MLX4_CQE_IPOIB_STATUS_TCP)) &&
+        return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+                                      MLX4_CQE_STATUS_IPV4F |
+                                      MLX4_CQE_STATUS_IPV4OPT |
+                                      MLX4_CQE_STATUS_IPV6 |
+                                      MLX4_CQE_STATUS_IPOK)) ==
+                cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+                            MLX4_CQE_STATUS_IPOK)) &&
+                (status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
+                                      MLX4_CQE_STATUS_TCP)) &&
                 checksum == cpu_to_be16(0xffff);
 }
@@ -582,17 +582,17 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
         }
 
         if (!*cur_qp ||
-            (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
+            (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
                 /*
                  * We do not have to take the QP table lock here,
                  * because CQs will be locked while QPs are removed
                  * from the table.
                  */
                 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
-                                       be32_to_cpu(cqe->my_qpn));
+                                       be32_to_cpu(cqe->vlan_my_qpn));
                 if (unlikely(!mqp)) {
                         printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
-                               cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
+                               cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
                         return -EINVAL;
                 }
@@ -692,14 +692,13 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                 }
 
                 wc->slid           = be16_to_cpu(cqe->rlid);
-                wc->sl             = cqe->sl >> 4;
+                wc->sl             = be16_to_cpu(cqe->sl_vid >> 12);
                 g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
                 wc->src_qp         = g_mlpath_rqpn & 0xffffff;
                 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
                 wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
                 wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-                wc->csum_ok        = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
-                                                           cqe->checksum);
+                wc->csum_ok        = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
         }
 
         return 0;
@@ -767,7 +766,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
          */
         while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-                if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
+                if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                         if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                         ++nfreed;
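Note (not part of the commit): the reworked checksum test accepts a completion only when the CQE's IP-related status bits reduce to "IPv4 with IP checksum OK", the packet is TCP or UDP, and the hardware checksum field is 0xffff. A host-order sketch of that predicate (the driver compares big-endian values via cpu_to_be16(); byte order is ignored here):

    #include <stdio.h>
    #include <stdint.h>

    enum {
        STATUS_IPV4    = 1 << 6,
        STATUS_IPV4F   = 1 << 7,
        STATUS_IPV6    = 1 << 8,
        STATUS_IPV4OPT = 1 << 9,
        STATUS_TCP     = 1 << 10,
        STATUS_UDP     = 1 << 11,
        STATUS_IPOK    = 1 << 12,
    };

    static int csum_ok(uint16_t status, uint16_t checksum)
    {
        uint16_t ip_mask = STATUS_IPV4 | STATUS_IPV4F | STATUS_IPV4OPT |
                           STATUS_IPV6 | STATUS_IPOK;

        /* Plain IPv4 only, checksum verified, and a TCP/UDP payload. */
        return (status & ip_mask) == (STATUS_IPV4 | STATUS_IPOK) &&
               (status & (STATUS_TCP | STATUS_UDP)) &&
               checksum == 0xffff;
    }

    int main(void)
    {
        printf("plain IPv4/UDP, good csum: %d\n",
               csum_ok(STATUS_IPV4 | STATUS_IPOK | STATUS_UDP, 0xffff));
        printf("IPv4 with options:         %d\n",
               csum_ok(STATUS_IPV4 | STATUS_IPV4OPT | STATUS_IPOK | STATUS_UDP, 0xffff));
        return 0;
    }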
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -902,7 +902,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                         context->mtu_msgmax = (IB_MTU_4096 << 5) |
                                               ilog2(dev->dev->caps.max_gso_sz);
                 else
-                        context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+                        context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
         } else if (attr_mask & IB_QP_PATH_MTU) {
                 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
                         printk(KERN_ERR "path MTU (%u) is invalid\n",
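Note (not part of the commit): mtu_msgmax packs the IB MTU code in the upper bits and, judging from the ilog2(max_gso_sz) branch on the neighbouring line, a log2 maximum-message-size in the low 5 bits; raising that field from 11 to 12 lifts the limit from 2048 to 4096 bytes. A quick arithmetic check:

    #include <stdio.h>

    #define IB_MTU_4096 5   /* IB MTU enum code, as in <rdma/ib_verbs.h> */

    static unsigned int mtu_msgmax(unsigned int mtu_code, unsigned int log_msg)
    {
        return (mtu_code << 5) | log_msg;   /* MTU code above, log2 msg size below */
    }

    int main(void)
    {
        printf("old: 0x%02x -> max msg %u bytes\n", mtu_msgmax(IB_MTU_4096, 11), 1u << 11);
        printf("new: 0x%02x -> max msg %u bytes\n", mtu_msgmax(IB_MTU_4096, 12), 1u << 12);
        return 0;
    }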
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -337,7 +337,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
                 sge[i].length = PAGE_SIZE;
 
         wr->next    = NULL;
-        wr->sg_list = priv->cm.rx_sge;
+        wr->sg_list = sge;
         wr->num_sge = priv->cm.num_frags;
 }
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -39,17 +39,18 @@
 #include <linux/mlx4/doorbell.h>
 
 struct mlx4_cqe {
-        __be32                  my_qpn;
+        __be32                  vlan_my_qpn;
         __be32                  immed_rss_invalid;
         __be32                  g_mlpath_rqpn;
-        u8                      sl;
-        u8                      reserved1;
+        __be16                  sl_vid;
         __be16                  rlid;
-        __be32                  ipoib_status;
+        __be16                  status;
+        u8                      ipv6_ext_mask;
+        u8                      badfcs_enc;
         __be32                  byte_cnt;
         __be16                  wqe_index;
         __be16                  checksum;
-        u8                      reserved2[3];
+        u8                      reserved[3];
         u8                      owner_sr_opcode;
 };
@@ -63,6 +64,11 @@ struct mlx4_err_cqe {
         u8                      owner_sr_opcode;
 };
 
+enum {
+        MLX4_CQE_VLAN_PRESENT_MASK      = 1 << 29,
+        MLX4_CQE_QPN_MASK               = 0xffffff,
+};
+
 enum {
         MLX4_CQE_OWNER_MASK     = 0x80,
         MLX4_CQE_IS_SEND_MASK   = 0x40,
@@ -86,13 +92,19 @@ enum {
 };
 
 enum {
-        MLX4_CQE_IPOIB_STATUS_IPV4      = 1 << 22,
-        MLX4_CQE_IPOIB_STATUS_IPV4F     = 1 << 23,
-        MLX4_CQE_IPOIB_STATUS_IPV6      = 1 << 24,
-        MLX4_CQE_IPOIB_STATUS_IPV4OPT   = 1 << 25,
-        MLX4_CQE_IPOIB_STATUS_TCP       = 1 << 26,
-        MLX4_CQE_IPOIB_STATUS_UDP       = 1 << 27,
-        MLX4_CQE_IPOIB_STATUS_IPOK      = 1 << 28,
+        MLX4_CQE_STATUS_IPV4            = 1 << 6,
+        MLX4_CQE_STATUS_IPV4F           = 1 << 7,
+        MLX4_CQE_STATUS_IPV6            = 1 << 8,
+        MLX4_CQE_STATUS_IPV4OPT         = 1 << 9,
+        MLX4_CQE_STATUS_TCP             = 1 << 10,
+        MLX4_CQE_STATUS_UDP             = 1 << 11,
+        MLX4_CQE_STATUS_IPOK            = 1 << 12,
+};
+
+enum {
+        MLX4_CQE_LLC                    = 1,
+        MLX4_CQE_SNAP                   = 1 << 1,
+        MLX4_CQE_BAD_FCS                = 1 << 4,
 };
 
 static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
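Note (not part of the commit): the renamed vlan_my_qpn field carries both the 24-bit QPN and, per the new mask, a VLAN-present flag at bit 29; the consumers switched over in drivers/infiniband/hw/mlx4/cq.c above extract the QPN with MLX4_CQE_QPN_MASK. A small host-order sketch of that extraction (the driver works on the big-endian word via be32_to_cpu()):

    #include <stdio.h>

    #define CQE_VLAN_PRESENT_MASK   (1u << 29)
    #define CQE_QPN_MASK            0xffffffu

    int main(void)
    {
        unsigned int vlan_my_qpn = (1u << 29) | 0x000123;   /* example CQE word */

        printf("qpn  = 0x%06x\n", vlan_my_qpn & CQE_QPN_MASK);
        printf("vlan = %s\n",
               (vlan_my_qpn & CQE_VLAN_PRESENT_MASK) ? "present" : "absent");
        return 0;
    }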