Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
OpenHarmony
kernel_linux
提交
a5f66725
K
kernel_linux
项目概览
OpenHarmony
/
kernel_linux
上一次同步 4 年多
通知
15
Star
8
Fork
2
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
K
kernel_linux
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
a5f66725
编写于
7月 27, 2017
作者:
D
Doug Ledford
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'misc' into k.o/for-next
上级
f55c1e66
67cbe353
变更
43
隐藏空白更改
内联
并排
Showing
43 changed file
with
693 addition
and
279 deletion
+693
-279
drivers/infiniband/core/cma.c
drivers/infiniband/core/cma.c
+7
-2
drivers/infiniband/core/roce_gid_mgmt.c
drivers/infiniband/core/roce_gid_mgmt.c
+2
-0
drivers/infiniband/core/verbs.c
drivers/infiniband/core/verbs.c
+55
-0
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
+25
-63
drivers/infiniband/hw/bnxt_re/ib_verbs.h
drivers/infiniband/hw/bnxt_re/ib_verbs.h
+0
-3
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/main.c
+49
-9
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
+380
-85
drivers/infiniband/hw/bnxt_re/qplib_fp.h
drivers/infiniband/hw/bnxt_re/qplib_fp.h
+24
-1
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+25
-1
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+9
-1
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
+10
-0
drivers/infiniband/hw/bnxt_re/qplib_res.h
drivers/infiniband/hw/bnxt_re/qplib_res.h
+2
-0
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/bnxt_re/qplib_sp.c
+61
-16
drivers/infiniband/hw/bnxt_re/qplib_sp.h
drivers/infiniband/hw/bnxt_re/qplib_sp.h
+2
-0
drivers/infiniband/hw/bnxt_re/roce_hsi.h
drivers/infiniband/hw/bnxt_re/roce_hsi.h
+2
-2
drivers/infiniband/hw/cxgb3/iwch.c
drivers/infiniband/hw/cxgb3/iwch.c
+0
-1
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/device.c
+0
-1
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/driver.c
+0
-1
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/qp.c
+5
-1
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_main.c
+0
-1
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/main.c
+0
-1
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/main.c
+1
-2
drivers/infiniband/hw/mthca/mthca_main.c
drivers/infiniband/hw/mthca/mthca_main.c
+0
-1
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes.c
+0
-1
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
+0
-1
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/main.c
+1
-1
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr.h
+2
-1
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/qedr/verbs.c
+3
-0
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_driver.c
+0
-1
drivers/infiniband/hw/qib/qib_mad.c
drivers/infiniband/hw/qib/qib_mad.c
+0
-2
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/hw/usnic/usnic_ib_main.c
+1
-1
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+16
-25
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+1
-0
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+0
-1
drivers/infiniband/sw/rxe/rxe.c
drivers/infiniband/sw/rxe/rxe.c
+0
-1
drivers/infiniband/sw/rxe/rxe_verbs.c
drivers/infiniband/sw/rxe/rxe_verbs.c
+6
-47
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
+0
-1
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.c
+0
-1
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.c
+0
-1
drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+0
-1
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srp/ib_srp.c
+0
-1
include/rdma/ib_verbs.h
include/rdma/ib_verbs.h
+1
-0
include/uapi/rdma/qedr-abi.h
include/uapi/rdma/qedr-abi.h
+3
-0
未找到文件。
drivers/infiniband/core/cma.c
浏览文件 @
a5f66725
...
...
@@ -72,6 +72,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
static
const
char
*
const
cma_events
[]
=
{
[
RDMA_CM_EVENT_ADDR_RESOLVED
]
=
"address resolved"
,
...
...
@@ -4281,8 +4282,12 @@ static void cma_add_one(struct ib_device *device)
for
(
i
=
rdma_start_port
(
device
);
i
<=
rdma_end_port
(
device
);
i
++
)
{
supported_gids
=
roce_gid_type_mask_support
(
device
,
i
);
WARN_ON
(
!
supported_gids
);
cma_dev
->
default_gid_type
[
i
-
rdma_start_port
(
device
)]
=
find_first_bit
(
&
supported_gids
,
BITS_PER_LONG
);
if
(
supported_gids
&
CMA_PREFERRED_ROCE_GID_TYPE
)
cma_dev
->
default_gid_type
[
i
-
rdma_start_port
(
device
)]
=
CMA_PREFERRED_ROCE_GID_TYPE
;
else
cma_dev
->
default_gid_type
[
i
-
rdma_start_port
(
device
)]
=
find_first_bit
(
&
supported_gids
,
BITS_PER_LONG
);
cma_dev
->
default_roce_tos
[
i
-
rdma_start_port
(
device
)]
=
0
;
}
...
...
drivers/infiniband/core/roce_gid_mgmt.c
浏览文件 @
a5f66725
...
...
@@ -44,6 +44,8 @@
static
struct
workqueue_struct
*
gid_cache_wq
;
static
struct
workqueue_struct
*
gid_cache_wq
;
enum
gid_op_type
{
GID_DEL
=
0
,
GID_ADD
...
...
drivers/infiniband/core/verbs.c
浏览文件 @
a5f66725
...
...
@@ -1314,6 +1314,61 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
}
EXPORT_SYMBOL
(
ib_modify_qp_with_udata
);
int
ib_get_eth_speed
(
struct
ib_device
*
dev
,
u8
port_num
,
u8
*
speed
,
u8
*
width
)
{
int
rc
;
u32
netdev_speed
;
struct
net_device
*
netdev
;
struct
ethtool_link_ksettings
lksettings
;
if
(
rdma_port_get_link_layer
(
dev
,
port_num
)
!=
IB_LINK_LAYER_ETHERNET
)
return
-
EINVAL
;
if
(
!
dev
->
get_netdev
)
return
-
EOPNOTSUPP
;
netdev
=
dev
->
get_netdev
(
dev
,
port_num
);
if
(
!
netdev
)
return
-
ENODEV
;
rtnl_lock
();
rc
=
__ethtool_get_link_ksettings
(
netdev
,
&
lksettings
);
rtnl_unlock
();
dev_put
(
netdev
);
if
(
!
rc
)
{
netdev_speed
=
lksettings
.
base
.
speed
;
}
else
{
netdev_speed
=
SPEED_1000
;
pr_warn
(
"%s speed is unknown, defaulting to %d
\n
"
,
netdev
->
name
,
netdev_speed
);
}
if
(
netdev_speed
<=
SPEED_1000
)
{
*
width
=
IB_WIDTH_1X
;
*
speed
=
IB_SPEED_SDR
;
}
else
if
(
netdev_speed
<=
SPEED_10000
)
{
*
width
=
IB_WIDTH_1X
;
*
speed
=
IB_SPEED_FDR10
;
}
else
if
(
netdev_speed
<=
SPEED_20000
)
{
*
width
=
IB_WIDTH_4X
;
*
speed
=
IB_SPEED_DDR
;
}
else
if
(
netdev_speed
<=
SPEED_25000
)
{
*
width
=
IB_WIDTH_1X
;
*
speed
=
IB_SPEED_EDR
;
}
else
if
(
netdev_speed
<=
SPEED_40000
)
{
*
width
=
IB_WIDTH_4X
;
*
speed
=
IB_SPEED_FDR10
;
}
else
{
*
width
=
IB_WIDTH_4X
;
*
speed
=
IB_SPEED_EDR
;
}
return
0
;
}
EXPORT_SYMBOL
(
ib_get_eth_speed
);
int
ib_modify_qp
(
struct
ib_qp
*
qp
,
struct
ib_qp_attr
*
qp_attr
,
int
qp_attr_mask
)
...
...
drivers/infiniband/hw/bnxt_re/ib_verbs.c
浏览文件 @
a5f66725
...
...
@@ -223,50 +223,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
return
0
;
}
static
void
__to_ib_speed_width
(
struct
net_device
*
netdev
,
u8
*
speed
,
u8
*
width
)
{
struct
ethtool_link_ksettings
lksettings
;
u32
espeed
;
if
(
netdev
->
ethtool_ops
&&
netdev
->
ethtool_ops
->
get_link_ksettings
)
{
memset
(
&
lksettings
,
0
,
sizeof
(
lksettings
));
rtnl_lock
();
netdev
->
ethtool_ops
->
get_link_ksettings
(
netdev
,
&
lksettings
);
rtnl_unlock
();
espeed
=
lksettings
.
base
.
speed
;
}
else
{
espeed
=
SPEED_UNKNOWN
;
}
switch
(
espeed
)
{
case
SPEED_1000
:
*
speed
=
IB_SPEED_SDR
;
*
width
=
IB_WIDTH_1X
;
break
;
case
SPEED_10000
:
*
speed
=
IB_SPEED_QDR
;
*
width
=
IB_WIDTH_1X
;
break
;
case
SPEED_20000
:
*
speed
=
IB_SPEED_DDR
;
*
width
=
IB_WIDTH_4X
;
break
;
case
SPEED_25000
:
*
speed
=
IB_SPEED_EDR
;
*
width
=
IB_WIDTH_1X
;
break
;
case
SPEED_40000
:
*
speed
=
IB_SPEED_QDR
;
*
width
=
IB_WIDTH_4X
;
break
;
case
SPEED_50000
:
break
;
default:
*
speed
=
IB_SPEED_SDR
;
*
width
=
IB_WIDTH_1X
;
break
;
}
}
/* Port */
int
bnxt_re_query_port
(
struct
ib_device
*
ibdev
,
u8
port_num
,
struct
ib_port_attr
*
port_attr
)
...
...
@@ -308,25 +264,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
* IB stack to avoid race in the NETDEV_UNREG path
*/
if
(
test_bit
(
BNXT_RE_FLAG_IBDEV_REGISTERED
,
&
rdev
->
flags
))
__to_ib_speed_width
(
rdev
->
netdev
,
&
port_attr
->
active_speed
,
&
port_attr
->
active_width
);
return
0
;
}
int
bnxt_re_modify_port
(
struct
ib_device
*
ibdev
,
u8
port_num
,
int
port_modify_mask
,
struct
ib_port_modify
*
port_modify
)
{
switch
(
port_modify_mask
)
{
case
IB_PORT_SHUTDOWN
:
break
;
case
IB_PORT_INIT_TYPE
:
break
;
case
IB_PORT_RESET_QKEY_CNTR
:
break
;
default:
break
;
}
if
(
!
ib_get_eth_speed
(
ibdev
,
port_num
,
&
port_attr
->
active_speed
,
&
port_attr
->
active_width
))
return
-
EINVAL
;
return
0
;
}
...
...
@@ -846,6 +786,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
struct
bnxt_re_dev
*
rdev
=
qp
->
rdev
;
int
rc
;
bnxt_qplib_del_flush_qp
(
&
qp
->
qplib_qp
);
rc
=
bnxt_qplib_destroy_qp
(
&
rdev
->
qplib_res
,
&
qp
->
qplib_qp
);
if
(
rc
)
{
dev_err
(
rdev_to_dev
(
rdev
),
"Failed to destroy HW QP"
);
...
...
@@ -860,6 +801,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return
rc
;
}
bnxt_qplib_del_flush_qp
(
&
qp
->
qplib_qp
);
rc
=
bnxt_qplib_destroy_qp
(
&
rdev
->
qplib_res
,
&
rdev
->
qp1_sqp
->
qplib_qp
);
if
(
rc
)
{
...
...
@@ -1404,6 +1346,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
qp
->
qplib_qp
.
modify_flags
|=
CMDQ_MODIFY_QP_MODIFY_MASK_STATE
;
qp
->
qplib_qp
.
state
=
__from_ib_qp_state
(
qp_attr
->
qp_state
);
if
(
!
qp
->
sumem
&&
qp
->
qplib_qp
.
state
==
CMDQ_MODIFY_QP_NEW_STATE_ERR
)
{
dev_dbg
(
rdev_to_dev
(
rdev
),
"Move QP = %p to flush list
\n
"
,
qp
);
bnxt_qplib_add_flush_qp
(
&
qp
->
qplib_qp
);
}
if
(
!
qp
->
sumem
&&
qp
->
qplib_qp
.
state
==
CMDQ_MODIFY_QP_NEW_STATE_RESET
)
{
dev_dbg
(
rdev_to_dev
(
rdev
),
"Move QP = %p out of flush list
\n
"
,
qp
);
bnxt_qplib_del_flush_qp
(
&
qp
->
qplib_qp
);
}
}
if
(
qp_attr_mask
&
IB_QP_EN_SQD_ASYNC_NOTIFY
)
{
qp
->
qplib_qp
.
modify_flags
|=
...
...
@@ -2414,6 +2371,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
}
cq
->
qplib_cq
.
max_wqe
=
entries
;
cq
->
qplib_cq
.
cnq_hw_ring_id
=
rdev
->
nq
.
ring_id
;
cq
->
qplib_cq
.
nq
=
&
rdev
->
nq
;
rc
=
bnxt_qplib_create_cq
(
&
rdev
->
qplib_res
,
&
cq
->
qplib_cq
);
if
(
rc
)
{
...
...
@@ -2921,6 +2879,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
sq
->
send_phantom
=
false
;
}
}
if
(
ncqe
<
budget
)
ncqe
+=
bnxt_qplib_process_flush_list
(
&
cq
->
qplib_cq
,
cqe
+
ncqe
,
budget
-
ncqe
);
if
(
!
ncqe
)
break
;
...
...
drivers/infiniband/hw/bnxt_re/ib_verbs.h
浏览文件 @
a5f66725
...
...
@@ -141,9 +141,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
struct
ib_device_modify
*
device_modify
);
int
bnxt_re_query_port
(
struct
ib_device
*
ibdev
,
u8
port_num
,
struct
ib_port_attr
*
port_attr
);
int
bnxt_re_modify_port
(
struct
ib_device
*
ibdev
,
u8
port_num
,
int
port_modify_mask
,
struct
ib_port_modify
*
port_modify
);
int
bnxt_re_get_port_immutable
(
struct
ib_device
*
ibdev
,
u8
port_num
,
struct
ib_port_immutable
*
immutable
);
int
bnxt_re_query_pkey
(
struct
ib_device
*
ibdev
,
u8
port_num
,
...
...
drivers/infiniband/hw/bnxt_re/main.c
浏览文件 @
a5f66725
...
...
@@ -70,7 +70,6 @@ static char version[] =
MODULE_AUTHOR
(
"Eddie Wai <eddie.wai@broadcom.com>"
);
MODULE_DESCRIPTION
(
BNXT_RE_DESC
" Driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
ROCE_DRV_MODULE_VERSION
);
/* globals */
static
struct
list_head
bnxt_re_dev_list
=
LIST_HEAD_INIT
(
bnxt_re_dev_list
);
...
...
@@ -474,7 +473,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev
->
modify_device
=
bnxt_re_modify_device
;
ibdev
->
query_port
=
bnxt_re_query_port
;
ibdev
->
modify_port
=
bnxt_re_modify_port
;
ibdev
->
get_port_immutable
=
bnxt_re_get_port_immutable
;
ibdev
->
query_pkey
=
bnxt_re_query_pkey
;
ibdev
->
query_gid
=
bnxt_re_query_gid
;
...
...
@@ -835,6 +833,42 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
mutex_unlock
(
&
rdev
->
qp_lock
);
}
static
int
bnxt_re_update_gid
(
struct
bnxt_re_dev
*
rdev
)
{
struct
bnxt_qplib_sgid_tbl
*
sgid_tbl
=
&
rdev
->
qplib_res
.
sgid_tbl
;
struct
bnxt_qplib_gid
gid
;
u16
gid_idx
,
index
;
int
rc
=
0
;
if
(
!
test_bit
(
BNXT_RE_FLAG_IBDEV_REGISTERED
,
&
rdev
->
flags
))
return
0
;
if
(
!
sgid_tbl
)
{
dev_err
(
rdev_to_dev
(
rdev
),
"QPLIB: SGID table not allocated"
);
return
-
EINVAL
;
}
for
(
index
=
0
;
index
<
sgid_tbl
->
active
;
index
++
)
{
gid_idx
=
sgid_tbl
->
hw_id
[
index
];
if
(
!
memcmp
(
&
sgid_tbl
->
tbl
[
index
],
&
bnxt_qplib_gid_zero
,
sizeof
(
bnxt_qplib_gid_zero
)))
continue
;
/* need to modify the VLAN enable setting of non VLAN GID only
* as setting is done for VLAN GID while adding GID
*/
if
(
sgid_tbl
->
vlan
[
index
])
continue
;
memcpy
(
&
gid
,
&
sgid_tbl
->
tbl
[
index
],
sizeof
(
gid
));
rc
=
bnxt_qplib_update_sgid
(
sgid_tbl
,
&
gid
,
gid_idx
,
rdev
->
qplib_res
.
netdev
->
dev_addr
);
}
return
rc
;
}
static
u32
bnxt_re_get_priority_mask
(
struct
bnxt_re_dev
*
rdev
)
{
u32
prio_map
=
0
,
tmp_map
=
0
;
...
...
@@ -854,8 +888,6 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
tmp_map
=
dcb_ieee_getapp_mask
(
netdev
,
&
app
);
prio_map
|=
tmp_map
;
if
(
!
prio_map
)
prio_map
=
-
EFAULT
;
return
prio_map
;
}
...
...
@@ -881,10 +913,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
int
rc
;
/* Get priority for roce */
rc
=
bnxt_re_get_priority_mask
(
rdev
);
if
(
rc
<
0
)
return
rc
;
prio_map
=
(
u8
)
rc
;
prio_map
=
bnxt_re_get_priority_mask
(
rdev
);
if
(
prio_map
==
rdev
->
cur_prio_map
)
return
0
;
...
...
@@ -906,6 +935,16 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return
rc
;
}
/* Actual priorities are not programmed as they are already
* done by L2 driver; just enable or disable priority vlan tagging
*/
if
((
prio_map
==
0
&&
rdev
->
qplib_res
.
prio
)
||
(
prio_map
!=
0
&&
!
rdev
->
qplib_res
.
prio
))
{
rdev
->
qplib_res
.
prio
=
prio_map
?
true
:
false
;
bnxt_re_update_gid
(
rdev
);
}
return
0
;
}
...
...
@@ -998,7 +1037,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
rc
=
bnxt_qplib_alloc_rcfw_channel
(
rdev
->
en_dev
->
pdev
,
&
rdev
->
rcfw
);
rc
=
bnxt_qplib_alloc_rcfw_channel
(
rdev
->
en_dev
->
pdev
,
&
rdev
->
rcfw
,
BNXT_RE_MAX_QPC_COUNT
);
if
(
rc
)
goto
fail
;
...
...
drivers/infiniband/hw/bnxt_re/qplib_fp.c
浏览文件 @
a5f66725
...
...
@@ -51,6 +51,168 @@
#include "qplib_fp.h"
static
void
bnxt_qplib_arm_cq_enable
(
struct
bnxt_qplib_cq
*
cq
);
static
void
__clean_cq
(
struct
bnxt_qplib_cq
*
cq
,
u64
qp
);
static
void
bnxt_qplib_cancel_phantom_processing
(
struct
bnxt_qplib_qp
*
qp
)
{
qp
->
sq
.
condition
=
false
;
qp
->
sq
.
send_phantom
=
false
;
qp
->
sq
.
single
=
false
;
}
/* Flush list */
static
void
__bnxt_qplib_add_flush_qp
(
struct
bnxt_qplib_qp
*
qp
)
{
struct
bnxt_qplib_cq
*
scq
,
*
rcq
;
scq
=
qp
->
scq
;
rcq
=
qp
->
rcq
;
if
(
!
qp
->
sq
.
flushed
)
{
dev_dbg
(
&
scq
->
hwq
.
pdev
->
dev
,
"QPLIB: FP: Adding to SQ Flush list = %p"
,
qp
);
bnxt_qplib_cancel_phantom_processing
(
qp
);
list_add_tail
(
&
qp
->
sq_flush
,
&
scq
->
sqf_head
);
qp
->
sq
.
flushed
=
true
;
}
if
(
!
qp
->
srq
)
{
if
(
!
qp
->
rq
.
flushed
)
{
dev_dbg
(
&
rcq
->
hwq
.
pdev
->
dev
,
"QPLIB: FP: Adding to RQ Flush list = %p"
,
qp
);
list_add_tail
(
&
qp
->
rq_flush
,
&
rcq
->
rqf_head
);
qp
->
rq
.
flushed
=
true
;
}
}
}
void
bnxt_qplib_acquire_cq_locks
(
struct
bnxt_qplib_qp
*
qp
,
unsigned
long
*
flags
)
__acquires
(
&
qp
->
scq
->
hwq
.
lock
)
__acquires
(
&
qp
->
rcq
->
hwq
.
lock
)
{
spin_lock_irqsave
(
&
qp
->
scq
->
hwq
.
lock
,
*
flags
);
if
(
qp
->
scq
==
qp
->
rcq
)
__acquire
(
&
qp
->
rcq
->
hwq
.
lock
);
else
spin_lock
(
&
qp
->
rcq
->
hwq
.
lock
);
}
void
bnxt_qplib_release_cq_locks
(
struct
bnxt_qplib_qp
*
qp
,
unsigned
long
*
flags
)
__releases
(
&
qp
->
scq
->
hwq
.
lock
)
__releases
(
&
qp
->
rcq
->
hwq
.
lock
)
{
if
(
qp
->
scq
==
qp
->
rcq
)
__release
(
&
qp
->
rcq
->
hwq
.
lock
);
else
spin_unlock
(
&
qp
->
rcq
->
hwq
.
lock
);
spin_unlock_irqrestore
(
&
qp
->
scq
->
hwq
.
lock
,
*
flags
);
}
static
struct
bnxt_qplib_cq
*
bnxt_qplib_find_buddy_cq
(
struct
bnxt_qplib_qp
*
qp
,
struct
bnxt_qplib_cq
*
cq
)
{
struct
bnxt_qplib_cq
*
buddy_cq
=
NULL
;
if
(
qp
->
scq
==
qp
->
rcq
)
buddy_cq
=
NULL
;
else
if
(
qp
->
scq
==
cq
)
buddy_cq
=
qp
->
rcq
;
else
buddy_cq
=
qp
->
scq
;
return
buddy_cq
;
}
static
void
bnxt_qplib_lock_buddy_cq
(
struct
bnxt_qplib_qp
*
qp
,
struct
bnxt_qplib_cq
*
cq
)
__acquires
(
&
buddy_cq
->
hwq
.
lock
)
{
struct
bnxt_qplib_cq
*
buddy_cq
=
NULL
;
buddy_cq
=
bnxt_qplib_find_buddy_cq
(
qp
,
cq
);
if
(
!
buddy_cq
)
__acquire
(
&
cq
->
hwq
.
lock
);
else
spin_lock
(
&
buddy_cq
->
hwq
.
lock
);
}
static
void
bnxt_qplib_unlock_buddy_cq
(
struct
bnxt_qplib_qp
*
qp
,
struct
bnxt_qplib_cq
*
cq
)
__releases
(
&
buddy_cq
->
hwq
.
lock
)
{
struct
bnxt_qplib_cq
*
buddy_cq
=
NULL
;
buddy_cq
=
bnxt_qplib_find_buddy_cq
(
qp
,
cq
);
if
(
!
buddy_cq
)
__release
(
&
cq
->
hwq
.
lock
);
else
spin_unlock
(
&
buddy_cq
->
hwq
.
lock
);
}
void
bnxt_qplib_add_flush_qp
(
struct
bnxt_qplib_qp
*
qp
)
{
unsigned
long
flags
;
bnxt_qplib_acquire_cq_locks
(
qp
,
&
flags
);
__bnxt_qplib_add_flush_qp
(
qp
);
bnxt_qplib_release_cq_locks
(
qp
,
&
flags
);
}
static
void
__bnxt_qplib_del_flush_qp
(
struct
bnxt_qplib_qp
*
qp
)
{
struct
bnxt_qplib_cq
*
scq
,
*
rcq
;
scq
=
qp
->
scq
;
rcq
=
qp
->
rcq
;
if
(
qp
->
sq
.
flushed
)
{
qp
->
sq
.
flushed
=
false
;
list_del
(
&
qp
->
sq_flush
);
}
if
(
!
qp
->
srq
)
{
if
(
qp
->
rq
.
flushed
)
{
qp
->
rq
.
flushed
=
false
;
list_del
(
&
qp
->
rq_flush
);
}
}
}
void
bnxt_qplib_del_flush_qp
(
struct
bnxt_qplib_qp
*
qp
)
{
unsigned
long
flags
;
bnxt_qplib_acquire_cq_locks
(
qp
,
&
flags
);
__clean_cq
(
qp
->
scq
,
(
u64
)(
unsigned
long
)
qp
);
qp
->
sq
.
hwq
.
prod
=
0
;
qp
->
sq
.
hwq
.
cons
=
0
;
__clean_cq
(
qp
->
rcq
,
(
u64
)(
unsigned
long
)
qp
);
qp
->
rq
.
hwq
.
prod
=
0
;
qp
->
rq
.
hwq
.
cons
=
0
;
__bnxt_qplib_del_flush_qp
(
qp
);
bnxt_qplib_release_cq_locks
(
qp
,
&
flags
);
}
static
void
bnxt_qpn_cqn_sched_task
(
struct
work_struct
*
work
)
{
struct
bnxt_qplib_nq_work
*
nq_work
=
container_of
(
work
,
struct
bnxt_qplib_nq_work
,
work
);
struct
bnxt_qplib_cq
*
cq
=
nq_work
->
cq
;
struct
bnxt_qplib_nq
*
nq
=
nq_work
->
nq
;
if
(
cq
&&
nq
)
{
spin_lock_bh
(
&
cq
->
compl_lock
);
if
(
atomic_read
(
&
cq
->
arm_state
)
&&
nq
->
cqn_handler
)
{
dev_dbg
(
&
nq
->
pdev
->
dev
,
"%s:Trigger cq = %p event nq = %p
\n
"
,
__func__
,
cq
,
nq
);
nq
->
cqn_handler
(
nq
,
cq
);
}
spin_unlock_bh
(
&
cq
->
compl_lock
);
}
kfree
(
nq_work
);
}
static
void
bnxt_qplib_free_qp_hdr_buf
(
struct
bnxt_qplib_res
*
res
,
struct
bnxt_qplib_qp
*
qp
)
...
...
@@ -119,6 +281,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct
bnxt_qplib_nq
*
nq
=
(
struct
bnxt_qplib_nq
*
)
data
;
struct
bnxt_qplib_hwq
*
hwq
=
&
nq
->
hwq
;
struct
nq_base
*
nqe
,
**
nq_ptr
;
struct
bnxt_qplib_cq
*
cq
;
int
num_cqne_processed
=
0
;
u32
sw_cons
,
raw_cons
;
u16
type
;
...
...
@@ -143,15 +306,17 @@ static void bnxt_qplib_service_nq(unsigned long data)
q_handle
=
le32_to_cpu
(
nqcne
->
cq_handle_low
);
q_handle
|=
(
u64
)
le32_to_cpu
(
nqcne
->
cq_handle_high
)
<<
32
;
bnxt_qplib_arm_cq_enable
((
struct
bnxt_qplib_cq
*
)
((
unsigned
long
)
q_handle
));
if
(
!
nq
->
cqn_handler
(
nq
,
(
struct
bnxt_qplib_cq
*
)
((
unsigned
long
)
q_handle
)))
cq
=
(
struct
bnxt_qplib_cq
*
)(
unsigned
long
)
q_handle
;
bnxt_qplib_arm_cq_enable
(
cq
);
spin_lock_bh
(
&
cq
->
compl_lock
);
atomic_set
(
&
cq
->
arm_state
,
0
);
if
(
!
nq
->
cqn_handler
(
nq
,
(
cq
)))
num_cqne_processed
++
;
else
dev_warn
(
&
nq
->
pdev
->
dev
,
"QPLIB: cqn - type 0x%x not handled"
,
type
);
spin_unlock_bh
(
&
cq
->
compl_lock
);
break
;
}
case
NQ_BASE_TYPE_DBQ_EVENT
:
...
...
@@ -190,6 +355,10 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
void
bnxt_qplib_disable_nq
(
struct
bnxt_qplib_nq
*
nq
)
{
if
(
nq
->
cqn_wq
)
{
destroy_workqueue
(
nq
->
cqn_wq
);
nq
->
cqn_wq
=
NULL
;
}
/* Make sure the HW is stopped! */
synchronize_irq
(
nq
->
vector
);
tasklet_disable
(
&
nq
->
worker
);
...
...
@@ -216,7 +385,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
void
*
,
u8
event
))
{
resource_size_t
nq_base
;
int
rc
;
int
rc
=
-
1
;
nq
->
pdev
=
pdev
;
nq
->
vector
=
msix_vector
;
...
...
@@ -227,6 +396,11 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
tasklet_init
(
&
nq
->
worker
,
bnxt_qplib_service_nq
,
(
unsigned
long
)
nq
);
/* Have a task to schedule CQ notifiers in post send case */
nq
->
cqn_wq
=
create_singlethread_workqueue
(
"bnxt_qplib_nq"
);
if
(
!
nq
->
cqn_wq
)
goto
fail
;
nq
->
requested
=
false
;
rc
=
request_irq
(
nq
->
vector
,
bnxt_qplib_nq_irq
,
0
,
"bnxt_qplib_nq"
,
nq
);
if
(
rc
)
{
...
...
@@ -401,8 +575,8 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp
->
id
=
le32_to_cpu
(
resp
.
xid
);
qp
->
cur_qp_state
=
CMDQ_MODIFY_QP_NEW_STATE_RESET
;
sq
->
flush_in_progress
=
false
;
r
q
->
flush_in_progress
=
false
;
rcfw
->
qp_tbl
[
qp
->
id
].
qp_id
=
qp
->
id
;
r
cfw
->
qp_tbl
[
qp
->
id
].
qp_handle
=
(
void
*
)
qp
;
return
0
;
...
...
@@ -615,8 +789,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp
->
id
=
le32_to_cpu
(
resp
.
xid
);
qp
->
cur_qp_state
=
CMDQ_MODIFY_QP_NEW_STATE_RESET
;
sq
->
flush_in_progress
=
false
;
rq
->
flush_in_progress
=
false
;
INIT_LIST_HEAD
(
&
qp
->
sq_flush
);
INIT_LIST_HEAD
(
&
qp
->
rq_flush
);
rcfw
->
qp_tbl
[
qp
->
id
].
qp_id
=
qp
->
id
;
rcfw
->
qp_tbl
[
qp
->
id
].
qp_handle
=
(
void
*
)
qp
;
return
0
;
...
...
@@ -963,13 +1139,19 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
u16
cmd_flags
=
0
;
int
rc
;
rcfw
->
qp_tbl
[
qp
->
id
].
qp_id
=
BNXT_QPLIB_QP_ID_INVALID
;
rcfw
->
qp_tbl
[
qp
->
id
].
qp_handle
=
NULL
;
RCFW_CMD_PREP
(
req
,
DESTROY_QP
,
cmd_flags
);
req
.
qp_cid
=
cpu_to_le32
(
qp
->
id
);
rc
=
bnxt_qplib_rcfw_send_message
(
rcfw
,
(
void
*
)
&
req
,
(
void
*
)
&
resp
,
NULL
,
0
);
if
(
rc
)
if
(
rc
)
{
rcfw
->
qp_tbl
[
qp
->
id
].
qp_id
=
qp
->
id
;
rcfw
->
qp_tbl
[
qp
->
id
].
qp_handle
=
qp
;
return
rc
;
}
/* Must walk the associated CQs to nullified the QP ptr */
spin_lock_irqsave
(
&
qp
->
scq
->
hwq
.
lock
,
flags
);
...
...
@@ -1074,14 +1256,21 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct
bnxt_qplib_swq
*
swq
;
struct
sq_send
*
hw_sq_send_hdr
,
**
hw_sq_send_ptr
;
struct
sq_sge
*
hw_sge
;
struct
bnxt_qplib_nq_work
*
nq_work
=
NULL
;
bool
sch_handler
=
false
;
u32
sw_prod
;
u8
wqe_size16
;
int
i
,
rc
=
0
,
data_len
=
0
,
pkt_num
=
0
;
__le32
temp32
;
if
(
qp
->
state
!=
CMDQ_MODIFY_QP_NEW_STATE_RTS
)
{
rc
=
-
EINVAL
;
goto
done
;
if
(
qp
->
state
==
CMDQ_MODIFY_QP_NEW_STATE_ERR
)
{
sch_handler
=
true
;
dev_dbg
(
&
sq
->
hwq
.
pdev
->
dev
,
"%s Error QP. Scheduling for poll_cq
\n
"
,
__func__
);
goto
queue_err
;
}
}
if
(
bnxt_qplib_queue_full
(
sq
))
{
...
...
@@ -1301,12 +1490,35 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
((
swq
->
next_psn
<<
SQ_PSN_SEARCH_NEXT_PSN_SFT
)
&
SQ_PSN_SEARCH_NEXT_PSN_MASK
));
}
queue_err:
if
(
sch_handler
)
{
/* Store the ULP info in the software structures */
sw_prod
=
HWQ_CMP
(
sq
->
hwq
.
prod
,
&
sq
->
hwq
);
swq
=
&
sq
->
swq
[
sw_prod
];
swq
->
wr_id
=
wqe
->
wr_id
;
swq
->
type
=
wqe
->
type
;
swq
->
flags
=
wqe
->
flags
;
if
(
qp
->
sig_type
)
swq
->
flags
|=
SQ_SEND_FLAGS_SIGNAL_COMP
;
swq
->
start_psn
=
sq
->
psn
&
BTH_PSN_MASK
;
}
sq
->
hwq
.
prod
++
;
qp
->
wqe_cnt
++
;
done:
if
(
sch_handler
)
{
nq_work
=
kzalloc
(
sizeof
(
*
nq_work
),
GFP_ATOMIC
);
if
(
nq_work
)
{
nq_work
->
cq
=
qp
->
scq
;
nq_work
->
nq
=
qp
->
scq
->
nq
;
INIT_WORK
(
&
nq_work
->
work
,
bnxt_qpn_cqn_sched_task
);
queue_work
(
qp
->
scq
->
nq
->
cqn_wq
,
&
nq_work
->
work
);
}
else
{
dev_err
(
&
sq
->
hwq
.
pdev
->
dev
,
"QPLIB: FP: Failed to allocate SQ nq_work!"
);
rc
=
-
ENOMEM
;
}
}
return
rc
;
}
...
...
@@ -1334,15 +1546,17 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct
bnxt_qplib_q
*
rq
=
&
qp
->
rq
;
struct
rq_wqe
*
rqe
,
**
rqe_ptr
;
struct
sq_sge
*
hw_sge
;
struct
bnxt_qplib_nq_work
*
nq_work
=
NULL
;
bool
sch_handler
=
false
;
u32
sw_prod
;
int
i
,
rc
=
0
;
if
(
qp
->
state
==
CMDQ_MODIFY_QP_NEW_STATE_ERR
)
{
dev_err
(
&
rq
->
hwq
.
pdev
->
dev
,
"QPLIB: FP: QP (0x%x) is in the 0x%x state"
,
qp
->
id
,
qp
->
state
);
rc
=
-
EINVAL
;
goto
done
;
sch_handler
=
true
;
dev_dbg
(
&
rq
->
hwq
.
pdev
->
dev
,
"%s Error QP. Scheduling for poll_cq
\n
"
,
__func__
)
;
goto
queue_err
;
}
if
(
bnxt_qplib_queue_full
(
rq
))
{
dev_err
(
&
rq
->
hwq
.
pdev
->
dev
,
...
...
@@ -1378,7 +1592,27 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
/* Supply the rqe->wr_id index to the wr_id_tbl for now */
rqe
->
wr_id
[
0
]
=
cpu_to_le32
(
sw_prod
);
queue_err:
if
(
sch_handler
)
{
/* Store the ULP info in the software structures */
sw_prod
=
HWQ_CMP
(
rq
->
hwq
.
prod
,
&
rq
->
hwq
);
rq
->
swq
[
sw_prod
].
wr_id
=
wqe
->
wr_id
;
}
rq
->
hwq
.
prod
++
;
if
(
sch_handler
)
{
nq_work
=
kzalloc
(
sizeof
(
*
nq_work
),
GFP_ATOMIC
);
if
(
nq_work
)
{
nq_work
->
cq
=
qp
->
rcq
;
nq_work
->
nq
=
qp
->
rcq
->
nq
;
INIT_WORK
(
&
nq_work
->
work
,
bnxt_qpn_cqn_sched_task
);
queue_work
(
qp
->
rcq
->
nq
->
cqn_wq
,
&
nq_work
->
work
);
}
else
{
dev_err
(
&
rq
->
hwq
.
pdev
->
dev
,
"QPLIB: FP: Failed to allocate RQ nq_work!"
);
rc
=
-
ENOMEM
;
}
}
done:
return
rc
;
}
...
...
@@ -1471,6 +1705,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
cq
->
dbr_base
=
res
->
dpi_tbl
.
dbr_bar_reg_iomem
;
cq
->
period
=
BNXT_QPLIB_QUEUE_START_PERIOD
;
init_waitqueue_head
(
&
cq
->
waitq
);
INIT_LIST_HEAD
(
&
cq
->
sqf_head
);
INIT_LIST_HEAD
(
&
cq
->
rqf_head
);
spin_lock_init
(
&
cq
->
compl_lock
);
bnxt_qplib_arm_cq_enable
(
cq
);
return
0
;
...
...
@@ -1513,9 +1750,13 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
while
(
*
budget
)
{
sw_cons
=
HWQ_CMP
(
sq
->
hwq
.
cons
,
&
sq
->
hwq
);
if
(
sw_cons
==
sw_prod
)
{
sq
->
flush_in_progress
=
false
;
break
;
}
/* Skip the FENCE WQE completions */
if
(
sq
->
swq
[
sw_cons
].
wr_id
==
BNXT_QPLIB_FENCE_WRID
)
{
bnxt_qplib_cancel_phantom_processing
(
qp
);
goto
skip_compl
;
}
memset
(
cqe
,
0
,
sizeof
(
*
cqe
));
cqe
->
status
=
CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR
;
cqe
->
opcode
=
CQ_BASE_CQE_TYPE_REQ
;
...
...
@@ -1525,6 +1766,7 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
cqe
->
type
=
sq
->
swq
[
sw_cons
].
type
;
cqe
++
;
(
*
budget
)
--
;
skip_compl:
sq
->
hwq
.
cons
++
;
}
*
pcqe
=
cqe
;
...
...
@@ -1536,11 +1778,24 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
}
static
int
__flush_rq
(
struct
bnxt_qplib_q
*
rq
,
struct
bnxt_qplib_qp
*
qp
,
int
opcode
,
struct
bnxt_qplib_cqe
**
pcqe
,
int
*
budget
)
struct
bnxt_qplib_cqe
**
pcqe
,
int
*
budget
)
{
struct
bnxt_qplib_cqe
*
cqe
;
u32
sw_prod
,
sw_cons
;
int
rc
=
0
;
int
opcode
=
0
;
switch
(
qp
->
type
)
{
case
CMDQ_CREATE_QP1_TYPE_GSI
:
opcode
=
CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
;
break
;
case
CMDQ_CREATE_QP_TYPE_RC
:
opcode
=
CQ_BASE_CQE_TYPE_RES_RC
;
break
;
case
CMDQ_CREATE_QP_TYPE_UD
:
opcode
=
CQ_BASE_CQE_TYPE_RES_UD
;
break
;
}
/* Flush the rest of the RQ */
sw_prod
=
HWQ_CMP
(
rq
->
hwq
.
prod
,
&
rq
->
hwq
);
...
...
@@ -1567,6 +1822,21 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
return
rc
;
}
void
bnxt_qplib_mark_qp_error
(
void
*
qp_handle
)
{
struct
bnxt_qplib_qp
*
qp
=
qp_handle
;
if
(
!
qp
)
return
;
/* Must block new posting of SQ and RQ */
qp
->
state
=
CMDQ_MODIFY_QP_NEW_STATE_ERR
;
bnxt_qplib_cancel_phantom_processing
(
qp
);
/* Add qp to flush list of the CQ */
__bnxt_qplib_add_flush_qp
(
qp
);
}
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
* CQE is track from sw_cq_cons to max_element but valid only if VALID=1
*/
...
...
@@ -1694,10 +1964,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
cqe_sq_cons
,
sq
->
hwq
.
max_elements
);
return
-
EINVAL
;
}
/* If we were in the middle of flushing the SQ, continue */
if
(
sq
->
flush_in_progress
)
goto
flush
;
if
(
qp
->
sq
.
flushed
)
{
dev_dbg
(
&
cq
->
hwq
.
pdev
->
dev
,
"%s: QPLIB: QP in Flush QP = %p
\n
"
,
__func__
,
qp
);
goto
done
;
}
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
...
...
@@ -1733,11 +2005,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
sw_sq_cons
,
cqe
->
wr_id
,
cqe
->
status
);
cqe
++
;
(
*
budget
)
--
;
sq
->
flush_in_progress
=
true
;
/* Must block new posting of SQ and RQ */
qp
->
state
=
CMDQ_MODIFY_QP_NEW_STATE_ERR
;
sq
->
condition
=
false
;
sq
->
single
=
false
;
bnxt_qplib_lock_buddy_cq
(
qp
,
cq
);
bnxt_qplib_mark_qp_error
(
qp
);
bnxt_qplib_unlock_buddy_cq
(
qp
,
cq
);
}
else
{
if
(
swq
->
flags
&
SQ_SEND_FLAGS_SIGNAL_COMP
)
{
/* Before we complete, do WA 9060 */
...
...
@@ -1768,15 +2038,6 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
* the WC for this CQE
*/
sq
->
single
=
false
;
if
(
!
sq
->
flush_in_progress
)
goto
done
;
flush:
/* Require to walk the sq's swq to fabricate CQEs for all
* previously posted SWQEs due to the error CQE received
*/
rc
=
__flush_sq
(
sq
,
qp
,
pcqe
,
budget
);
if
(
!
rc
)
sq
->
flush_in_progress
=
false
;
done:
return
rc
;
}
...
...
@@ -1798,6 +2059,12 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
dev_err
(
&
cq
->
hwq
.
pdev
->
dev
,
"QPLIB: process_cq RC qp is NULL"
);
return
-
EINVAL
;
}
if
(
qp
->
rq
.
flushed
)
{
dev_dbg
(
&
cq
->
hwq
.
pdev
->
dev
,
"%s: QPLIB: QP in Flush QP = %p
\n
"
,
__func__
,
qp
);
goto
done
;
}
cqe
=
*
pcqe
;
cqe
->
opcode
=
hwcqe
->
cqe_type_toggle
&
CQ_BASE_CQE_TYPE_MASK
;
cqe
->
length
=
le32_to_cpu
(
hwcqe
->
length
);
...
...
@@ -1817,8 +2084,6 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
wr_id_idx
,
rq
->
hwq
.
max_elements
);
return
-
EINVAL
;
}
if
(
rq
->
flush_in_progress
)
goto
flush_rq
;
cqe
->
wr_id
=
rq
->
swq
[
wr_id_idx
].
wr_id
;
cqe
++
;
...
...
@@ -1827,12 +2092,13 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
*
pcqe
=
cqe
;
if
(
hwcqe
->
status
!=
CQ_RES_RC_STATUS_OK
)
{
rq
->
flush_in_progress
=
true
;
flush_rq:
rc
=
__flush_rq
(
rq
,
qp
,
CQ_BASE_CQE_TYPE_RES_RC
,
pcqe
,
budget
);
if
(
!
rc
)
rq
->
flush_in_progress
=
false
;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq
(
qp
,
cq
);
__bnxt_qplib_add_flush_qp
(
qp
);
bnxt_qplib_unlock_buddy_cq
(
qp
,
cq
);
}
done:
return
rc
;
}
...
...
@@ -1853,6 +2119,11 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
dev_err
(
&
cq
->
hwq
.
pdev
->
dev
,
"QPLIB: process_cq UD qp is NULL"
);
return
-
EINVAL
;
}
if
(
qp
->
rq
.
flushed
)
{
dev_dbg
(
&
cq
->
hwq
.
pdev
->
dev
,
"%s: QPLIB: QP in Flush QP = %p
\n
"
,
__func__
,
qp
);
goto
done
;
}
cqe
=
*
pcqe
;
cqe
->
opcode
=
hwcqe
->
cqe_type_toggle
&
CQ_BASE_CQE_TYPE_MASK
;
cqe
->
length
=
le32_to_cpu
(
hwcqe
->
length
);
...
...
@@ -1876,8 +2147,6 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
wr_id_idx
,
rq
->
hwq
.
max_elements
);
return
-
EINVAL
;
}
if
(
rq
->
flush_in_progress
)
goto
flush_rq
;
cqe
->
wr_id
=
rq
->
swq
[
wr_id_idx
].
wr_id
;
cqe
++
;
...
...
@@ -1886,12 +2155,12 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
*
pcqe
=
cqe
;
if
(
hwcqe
->
status
!=
CQ_RES_RC_STATUS_OK
)
{
rq
->
flush_in_progress
=
true
;
flush_rq:
rc
=
__flush_rq
(
rq
,
qp
,
CQ_BASE_CQE_TYPE_RES_UD
,
pcqe
,
budget
);
if
(
!
rc
)
rq
->
flush_in_progress
=
false
;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq
(
qp
,
cq
);
__bnxt_qplib_add_flush_qp
(
qp
);
bnxt_qplib_unlock_buddy_cq
(
qp
,
cq
);
}
done:
return
rc
;
}
...
...
@@ -1932,6 +2201,11 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
"QPLIB: process_cq Raw/QP1 qp is NULL"
);
return
-
EINVAL
;
}
if
(
qp
->
rq
.
flushed
)
{
dev_dbg
(
&
cq
->
hwq
.
pdev
->
dev
,
"%s: QPLIB: QP in Flush QP = %p
\n
"
,
__func__
,
qp
);
goto
done
;
}
cqe
=
*
pcqe
;
cqe
->
opcode
=
hwcqe
->
cqe_type_toggle
&
CQ_BASE_CQE_TYPE_MASK
;
cqe
->
flags
=
le16_to_cpu
(
hwcqe
->
flags
);
...
...
@@ -1960,8 +2234,6 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
wr_id_idx
,
rq
->
hwq
.
max_elements
);
return
-
EINVAL
;
}
if
(
rq
->
flush_in_progress
)
goto
flush_rq
;
cqe
->
wr_id
=
rq
->
swq
[
wr_id_idx
].
wr_id
;
cqe
++
;
...
...
@@ -1970,13 +2242,13 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
*
pcqe
=
cqe
;
if
(
hwcqe
->
status
!=
CQ_RES_RC_STATUS_OK
)
{
rq
->
flush_in_progress
=
true
;
flush_rq:
rc
=
__flush_rq
(
rq
,
qp
,
CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
,
pcqe
,
budget
);
if
(
!
rc
)
rq
->
flush_in_progress
=
false
;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq
(
qp
,
cq
);
__bnxt_qplib_add_flush_qp
(
qp
);
bnxt_qplib_unlock_buddy_cq
(
qp
,
cq
);
}
done:
return
rc
;
}
...
...
@@ -1990,7 +2262,6 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
struct
bnxt_qplib_cqe
*
cqe
;
u32
sw_cons
=
0
,
cqe_cons
;
int
rc
=
0
;
u8
opcode
=
0
;
/* Check the Status */
if
(
hwcqe
->
status
!=
CQ_TERMINAL_STATUS_OK
)
...
...
@@ -2005,6 +2276,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
"QPLIB: FP: CQ Process terminal qp is NULL"
);
return
-
EINVAL
;
}
/* Must block new posting of SQ and RQ */
qp
->
state
=
CMDQ_MODIFY_QP_NEW_STATE_ERR
;
...
...
@@ -2023,9 +2295,12 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons
,
sq
->
hwq
.
max_elements
);
goto
do_rq
;
}
/* If we were in the middle of flushing, continue */
if
(
sq
->
flush_in_progress
)
goto
flush_sq
;
if
(
qp
->
sq
.
flushed
)
{
dev_dbg
(
&
cq
->
hwq
.
pdev
->
dev
,
"%s: QPLIB: QP in Flush QP = %p
\n
"
,
__func__
,
qp
);
goto
sq_done
;
}
/* Terminal CQE can also include aggregated successful CQEs prior.
* So we must complete all CQEs from the current sq's cons to the
...
...
@@ -2055,11 +2330,6 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
rc
=
-
EAGAIN
;
goto
sq_done
;
}
sq
->
flush_in_progress
=
true
;
flush_sq:
rc
=
__flush_sq
(
sq
,
qp
,
pcqe
,
budget
);
if
(
!
rc
)
sq
->
flush_in_progress
=
false
;
sq_done:
if
(
rc
)
return
rc
;
...
...
@@ -2075,26 +2345,23 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons
,
rq
->
hwq
.
max_elements
);
goto
done
;
}
if
(
qp
->
rq
.
flushed
)
{
dev_dbg
(
&
cq
->
hwq
.
pdev
->
dev
,
"%s: QPLIB: QP in Flush QP = %p
\n
"
,
__func__
,
qp
);
rc
=
0
;
goto
done
;
}
/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
* from the current rq->cons to the rq->prod regardless what the
* rq->cons the terminal CQE indicates
*/
rq
->
flush_in_progress
=
true
;
switch
(
qp
->
type
)
{
case
CMDQ_CREATE_QP1_TYPE_GSI
:
opcode
=
CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
;
break
;
case
CMDQ_CREATE_QP_TYPE_RC
:
opcode
=
CQ_BASE_CQE_TYPE_RES_RC
;
break
;
case
CMDQ_CREATE_QP_TYPE_UD
:
opcode
=
CQ_BASE_CQE_TYPE_RES_UD
;
break
;
}
rc
=
__flush_rq
(
rq
,
qp
,
opcode
,
pcqe
,
budget
);
if
(
!
rc
)
rq
->
flush_in_progress
=
false
;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq
(
qp
,
cq
);
__bnxt_qplib_add_flush_qp
(
qp
);
bnxt_qplib_unlock_buddy_cq
(
qp
,
cq
);
done:
return
rc
;
}
...
...
@@ -2115,6 +2382,33 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
return
0
;
}
/* bnxt_qplib_process_flush_list() - generate software flush completions
 * for all QPs queued on this CQ's flush lists.
 * @cq:       completion queue whose SQ/RQ flush lists are drained
 * @cqe:      caller-provided CQE array to fill with flush completions
 * @num_cqes: capacity of @cqe
 *
 * Walks the SQ flush list and then the RQ flush list under the CQ
 * hardware-queue lock, letting __flush_sq()/__flush_rq() emit flush
 * CQEs through the @cqe cursor until the budget runs out.
 *
 * Return: number of CQEs written into @cqe.
 */
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;	/* remaining room in @cqe */
	unsigned long flags;

	/* hwq.lock protects the flush lists; irqsave variant used, as
	 * elsewhere in this file (see bnxt_qplib_req_notify_cq).
	 */
	spin_lock_irqsave(&cq->hwq.lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing SQ QP= %p",
			qp);
		/* takes &cqe/&budget so the cursor and remaining count
		 * carry over from one QP to the next
		 */
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing RQ QP= %p",
			qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->hwq.lock, flags);

	/* budget was decremented per CQE produced */
	return num_cqes - budget;
}
int
bnxt_qplib_poll_cq
(
struct
bnxt_qplib_cq
*
cq
,
struct
bnxt_qplib_cqe
*
cqe
,
int
num_cqes
,
struct
bnxt_qplib_qp
**
lib_qp
)
{
...
...
@@ -2205,6 +2499,7 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
spin_lock_irqsave
(
&
cq
->
hwq
.
lock
,
flags
);
if
(
arm_type
)
bnxt_qplib_arm_cq
(
cq
,
arm_type
);
/* Using cq->arm_state variable to track whether to issue cq handler */
atomic_set
(
&
cq
->
arm_state
,
1
);
spin_unlock_irqrestore
(
&
cq
->
hwq
.
lock
,
flags
);
}
drivers/infiniband/hw/bnxt_re/qplib_fp.h
浏览文件 @
a5f66725
...
...
@@ -220,19 +220,20 @@ struct bnxt_qplib_q {
u16
q_full_delta
;
u16
max_sge
;
u32
psn
;
bool
flush_in_progress
;
bool
condition
;
bool
single
;
bool
send_phantom
;
u32
phantom_wqe_cnt
;
u32
phantom_cqe_cnt
;
u32
next_cq_cons
;
bool
flushed
;
};
struct
bnxt_qplib_qp
{
struct
bnxt_qplib_pd
*
pd
;
struct
bnxt_qplib_dpi
*
dpi
;
u64
qp_handle
;
#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32
id
;
u8
type
;
u8
sig_type
;
...
...
@@ -296,6 +297,8 @@ struct bnxt_qplib_qp {
dma_addr_t
sq_hdr_buf_map
;
void
*
rq_hdr_buf
;
dma_addr_t
rq_hdr_buf_map
;
struct
list_head
sq_flush
;
struct
list_head
rq_flush
;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
...
...
@@ -351,6 +354,7 @@ struct bnxt_qplib_cq {
u16
period
;
struct
bnxt_qplib_hwq
hwq
;
u32
cnq_hw_ring_id
;
struct
bnxt_qplib_nq
*
nq
;
bool
resize_in_progress
;
struct
scatterlist
*
sghead
;
u32
nmap
;
...
...
@@ -360,6 +364,9 @@ struct bnxt_qplib_cq {
unsigned
long
flags
;
#define CQ_FLAGS_RESIZE_IN_PROG 1
wait_queue_head_t
waitq
;
struct
list_head
sqf_head
,
rqf_head
;
atomic_t
arm_state
;
spinlock_t
compl_lock
;
/* synch CQ handlers */
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
...
...
@@ -417,6 +424,13 @@ struct bnxt_qplib_nq {
(
struct
bnxt_qplib_nq
*
nq
,
void
*
srq
,
u8
event
);
struct
workqueue_struct
*
cqn_wq
;
};
struct
bnxt_qplib_nq_work
{
struct
work_struct
work
;
struct
bnxt_qplib_nq
*
nq
;
struct
bnxt_qplib_cq
*
cq
;
};
void
bnxt_qplib_disable_nq
(
struct
bnxt_qplib_nq
*
nq
);
...
...
@@ -453,4 +467,13 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void
bnxt_qplib_req_notify_cq
(
struct
bnxt_qplib_cq
*
cq
,
u32
arm_type
);
void
bnxt_qplib_free_nq
(
struct
bnxt_qplib_nq
*
nq
);
int
bnxt_qplib_alloc_nq
(
struct
pci_dev
*
pdev
,
struct
bnxt_qplib_nq
*
nq
);
void
bnxt_qplib_add_flush_qp
(
struct
bnxt_qplib_qp
*
qp
);
void
bnxt_qplib_del_flush_qp
(
struct
bnxt_qplib_qp
*
qp
);
void
bnxt_qplib_acquire_cq_locks
(
struct
bnxt_qplib_qp
*
qp
,
unsigned
long
*
flags
);
void
bnxt_qplib_release_cq_locks
(
struct
bnxt_qplib_qp
*
qp
,
unsigned
long
*
flags
);
int
bnxt_qplib_process_flush_list
(
struct
bnxt_qplib_cq
*
cq
,
struct
bnxt_qplib_cqe
*
cqe
,
int
num_cqes
);
#endif
/* __BNXT_QPLIB_FP_H__ */
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
浏览文件 @
a5f66725
...
...
@@ -44,6 +44,9 @@
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
static
void
bnxt_qplib_service_creq
(
unsigned
long
data
);
/* Hardware communication channel */
...
...
@@ -279,16 +282,29 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct
creq_qp_event
*
qp_event
)
{
struct
bnxt_qplib_hwq
*
cmdq
=
&
rcfw
->
cmdq
;
struct
creq_qp_error_notification
*
err_event
;
struct
bnxt_qplib_crsq
*
crsqe
;
unsigned
long
flags
;
struct
bnxt_qplib_qp
*
qp
;
u16
cbit
,
blocked
=
0
;
u16
cookie
;
__le16
mcookie
;
u32
qp_id
;
switch
(
qp_event
->
event
)
{
case
CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION
:
err_event
=
(
struct
creq_qp_error_notification
*
)
qp_event
;
qp_id
=
le32_to_cpu
(
err_event
->
xid
);
qp
=
rcfw
->
qp_tbl
[
qp_id
].
qp_handle
;
dev_dbg
(
&
rcfw
->
pdev
->
dev
,
"QPLIB: Received QP error notification"
);
dev_dbg
(
&
rcfw
->
pdev
->
dev
,
"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x
\n
"
,
qp_id
,
err_event
->
req_err_state_reason
,
err_event
->
res_err_state_reason
);
bnxt_qplib_acquire_cq_locks
(
qp
,
&
flags
);
bnxt_qplib_mark_qp_error
(
qp
);
bnxt_qplib_release_cq_locks
(
qp
,
&
flags
);
break
;
default:
/* Command Response */
...
...
@@ -507,6 +523,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
void
bnxt_qplib_free_rcfw_channel
(
struct
bnxt_qplib_rcfw
*
rcfw
)
{
kfree
(
rcfw
->
qp_tbl
);
kfree
(
rcfw
->
crsqe_tbl
);
bnxt_qplib_free_hwq
(
rcfw
->
pdev
,
&
rcfw
->
cmdq
);
bnxt_qplib_free_hwq
(
rcfw
->
pdev
,
&
rcfw
->
creq
);
...
...
@@ -514,7 +531,8 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
}
int
bnxt_qplib_alloc_rcfw_channel
(
struct
pci_dev
*
pdev
,
struct
bnxt_qplib_rcfw
*
rcfw
)
struct
bnxt_qplib_rcfw
*
rcfw
,
int
qp_tbl_sz
)
{
rcfw
->
pdev
=
pdev
;
rcfw
->
creq
.
max_elements
=
BNXT_QPLIB_CREQE_MAX_CNT
;
...
...
@@ -541,6 +559,12 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
if
(
!
rcfw
->
crsqe_tbl
)
goto
fail
;
rcfw
->
qp_tbl_size
=
qp_tbl_sz
;
rcfw
->
qp_tbl
=
kcalloc
(
qp_tbl_sz
,
sizeof
(
struct
bnxt_qplib_qp_node
),
GFP_KERNEL
);
if
(
!
rcfw
->
qp_tbl
)
goto
fail
;
return
0
;
fail:
...
...
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
浏览文件 @
a5f66725
...
...
@@ -148,6 +148,11 @@ struct bnxt_qplib_rcfw_sbuf {
u32
size
;
};
/* One slot of rcfw->qp_tbl: maps a hardware QP id to the driver-side
 * QP object, so the RCFW event handler can recover the qplib_qp when a
 * firmware QP error notification arrives carrying only the xid.
 */
struct bnxt_qplib_qp_node {
	u32 qp_id;              /* QP id */
	void *qp_handle;        /* ptr to qplib_qp */
};
/* RCFW Communication Channels */
struct
bnxt_qplib_rcfw
{
struct
pci_dev
*
pdev
;
...
...
@@ -181,11 +186,13 @@ struct bnxt_qplib_rcfw {
/* Actual Cmd and Resp Queues */
struct
bnxt_qplib_hwq
cmdq
;
struct
bnxt_qplib_crsq
*
crsqe_tbl
;
int
qp_tbl_size
;
struct
bnxt_qplib_qp_node
*
qp_tbl
;
};
void
bnxt_qplib_free_rcfw_channel
(
struct
bnxt_qplib_rcfw
*
rcfw
);
int
bnxt_qplib_alloc_rcfw_channel
(
struct
pci_dev
*
pdev
,
struct
bnxt_qplib_rcfw
*
rcfw
);
struct
bnxt_qplib_rcfw
*
rcfw
,
int
qp_tbl_sz
);
void
bnxt_qplib_disable_rcfw_channel
(
struct
bnxt_qplib_rcfw
*
rcfw
);
int
bnxt_qplib_enable_rcfw_channel
(
struct
pci_dev
*
pdev
,
struct
bnxt_qplib_rcfw
*
rcfw
,
...
...
@@ -207,4 +214,5 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
int
bnxt_qplib_deinit_rcfw
(
struct
bnxt_qplib_rcfw
*
rcfw
);
int
bnxt_qplib_init_rcfw
(
struct
bnxt_qplib_rcfw
*
rcfw
,
struct
bnxt_qplib_ctx
*
ctx
,
int
is_virtfn
);
void
bnxt_qplib_mark_qp_error
(
void
*
qp_handle
);
#endif
/* __BNXT_QPLIB_RCFW_H__ */
drivers/infiniband/hw/bnxt_re/qplib_res.c
浏览文件 @
a5f66725
...
...
@@ -468,9 +468,11 @@ static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
kfree
(
sgid_tbl
->
tbl
);
kfree
(
sgid_tbl
->
hw_id
);
kfree
(
sgid_tbl
->
ctx
);
kfree
(
sgid_tbl
->
vlan
);
sgid_tbl
->
tbl
=
NULL
;
sgid_tbl
->
hw_id
=
NULL
;
sgid_tbl
->
ctx
=
NULL
;
sgid_tbl
->
vlan
=
NULL
;
sgid_tbl
->
max
=
0
;
sgid_tbl
->
active
=
0
;
}
...
...
@@ -491,8 +493,15 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
if
(
!
sgid_tbl
->
ctx
)
goto
out_free2
;
sgid_tbl
->
vlan
=
kcalloc
(
max
,
sizeof
(
u8
),
GFP_KERNEL
);
if
(
!
sgid_tbl
->
vlan
)
goto
out_free3
;
sgid_tbl
->
max
=
max
;
return
0
;
out_free3:
kfree
(
sgid_tbl
->
ctx
);
sgid_tbl
->
ctx
=
NULL
;
out_free2:
kfree
(
sgid_tbl
->
hw_id
);
sgid_tbl
->
hw_id
=
NULL
;
...
...
@@ -514,6 +523,7 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
}
memset
(
sgid_tbl
->
tbl
,
0
,
sizeof
(
struct
bnxt_qplib_gid
)
*
sgid_tbl
->
max
);
memset
(
sgid_tbl
->
hw_id
,
-
1
,
sizeof
(
u16
)
*
sgid_tbl
->
max
);
memset
(
sgid_tbl
->
vlan
,
0
,
sizeof
(
u8
)
*
sgid_tbl
->
max
);
sgid_tbl
->
active
=
0
;
}
...
...
drivers/infiniband/hw/bnxt_re/qplib_res.h
浏览文件 @
a5f66725
...
...
@@ -116,6 +116,7 @@ struct bnxt_qplib_sgid_tbl {
u16
max
;
u16
active
;
void
*
ctx
;
u8
*
vlan
;
};
struct
bnxt_qplib_pkey_tbl
{
...
...
@@ -188,6 +189,7 @@ struct bnxt_qplib_res {
struct
bnxt_qplib_sgid_tbl
sgid_tbl
;
struct
bnxt_qplib_pkey_tbl
pkey_tbl
;
struct
bnxt_qplib_dpi_tbl
dpi_tbl
;
bool
prio
;
};
#define to_bnxt_qplib(ptr, type, member) \
...
...
drivers/infiniband/hw/bnxt_re/qplib_sp.c
浏览文件 @
a5f66725
...
...
@@ -213,6 +213,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
memcpy
(
&
sgid_tbl
->
tbl
[
index
],
&
bnxt_qplib_gid_zero
,
sizeof
(
bnxt_qplib_gid_zero
));
sgid_tbl
->
vlan
[
index
]
=
0
;
sgid_tbl
->
active
--
;
dev_dbg
(
&
res
->
pdev
->
dev
,
"QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x"
,
...
...
@@ -265,28 +266,32 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct
cmdq_add_gid
req
;
struct
creq_add_gid_resp
resp
;
u16
cmd_flags
=
0
;
u32
temp32
[
4
];
u16
temp16
[
3
];
int
rc
;
RCFW_CMD_PREP
(
req
,
ADD_GID
,
cmd_flags
);
memcpy
(
temp32
,
gid
->
data
,
sizeof
(
struct
bnxt_qplib_gid
));
req
.
gid
[
0
]
=
cpu_to_be32
(
temp32
[
3
]);
req
.
gid
[
1
]
=
cpu_to_be32
(
temp32
[
2
]);
req
.
gid
[
2
]
=
cpu_to_be32
(
temp32
[
1
]);
req
.
gid
[
3
]
=
cpu_to_be32
(
temp32
[
0
]);
if
(
vlan_id
!=
0xFFFF
)
req
.
vlan
=
cpu_to_le16
((
vlan_id
&
CMDQ_ADD_GID_VLAN_VLAN_ID_MASK
)
|
CMDQ_ADD_GID_VLAN_TPID_TPID_8100
|
CMDQ_ADD_GID_VLAN_VLAN_EN
);
req
.
gid
[
0
]
=
cpu_to_be32
(((
u32
*
)
gid
->
data
)[
3
]);
req
.
gid
[
1
]
=
cpu_to_be32
(((
u32
*
)
gid
->
data
)[
2
]);
req
.
gid
[
2
]
=
cpu_to_be32
(((
u32
*
)
gid
->
data
)[
1
]);
req
.
gid
[
3
]
=
cpu_to_be32
(((
u32
*
)
gid
->
data
)[
0
]);
/*
* driver should ensure that all RoCE traffic is always VLAN
* tagged if RoCE traffic is running on non-zero VLAN ID or
* RoCE traffic is running on non-zero Priority.
*/
if
((
vlan_id
!=
0xFFFF
)
||
res
->
prio
)
{
if
(
vlan_id
!=
0xFFFF
)
req
.
vlan
=
cpu_to_le16
(
vlan_id
&
CMDQ_ADD_GID_VLAN_VLAN_ID_MASK
);
req
.
vlan
|=
cpu_to_le16
(
CMDQ_ADD_GID_VLAN_TPID_TPID_8100
|
CMDQ_ADD_GID_VLAN_VLAN_EN
);
}
/* MAC in network format */
memcpy
(
temp16
,
smac
,
6
);
req
.
src_mac
[
0
]
=
cpu_to_be16
(
temp16
[
0
]);
req
.
src_mac
[
1
]
=
cpu_to_be16
(
temp16
[
1
]);
req
.
src_mac
[
2
]
=
cpu_to_be16
(
temp16
[
2
]);
req
.
src_mac
[
0
]
=
cpu_to_be16
(((
u16
*
)
smac
)[
0
]);
req
.
src_mac
[
1
]
=
cpu_to_be16
(((
u16
*
)
smac
)[
1
]);
req
.
src_mac
[
2
]
=
cpu_to_be16
(((
u16
*
)
smac
)[
2
]);
rc
=
bnxt_qplib_rcfw_send_message
(
rcfw
,
(
void
*
)
&
req
,
(
void
*
)
&
resp
,
NULL
,
0
);
...
...
@@ -297,6 +302,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
/* Add GID to the sgid_tbl */
memcpy
(
&
sgid_tbl
->
tbl
[
free_idx
],
gid
,
sizeof
(
*
gid
));
sgid_tbl
->
active
++
;
if
(
vlan_id
!=
0xFFFF
)
sgid_tbl
->
vlan
[
free_idx
]
=
1
;
dev_dbg
(
&
res
->
pdev
->
dev
,
"QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x"
,
free_idx
,
sgid_tbl
->
hw_id
[
free_idx
],
sgid_tbl
->
active
);
...
...
@@ -306,6 +314,43 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return
0
;
}
/* bnxt_qplib_update_sgid() - rewrite an existing hardware GID entry.
 * @sgid_tbl: SGID table the entry belongs to
 * @gid:      GID value to program (16 bytes in gid->data)
 * @gid_idx:  hardware GID table index to modify
 * @smac:     source MAC address (6 bytes, network byte order)
 *
 * Issues a MODIFY_GID command to firmware over the RCFW channel.  GID
 * words are written big-endian with the word order reversed, mirroring
 * bnxt_qplib_add_sgid().  When priority handling is enabled on the
 * device, the VLAN-enable/TPID bits are set so RoCE traffic stays
 * tagged (see the rationale comment in bnxt_qplib_add_sgid()).
 *
 * Return: 0 on success, or the error from the firmware channel.
 */
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx,
			   u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp;
	struct cmdq_modify_gid req;
	int rc;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);

	/* NOTE(review): the u32 casts assume gid->data is 4-byte
	 * aligned — TODO confirm; bnxt_qplib_add_sgid() uses the same
	 * pattern.  Word order is reversed (data[3]..data[0]).
	 */
	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		/* NOTE(review): |= presumes RCFW_CMD_PREP zeroes req —
		 * otherwise req.vlan is read uninitialized; confirm.
		 */
		req.vlan |= cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
					CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

	req.gid_index = cpu_to_le16(gid_idx);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	return rc;
}
/* pkeys */
int
bnxt_qplib_get_pkey
(
struct
bnxt_qplib_res
*
res
,
struct
bnxt_qplib_pkey_tbl
*
pkey_tbl
,
u16
index
,
...
...
drivers/infiniband/hw/bnxt_re/qplib_sp.h
浏览文件 @
a5f66725
...
...
@@ -135,6 +135,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int
bnxt_qplib_add_sgid
(
struct
bnxt_qplib_sgid_tbl
*
sgid_tbl
,
struct
bnxt_qplib_gid
*
gid
,
u8
*
mac
,
u16
vlan_id
,
bool
update
,
u32
*
index
);
int
bnxt_qplib_update_sgid
(
struct
bnxt_qplib_sgid_tbl
*
sgid_tbl
,
struct
bnxt_qplib_gid
*
gid
,
u16
gid_idx
,
u8
*
smac
);
int
bnxt_qplib_get_pkey
(
struct
bnxt_qplib_res
*
res
,
struct
bnxt_qplib_pkey_tbl
*
pkey_tbl
,
u16
index
,
u16
*
pkey
);
...
...
drivers/infiniband/hw/bnxt_re/roce_hsi.h
浏览文件 @
a5f66725
...
...
@@ -1473,8 +1473,8 @@ struct cmdq_modify_gid {
u8
resp_size
;
u8
reserved8
;
__le64
resp_addr
;
__
l
e32
gid
[
4
];
__
l
e16
src_mac
[
3
];
__
b
e32
gid
[
4
];
__
b
e16
src_mac
[
3
];
__le16
vlan
;
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0
...
...
drivers/infiniband/hw/cxgb3/iwch.c
浏览文件 @
a5f66725
...
...
@@ -45,7 +45,6 @@
MODULE_AUTHOR
(
"Boyd Faulkner, Steve Wise"
);
MODULE_DESCRIPTION
(
"Chelsio T3 RDMA Driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
static
void
open_rnic_dev
(
struct
t3cdev
*
);
static
void
close_rnic_dev
(
struct
t3cdev
*
);
...
...
drivers/infiniband/hw/cxgb4/device.c
浏览文件 @
a5f66725
...
...
@@ -44,7 +44,6 @@
MODULE_AUTHOR
(
"Steve Wise"
);
MODULE_DESCRIPTION
(
"Chelsio T4/T5 RDMA Driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
static
int
allow_db_fc_on_t5
;
module_param
(
allow_db_fc_on_t5
,
int
,
0644
);
...
...
drivers/infiniband/hw/hfi1/driver.c
浏览文件 @
a5f66725
...
...
@@ -96,7 +96,6 @@ MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_DESCRIPTION
(
"Intel Omni-Path Architecture driver"
);
MODULE_VERSION
(
HFI1_DRIVER_VERSION
);
/*
* MAX_PKT_RCV is the max # if packets processed per receive interrupt.
...
...
drivers/infiniband/hw/hfi1/qp.c
浏览文件 @
a5f66725
...
...
@@ -601,7 +601,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
wqe
=
rvt_get_swqe_ptr
(
qp
,
qp
->
s_last
);
send_context
=
qp_to_send_context
(
qp
,
priv
->
s_sc
);
seq_printf
(
s
,
"N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x
(%u %u %u %u
%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d
\n
"
,
"N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x
S(%u %u %u %u %u %u %u) R(
%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d
\n
"
,
iter
->
n
,
qp_idle
(
qp
)
?
"I"
:
"B"
,
qp
->
ibqp
.
qp_num
,
...
...
@@ -624,6 +624,10 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp
->
s_last
,
qp
->
s_acked
,
qp
->
s_cur
,
qp
->
s_tail
,
qp
->
s_head
,
qp
->
s_size
,
qp
->
s_avail
,
/* ack_queue ring pointers, size */
qp
->
s_tail_ack_queue
,
qp
->
r_head_ack_queue
,
HFI1_MAX_RDMA_ATOMIC
,
/* remote QP info */
qp
->
remote_qpn
,
rdma_ah_get_dlid
(
&
qp
->
remote_ah_attr
),
rdma_ah_get_sl
(
&
qp
->
remote_ah_attr
),
...
...
drivers/infiniband/hw/i40iw/i40iw_main.c
浏览文件 @
a5f66725
...
...
@@ -77,7 +77,6 @@ MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
MODULE_AUTHOR
(
"Intel Corporation, <e1000-rdma@lists.sourceforge.net>"
);
MODULE_DESCRIPTION
(
"Intel(R) Ethernet Connection X722 iWARP RDMA Driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
static
struct
i40e_client
i40iw_client
;
static
char
i40iw_client_name
[
I40E_CLIENT_STR_LENGTH
]
=
"i40iw"
;
...
...
drivers/infiniband/hw/mlx4/main.c
浏览文件 @
a5f66725
...
...
@@ -70,7 +70,6 @@
MODULE_AUTHOR
(
"Roland Dreier"
);
MODULE_DESCRIPTION
(
"Mellanox ConnectX HCA InfiniBand driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
int
mlx4_ib_sm_guid_assign
=
0
;
module_param_named
(
sm_guid_assign
,
mlx4_ib_sm_guid_assign
,
int
,
0444
);
...
...
drivers/infiniband/hw/mlx5/main.c
浏览文件 @
a5f66725
...
...
@@ -67,7 +67,6 @@
MODULE_AUTHOR
(
"Eli Cohen <eli@mellanox.com>"
);
MODULE_DESCRIPTION
(
"Mellanox Connect-IB HCA IB driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRIVER_VERSION
);
static
char
mlx5_version
[]
=
DRIVER_NAME
": Mellanox Connect-IB Infiniband driver v"
...
...
@@ -1176,7 +1175,7 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
if
(
req
->
num_low_latency_bfregs
>
req
->
total_num_bfregs
-
1
)
return
-
EINVAL
;
mlx5_ib_dbg
(
dev
,
"uar_4k: fw support %s, lib support %s, user requested %d bfregs, alloated %d, using %d sys pages
\n
"
,
mlx5_ib_dbg
(
dev
,
"uar_4k: fw support %s, lib support %s, user requested %d bfregs, allo
c
ated %d, using %d sys pages
\n
"
,
MLX5_CAP_GEN
(
dev
->
mdev
,
uar_4k
)
?
"yes"
:
"no"
,
lib_uar_4k
?
"yes"
:
"no"
,
ref_bfregs
,
req
->
total_num_bfregs
,
*
num_sys_pages
);
...
...
drivers/infiniband/hw/mthca/mthca_main.c
浏览文件 @
a5f66725
...
...
@@ -49,7 +49,6 @@
MODULE_AUTHOR
(
"Roland Dreier"
);
MODULE_DESCRIPTION
(
"Mellanox InfiniBand HCA low-level driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
...
...
drivers/infiniband/hw/nes/nes.c
浏览文件 @
a5f66725
...
...
@@ -63,7 +63,6 @@
MODULE_AUTHOR
(
"NetEffect"
);
MODULE_DESCRIPTION
(
"NetEffect RNIC Low-level iWARP Driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
int
interrupt_mod_interval
=
0
;
...
...
drivers/infiniband/hw/ocrdma/ocrdma_main.c
浏览文件 @
a5f66725
...
...
@@ -58,7 +58,6 @@
#include "ocrdma_stats.h"
#include <rdma/ocrdma-abi.h>
MODULE_VERSION
(
OCRDMA_ROCE_DRV_VERSION
);
MODULE_DESCRIPTION
(
OCRDMA_ROCE_DRV_DESC
" "
OCRDMA_ROCE_DRV_VERSION
);
MODULE_AUTHOR
(
"Emulex Corporation"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
...
...
drivers/infiniband/hw/qedr/main.c
浏览文件 @
a5f66725
...
...
@@ -47,7 +47,6 @@
MODULE_DESCRIPTION
(
"QLogic 40G/100G ROCE Driver"
);
MODULE_AUTHOR
(
"QLogic Corporation"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
QEDR_MODULE_VERSION
);
#define QEDR_WQ_MULTIPLIER_DFT (3)
...
...
@@ -778,6 +777,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
if
(
rc
)
goto
init_err
;
dev
->
user_dpm_enabled
=
dev_info
.
user_dpm_enabled
;
dev
->
num_hwfns
=
dev_info
.
common
.
num_hwfns
;
dev
->
rdma_ctx
=
dev
->
ops
->
rdma_get_rdma_ctx
(
cdev
);
...
...
drivers/infiniband/hw/qedr/qedr.h
浏览文件 @
a5f66725
...
...
@@ -41,7 +41,6 @@
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"
#define QEDR_MODULE_VERSION "8.10.10.0"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name)
...
...
@@ -163,6 +162,8 @@ struct qedr_dev {
struct
qedr_qp
*
gsi_qp
;
unsigned
long
enet_state
;
u8
user_dpm_enabled
;
};
#define QEDR_MAX_SQ_PBL (0x8000)
...
...
drivers/infiniband/hw/qedr/verbs.c
浏览文件 @
a5f66725
...
...
@@ -376,6 +376,9 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
memset
(
&
uresp
,
0
,
sizeof
(
uresp
));
uresp
.
dpm_enabled
=
dev
->
user_dpm_enabled
;
uresp
.
wids_enabled
=
1
;
uresp
.
wid_count
=
oparams
.
wid_count
;
uresp
.
db_pa
=
ctx
->
dpi_phys_addr
;
uresp
.
db_size
=
ctx
->
dpi_size
;
uresp
.
max_send_wr
=
dev
->
attr
.
max_sqe
;
...
...
drivers/infiniband/hw/qib/qib_driver.c
浏览文件 @
a5f66725
...
...
@@ -66,7 +66,6 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_AUTHOR
(
"Intel <ibsupport@intel.com>"
);
MODULE_DESCRIPTION
(
"Intel IB driver"
);
MODULE_VERSION
(
QIB_DRIVER_VERSION
);
/*
* QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
...
...
drivers/infiniband/hw/qib/qib_mad.c
浏览文件 @
a5f66725
...
...
@@ -871,8 +871,6 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
ib_dispatch_event
(
&
event
);
}
ret
=
subn_get_portinfo
(
smp
,
ibdev
,
port
);
/* restore re-reg bit per o14-12.2.1 */
pip
->
clientrereg_resv_subnetto
|=
clientrereg
;
...
...
drivers/infiniband/hw/usnic/usnic_ib_main.c
浏览文件 @
a5f66725
...
...
@@ -409,6 +409,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev
->
ib_dev
.
query_port
=
usnic_ib_query_port
;
us_ibdev
->
ib_dev
.
query_pkey
=
usnic_ib_query_pkey
;
us_ibdev
->
ib_dev
.
query_gid
=
usnic_ib_query_gid
;
us_ibdev
->
ib_dev
.
get_netdev
=
usnic_get_netdev
;
us_ibdev
->
ib_dev
.
get_link_layer
=
usnic_ib_port_link_layer
;
us_ibdev
->
ib_dev
.
alloc_pd
=
usnic_ib_alloc_pd
;
us_ibdev
->
ib_dev
.
dealloc_pd
=
usnic_ib_dealloc_pd
;
...
...
@@ -720,7 +721,6 @@ static void __exit usnic_ib_destroy(void)
MODULE_DESCRIPTION
(
"Cisco VIC (usNIC) Verbs Driver"
);
MODULE_AUTHOR
(
"Upinder Malhi <umalhi@cisco.com>"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
module_param
(
usnic_log_lvl
,
uint
,
S_IRUGO
|
S_IWUSR
);
module_param
(
usnic_ib_share_vf
,
uint
,
S_IRUGO
|
S_IWUSR
);
MODULE_PARM_DESC
(
usnic_log_lvl
,
" Off=0, Err=1, Info=2, Debug=3"
);
...
...
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
浏览文件 @
a5f66725
...
...
@@ -226,27 +226,6 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
spin_unlock
(
&
vf
->
lock
);
}
/* Map an Ethernet link speed (Mbps) to the nearest IB active_speed /
 * active_width pair reported through query_port.  Everything above the
 * lowest bucket reports a 4X width; only the speed field varies.
 */
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
				  u8 *active_width)
{
	if (speed <= 10000) {
		/* slowest bucket is the only one with 1X width */
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
		return;
	}

	*active_width = IB_WIDTH_4X;
	if (speed <= 20000)
		*active_speed = IB_SPEED_DDR;
	else if (speed <= 30000)
		*active_speed = IB_SPEED_QDR;
	else if (speed <= 40000)
		*active_speed = IB_SPEED_FDR10;
	else
		*active_speed = IB_SPEED_EDR;
}
static
int
create_qp_validate_user_data
(
struct
usnic_ib_create_qp_cmd
cmd
)
{
if
(
cmd
.
spec
.
trans_type
<=
USNIC_TRANSPORT_UNKNOWN
||
...
...
@@ -326,12 +305,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
struct
ib_port_attr
*
props
)
{
struct
usnic_ib_dev
*
us_ibdev
=
to_usdev
(
ibdev
);
struct
ethtool_link_ksettings
cmd
;
usnic_dbg
(
"
\n
"
);
mutex_lock
(
&
us_ibdev
->
usdev_lock
);
__ethtool_get_link_ksettings
(
us_ibdev
->
netdev
,
&
cmd
);
if
(
!
ib_get_eth_speed
(
ibdev
,
port
,
&
props
->
active_speed
,
&
props
->
active_width
))
{
mutex_unlock
(
&
us_ibdev
->
usdev_lock
);
return
-
EINVAL
;
}
/* props being zeroed by the caller, avoid zeroing it here */
props
->
lid
=
0
;
...
...
@@ -355,8 +338,6 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
props
->
pkey_tbl_len
=
1
;
props
->
bad_pkey_cntr
=
0
;
props
->
qkey_viol_cntr
=
0
;
eth_speed_to_ib_speed
(
cmd
.
base
.
speed
,
&
props
->
active_speed
,
&
props
->
active_width
);
props
->
max_mtu
=
IB_MTU_4096
;
props
->
active_mtu
=
iboe_get_mtu
(
us_ibdev
->
ufdev
->
mtu
);
/* Userspace will adjust for hdrs */
...
...
@@ -424,6 +405,16 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return
0
;
}
/* usnic_get_netdev() - ib_dev.get_netdev callback.
 * @device:   IB device to resolve
 * @port_num: port number (unused; usNIC exposes a single netdev)
 *
 * Returns the backing net_device with a reference held for the caller
 * (per the get_netdev contract), or NULL if none is attached.
 *
 * Fix: snapshot us_ibdev->netdev once.  The original re-read the field
 * for the NULL check, the dev_hold() and the return, so a concurrent
 * update could hold a reference on one device yet return another (or
 * return a pointer without holding it).
 */
struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(device);
	struct net_device *netdev = us_ibdev->netdev;

	if (netdev)
		dev_hold(netdev);

	return netdev;
}
int
usnic_ib_query_pkey
(
struct
ib_device
*
ibdev
,
u8
port
,
u16
index
,
u16
*
pkey
)
{
...
...
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
浏览文件 @
a5f66725
...
...
@@ -48,6 +48,7 @@ int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
struct
ib_qp_init_attr
*
qp_init_attr
);
int
usnic_ib_query_gid
(
struct
ib_device
*
ibdev
,
u8
port
,
int
index
,
union
ib_gid
*
gid
);
struct
net_device
*
usnic_get_netdev
(
struct
ib_device
*
device
,
u8
port_num
);
int
usnic_ib_query_pkey
(
struct
ib_device
*
ibdev
,
u8
port
,
u16
index
,
u16
*
pkey
);
struct
ib_pd
*
usnic_ib_alloc_pd
(
struct
ib_device
*
ibdev
,
...
...
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
浏览文件 @
a5f66725
...
...
@@ -1119,5 +1119,4 @@ module_exit(pvrdma_cleanup);
MODULE_AUTHOR
(
"VMware, Inc"
);
MODULE_DESCRIPTION
(
"VMware Paravirtual RDMA driver"
);
MODULE_VERSION
(
DRV_VERSION
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
drivers/infiniband/sw/rxe/rxe.c
浏览文件 @
a5f66725
...
...
@@ -38,7 +38,6 @@
MODULE_AUTHOR
(
"Bob Pearson, Frank Zago, John Groves, Kamal Heib"
);
MODULE_DESCRIPTION
(
"Soft RDMA transport"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
"0.2"
);
/* free resources for all ports on a device */
static
void
rxe_cleanup_ports
(
struct
rxe_dev
*
rxe
)
...
...
drivers/infiniband/sw/rxe/rxe_verbs.c
浏览文件 @
a5f66725
...
...
@@ -51,40 +51,16 @@ static int rxe_query_device(struct ib_device *dev,
return
0
;
}
/* Translate an Ethernet link speed (Mbps) into the IB active_speed /
 * active_width pair rxe reports from query_port.  Speeds at or below
 * 10G map to a 1X width; everything faster reports 4X.
 */
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = (speed <= 1000) ? IB_SPEED_SDR
						: IB_SPEED_FDR10;
		return;
	}

	*active_width = IB_WIDTH_4X;
	if (speed <= 20000)
		*active_speed = IB_SPEED_DDR;
	else if (speed <= 30000)
		*active_speed = IB_SPEED_QDR;
	else if (speed <= 40000)
		*active_speed = IB_SPEED_FDR10;
	else
		*active_speed = IB_SPEED_EDR;
}
static
int
rxe_query_port
(
struct
ib_device
*
dev
,
u8
port_num
,
struct
ib_port_attr
*
attr
)
{
struct
rxe_dev
*
rxe
=
to_rdev
(
dev
);
struct
rxe_port
*
port
;
u32
speed
;
int
rc
=
-
EINVAL
;
if
(
unlikely
(
port_num
!=
1
))
{
pr_warn
(
"invalid port_number %d
\n
"
,
port_num
);
goto
err1
;
goto
out
;
}
port
=
&
rxe
->
port
;
...
...
@@ -93,29 +69,12 @@ static int rxe_query_port(struct ib_device *dev,
*
attr
=
port
->
attr
;
mutex_lock
(
&
rxe
->
usdev_lock
);
if
(
rxe
->
ndev
->
ethtool_ops
->
get_link_ksettings
)
{
struct
ethtool_link_ksettings
ks
;
rxe
->
ndev
->
ethtool_ops
->
get_link_ksettings
(
rxe
->
ndev
,
&
ks
);
speed
=
ks
.
base
.
speed
;
}
else
if
(
rxe
->
ndev
->
ethtool_ops
->
get_settings
)
{
struct
ethtool_cmd
cmd
;
rxe
->
ndev
->
ethtool_ops
->
get_settings
(
rxe
->
ndev
,
&
cmd
);
speed
=
cmd
.
speed
;
}
else
{
pr_warn
(
"%s speed is unknown, defaulting to 1000
\n
"
,
rxe
->
ndev
->
name
);
speed
=
1000
;
}
rxe_eth_speed_to_ib_speed
(
speed
,
&
attr
->
active_speed
,
&
attr
->
active_width
);
rc
=
ib_get_eth_speed
(
dev
,
port_num
,
&
attr
->
active_speed
,
&
attr
->
active_width
);
mutex_unlock
(
&
rxe
->
usdev_lock
);
return
0
;
err1:
return
-
EINVAL
;
out:
return
rc
;
}
static
int
rxe_query_gid
(
struct
ib_device
*
device
,
...
...
drivers/infiniband/ulp/ipoib/ipoib_main.c
浏览文件 @
a5f66725
...
...
@@ -60,7 +60,6 @@ const char ipoib_driver_version[] = DRV_VERSION;
MODULE_AUTHOR
(
"Roland Dreier"
);
MODULE_DESCRIPTION
(
"IP-over-InfiniBand net driver"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
int
ipoib_sendq_size
__read_mostly
=
IPOIB_TX_RING_SIZE
;
int
ipoib_recvq_size
__read_mostly
=
IPOIB_RX_RING_SIZE
;
...
...
drivers/infiniband/ulp/iser/iscsi_iser.c
浏览文件 @
a5f66725
...
...
@@ -77,7 +77,6 @@
MODULE_DESCRIPTION
(
"iSER (iSCSI Extensions for RDMA) Datamover"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_AUTHOR
(
"Alex Nezhinsky, Dan Bar Dov, Or Gerlitz"
);
MODULE_VERSION
(
DRV_VER
);
static
struct
scsi_host_template
iscsi_iser_sht
;
static
struct
iscsi_transport
iscsi_iser_transport
;
...
...
drivers/infiniband/ulp/isert/ib_isert.c
浏览文件 @
a5f66725
...
...
@@ -2710,7 +2710,6 @@ static void __exit isert_exit(void)
}
MODULE_DESCRIPTION
(
"iSER-Target for mainline target infrastructure"
);
MODULE_VERSION
(
"1.0"
);
MODULE_AUTHOR
(
"nab@Linux-iSCSI.org"
);
MODULE_LICENSE
(
"GPL"
);
...
...
drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
浏览文件 @
a5f66725
...
...
@@ -1078,4 +1078,3 @@ module_exit(opa_vnic_deinit);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_AUTHOR
(
"Intel Corporation"
);
MODULE_DESCRIPTION
(
"Intel OPA Virtual Network driver"
);
MODULE_VERSION
(
DRV_VERSION
);
drivers/infiniband/ulp/srp/ib_srp.c
浏览文件 @
a5f66725
...
...
@@ -62,7 +62,6 @@
MODULE_AUTHOR
(
"Roland Dreier"
);
MODULE_DESCRIPTION
(
"InfiniBand SCSI RDMA Protocol initiator"
);
MODULE_LICENSE
(
"Dual BSD/GPL"
);
MODULE_VERSION
(
DRV_VERSION
);
MODULE_INFO
(
release_date
,
DRV_RELDATE
);
#if !defined(CONFIG_DYNAMIC_DEBUG)
...
...
include/rdma/ib_verbs.h
浏览文件 @
a5f66725
...
...
@@ -3565,6 +3565,7 @@ void ib_drain_qp(struct ib_qp *qp);
int
ib_resolve_eth_dmac
(
struct
ib_device
*
device
,
struct
rdma_ah_attr
*
ah_attr
);
int
ib_get_eth_speed
(
struct
ib_device
*
dev
,
u8
port_num
,
u8
*
speed
,
u8
*
width
);
static
inline
u8
*
rdma_ah_retrieve_dmac
(
struct
rdma_ah_attr
*
attr
)
{
...
...
include/uapi/rdma/qedr-abi.h
浏览文件 @
a5f66725
...
...
@@ -49,6 +49,9 @@ struct qedr_alloc_ucontext_resp {
__u32
sges_per_recv_wr
;
__u32
sges_per_srq_wr
;
__u32
max_cqes
;
__u8
dpm_enabled
;
__u8
wids_enabled
;
__u16
wid_count
;
};
struct
qedr_alloc_pd_ureq
{
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录