OpenHarmony / kernel_linux

Commit d68478da
Merge branch 'mlx5-shared' into k.o/for-4.9
Authored Aug 25, 2016 by Doug Ledford
Parents: 204f69ba, 0c41284c

Showing 46 changed files with 2493 additions and 2340 deletions (+2493 −2340)
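Both merged branches carry one recurring change, visible in nearly every hunk below: mlx5 firmware commands stop going through hand-packed mailbox structs (struct mlx5_create_cq_mbox_in and friends, with explicit cpu_to_be32()/cpu_to_be16() byte-swapping) and instead use the auto-generated mlx5_ifc command layouts through the MLX5_SET()/MLX5_GET()/MLX5_ADDR_OF() accessors. A minimal before/after sketch of that pattern — the wrapper function names here are hypothetical, while the field names are taken from the cq.c hunks below:

/* Old style: a fixed C struct per command; the caller packs bit fields
 * into shared words and handles endianness by hand. (Sketch only.) */
static void cq_size_old(struct mlx5_create_cq_mbox_in *in, int entries, int uar)
{
	in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | uar);
}

/* New style: an opaque u32 command buffer addressed through the mlx5_ifc
 * layout; MLX5_SET() does the shifting, masking and byte-swapping. */
static void cq_size_new(u32 *in, int entries, int uar)
{
	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, uar);
}

The file list also shows the other half of the merge: a new drivers/net/ethernet/mellanox/mlx5/core/lag.c (+602 lines), wired into the Makefile in the last hunk of this page.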
Changed files:

  drivers/infiniband/hw/mlx5/cq.c                          +63   −47
  drivers/infiniband/hw/mlx5/main.c                         +4    −9
  drivers/infiniband/hw/mlx5/mlx5_ib.h                      +1    −1
  drivers/infiniband/hw/mlx5/mr.c                         +103   −81
  drivers/infiniband/hw/mlx5/qp.c                          +95   −94
  drivers/net/ethernet/mellanox/mlx5/core/Makefile          +1    −1
  drivers/net/ethernet/mellanox/mlx5/core/cmd.c           +151  −122
  drivers/net/ethernet/mellanox/mlx5/core/cq.c             +41   −68
  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c        +27   −23
  drivers/net/ethernet/mellanox/mlx5/core/en.h              +0   −34
  drivers/net/ethernet/mellanox/mlx5/core/en_common.c      +13   −10
  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c      +1    −2
  drivers/net/ethernet/mellanox/mlx5/core/en_main.c        +28   −27
  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c          +2    −2
  drivers/net/ethernet/mellanox/mlx5/core/eq.c             +31   −49
  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c        +33   −65
  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c        +126  −106
  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h          +8    −0
  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c       +101   −17
  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h        +10    −0
  drivers/net/ethernet/mellanox/mlx5/core/fw.c             +10   −33
  drivers/net/ethernet/mellanox/mlx5/core/lag.c           +602    −0
  drivers/net/ethernet/mellanox/mlx5/core/mad.c            +19   −22
  drivers/net/ethernet/mellanox/mlx5/core/main.c           +71   −74
  drivers/net/ethernet/mellanox/mlx5/core/mcg.c            +16   −56
  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h      +29   −15
  drivers/net/ethernet/mellanox/mlx5/core/mr.c             +74  −115
  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c      +51  −105
  drivers/net/ethernet/mellanox/mlx5/core/pd.c             +11   −50
  drivers/net/ethernet/mellanox/mlx5/core/port.c           +71  −110
  drivers/net/ethernet/mellanox/mlx5/core/qp.c            +173  −126
  drivers/net/ethernet/mellanox/mlx5/core/rl.c              +3    −8
  drivers/net/ethernet/mellanox/mlx5/core/sriov.c          +16    −3
  drivers/net/ethernet/mellanox/mlx5/core/srq.c            +21   −28
  drivers/net/ethernet/mellanox/mlx5/core/transobj.c       +61  −122
  drivers/net/ethernet/mellanox/mlx5/core/uar.c            +11   −56
  drivers/net/ethernet/mellanox/mlx5/core/vport.c          +35   −59
  drivers/net/ethernet/mellanox/mlx5/core/vxlan.c           +6   −23
  include/linux/mlx5/cq.h                                   +3    −3
  include/linux/mlx5/device.h                              +18  −423
  include/linux/mlx5/driver.h                              +19   −12
  include/linux/mlx5/fs.h                                   +6    −0
  include/linux/mlx5/mlx5_ifc.h                           +279   −18
  include/linux/mlx5/port.h                                +37    −3
  include/linux/mlx5/qp.h                                  +10  −118
  include/linux/mlx5/vport.h                                +2    −0
drivers/infiniband/hw/mlx5/cq.c

@@ -747,14 +747,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
-			  int entries, struct mlx5_create_cq_mbox_in **cqb,
+			  int entries, u32 **cqb,
 			  int *cqe_size, int *index, int *inlen)
 {
 	struct mlx5_ib_create_cq ucmd;
 	size_t ucmdlen;
 	int page_shift;
+	__be64 *pas;
 	int npages;
 	int ncont;
+	void *cqc;
 	int err;

 	ucmdlen =
...
@@ -792,14 +794,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

-	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
+	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
 	*cqb = mlx5_vzalloc(*inlen);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_db;
 	}

-	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
-	(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
+
+	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+	MLX5_SET(cqc, cqc, log_page_size,
+		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

 	*index = to_mucontext(context)->uuari.uars[0].index;
...
@@ -834,9 +842,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 			    int entries, int cqe_size,
-			    struct mlx5_create_cq_mbox_in **cqb,
-			    int *index, int *inlen)
+			    u32 **cqb, int *index, int *inlen)
 {
+	__be64 *pas;
+	void *cqc;
 	int err;

 	err = mlx5_db_alloc(dev->mdev, &cq->db);
...
@@ -853,15 +862,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	init_cq_buf(cq, &cq->buf);

-	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
+	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
 	*cqb = mlx5_vzalloc(*inlen);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_buf;
 	}
-	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

-	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+	mlx5_fill_page_array(&cq->buf.buf, pas);
+
+	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+	MLX5_SET(cqc, cqc, log_page_size,
+		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
 	*index = dev->mdev->priv.uuari.uars[0].index;

 	return 0;
...
@@ -895,11 +910,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 {
 	int entries = attr->cqe;
 	int vector = attr->comp_vector;
-	struct mlx5_create_cq_mbox_in *cqb = NULL;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_cq *cq;
 	int uninitialized_var(index);
 	int uninitialized_var(inlen);
+	u32 *cqb = NULL;
+	void *cqc;
 	int cqe_size;
 	unsigned int irqn;
 	int eqn;
...
@@ -945,19 +961,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}

-	cq->cqe_size = cqe_size;
-	cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-
-	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
-		cqb->ctx.cqe_sz_flags |= (1 << 1);
-	cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
 	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
 	if (err)
 		goto err_cqb;

-	cqb->ctx.c_eqn = cpu_to_be16(eqn);
-	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
+	cq->cqe_size = cqe_size;
+
+	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+	MLX5_SET(cqc, cqc, uar_page, index);
+	MLX5_SET(cqc, cqc, c_eqn, eqn);
+	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+		MLX5_SET(cqc, cqc, oi, 1);

 	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
 	if (err)
...
@@ -1088,27 +1105,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
-	struct mlx5_modify_cq_mbox_in *in;
 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
 	struct mlx5_ib_cq *mcq = to_mcq(cq);
 	int err;
-	u32 fsel;

 	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
 		return -ENOSYS;

-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	in->cqn = cpu_to_be32(mcq->mcq.cqn);
-	fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
-	in->ctx.cq_period = cpu_to_be16(cq_period);
-	in->ctx.cq_max_count = cpu_to_be16(cq_count);
-	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
-	kfree(in);
-
+	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
+					     cq_period, cq_count);
 	if (err)
 		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
...
@@ -1241,9 +1246,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
-	struct mlx5_modify_cq_mbox_in *in;
+	void *cqc;
+	u32 *in;
 	int err;
 	int npas;
+	__be64 *pas;
 	int page_shift;
 	int inlen;
 	int uninitialized_var(cqe_size);
...
@@ -1285,28 +1292,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	if (err)
 		goto ex;

-	inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
+		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
+
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
 		err = -ENOMEM;
 		goto ex_resize;
 	}

+	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
 	if (udata)
 		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
-				     in->pas, 0);
+				     pas, 0);
 	else
-		mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
-
-	in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
-				       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
-				       MLX5_MODIFY_CQ_MASK_PG_SIZE);
-	in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-	in->ctx.page_offset = 0;
-	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
-	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
-	in->cqn = cpu_to_be32(cq->mcq.cqn);
+		mlx5_fill_page_array(&cq->resize_buf->buf, pas);
+
+	MLX5_SET(modify_cq_in, in,
+		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
+		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+		 MLX5_MODIFY_CQ_MASK_PG_SIZE);
+
+	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+
+	MLX5_SET(cqc, cqc, log_page_size,
+		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+
+	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
+	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

 	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
 	if (err)
...
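A note on the size computation that recurs in the hunks above: MLX5_ST_SZ_BYTES() and MLX5_FLD_SZ_BYTES() derive command sizes from the generated mlx5_ifc layouts rather than from hand-written C structs. Roughly how these macros expand (paraphrasing include/linux/mlx5/device.h from this kernel generation — treat the exact spellings as an assumption):

/* Approximate expansions; the real macros live in
 * include/linux/mlx5/device.h and are driven by the generated
 * struct mlx5_ifc_*_bits layouts. */
#define MLX5_ST_SZ_BYTES(typ)        (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ)           (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_FLD_SZ_BYTES(typ, fld)  (__mlx5_bit_sz(typ, fld) / 8)

So `MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont` is the base command plus one 64-bit PAS entry per translation page, replacing the old `sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont`.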
drivers/infiniband/hw/mlx5/main.c

@@ -233,23 +233,19 @@ static int set_roce_addr(struct ib_device *device, u8 port_num,
 			 const union ib_gid *gid,
 			 const struct ib_gid_attr *attr)
 {
 	struct mlx5_ib_dev *dev = to_mdev(device);
-	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)];
-	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
+	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
 	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

 	if (ll != IB_LINK_LAYER_ETHERNET)
 		return -EINVAL;

-	memset(in, 0, sizeof(in));
-
 	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

 	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
 	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
-
-	memset(out, 0, sizeof(out));
 	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 }
...
@@ -752,8 +748,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
 			       &props->active_width);
 	if (err)
 		goto out;
-	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
-					 port);
+	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
 	if (err)
 		goto out;
...
drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -504,7 +504,7 @@ struct mlx5_ib_mr {
 	int			umred;
 	int			npages;
 	struct mlx5_ib_dev     *dev;
-	struct mlx5_create_mkey_mbox_out out;
+	u32			out[MLX5_ST_SZ_DW(create_mkey_out)];
 	struct mlx5_core_sig_ctx    *sig;
 	int			live;
 	void			*descs_alloc;
...
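The struct change above is the output-side half of the same conversion: the create_mkey result is now kept as a raw dword array sized from the layout (MLX5_ST_SZ_DW() counts 32-bit words) and decoded symbolically, as reg_mr_callback() does in the mr.c hunk just below. A minimal sketch of that read path, mirroring the hunk rather than adding anything new:

/* Result buffer sized from the mlx5_ifc layout, fields decoded with
 * MLX5_GET(). Sketch only — condensed from the reg_mr_callback() hunk. */
u32 out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
u32 mkey_index;

/* ... firmware fills 'out' when the CREATE_MKEY command completes ... */
mkey_index = MLX5_GET(create_mkey_out, out, mkey_index);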
drivers/infiniband/hw/mlx5/mr.c

@@ -135,20 +135,10 @@ static void reg_mr_callback(int status, void *context)
 		return;
 	}

-	if (mr->out.hdr.status) {
-		mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
-			     mr->out.hdr.status,
-			     be32_to_cpu(mr->out.hdr.syndrome));
-		kfree(mr);
-		dev->fill_delay = 1;
-		mod_timer(&dev->delay_timer, jiffies + HZ);
-		return;
-	}
-
 	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
 	key = dev->mdev->priv.mkey_key++;
 	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
-	mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+	mr->mmkey.key = mlx5_idx_to_mkey(
+		MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

 	cache->last_add = jiffies;
...
@@ -170,16 +160,19 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
-	struct mlx5_create_mkey_mbox_in *in;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
+	void *mkc;
+	u32 *in;
 	int err = 0;
 	int i;

-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;

+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 	for (i = 0; i < num; i++) {
 		if (ent->pending >= MAX_PENDING_REG_MR) {
 			err = -EAGAIN;
...
@@ -194,18 +187,22 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		mr->order = ent->order;
 		mr->umred = 1;
 		mr->dev = dev;
-		in->seg.status = MLX5_MKEY_STATUS_FREE;
-		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
-		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
-		in->seg.log2_page_size = 12;
+
+		MLX5_SET(mkc, mkc, free, 1);
+		MLX5_SET(mkc, mkc, umr_en, 1);
+		MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+
+		MLX5_SET(mkc, mkc, qpn, 0xffffff);
+		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
+		MLX5_SET(mkc, mkc, log_page_size, 12);

 		spin_lock_irq(&ent->lock);
 		ent->pending++;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
-					    sizeof(*in), reg_mr_callback,
-					    mr, &mr->out);
+		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
+					       in, inlen,
+					       mr->out, sizeof(mr->out),
+					       reg_mr_callback, mr);
 		if (err) {
 			spin_lock_irq(&ent->lock);
 			ent->pending--;
...
@@ -670,30 +667,38 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_core_dev *mdev = dev->mdev;
-	struct mlx5_create_mkey_mbox_in *in;
-	struct mlx5_mkey_seg *seg;
 	struct mlx5_ib_mr *mr;
+	void *mkc;
+	u32 *in;
 	int err;

 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);

-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_free;
 	}

-	seg = &in->seg;
-	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
-	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	seg->start_addr = 0;
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

-	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
-				    NULL);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, mkc, lr, 1);
+
+	MLX5_SET(mkc, mkc, length64, 1);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET64(mkc, mkc, start_addr, 0);
+
+	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
 	if (err)
 		goto err_in;
...
@@ -1063,9 +1068,11 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 				     int page_shift, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
+	__be64 *pas;
+	void *mkc;
 	int inlen;
+	u32 *in;
 	int err;
 	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
...
@@ -1073,31 +1080,41 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);

-	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
+	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+		sizeof(*pas) * ((npages + 1) / 2) * 2;
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_1;
 	}
-	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
 			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

-	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+	/* The pg_access bit allows setting the access flags
 	 * in the page list submitted with the command. */
-	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
-	in->seg.flags = convert_access(access_flags) |
-		MLX5_ACCESS_MODE_MTT;
-	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
-	in->seg.start_addr = cpu_to_be64(virt_addr);
-	in->seg.len = cpu_to_be64(length);
-	in->seg.bsfs_octo_size = 0;
-	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
-	in->seg.log2_page_size = page_shift;
-	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
-							 1 << page_shift));
-	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
-				    NULL, NULL);
+	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, mkc, lr, 1);
+
+	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
+	MLX5_SET64(mkc, mkc, len, length);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+	MLX5_SET(mkc, mkc, translations_octword_size,
+		 get_octo_len(virt_addr, length, 1 << page_shift));
+	MLX5_SET(mkc, mkc, log_page_size, page_shift);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+		 get_octo_len(virt_addr, length, 1 << page_shift));
+
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err) {
 		mlx5_ib_warn(dev, "create mkey failed\n");
 		goto err_2;
...
@@ -1523,30 +1540,32 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 			       u32 max_num_sg)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_create_mkey_mbox_in *in;
-	struct mlx5_ib_mr *mr;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	int ndescs = ALIGN(max_num_sg, 4);
+	struct mlx5_ib_mr *mr;
+	void *mkc;
+	u32 *in;
 	int err;

 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);

-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_free;
 	}

-	in->seg.status = MLX5_MKEY_STATUS_FREE;
-	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
-	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, free, 1);
+	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

 	if (mr_type == IB_MR_TYPE_MEM_REG) {
-		mr->access_mode = MLX5_ACCESS_MODE_MTT;
-		in->seg.log2_page_size = PAGE_SHIFT;
-
+		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
 		err = mlx5_alloc_priv_descs(pd->device, mr,
 					    ndescs, sizeof(u64));
 		if (err)
...
@@ -1555,7 +1574,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		mr->desc_size = sizeof(u64);
 		mr->max_descs = ndescs;
 	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
-		mr->access_mode = MLX5_ACCESS_MODE_KLM;
+		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

 		err = mlx5_alloc_priv_descs(pd->device, mr,
 					    ndescs, sizeof(struct mlx5_klm));
...
@@ -1566,9 +1585,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
 		u32 psv_index[2];

-		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
-							   MLX5_MKEY_BSF_EN);
-		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+		MLX5_SET(mkc, mkc, bsf_en, 1);
+		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
 		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
 		if (!mr->sig) {
 			err = -ENOMEM;
...
@@ -1581,7 +1599,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		if (err)
 			goto err_free_sig;

-		mr->access_mode = MLX5_ACCESS_MODE_KLM;
+		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
 		mr->sig->psv_memory.psv_idx = psv_index[0];
 		mr->sig->psv_wire.psv_idx = psv_index[1];
...
@@ -1595,9 +1613,10 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		goto err_free_in;
 	}

-	in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
-	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
-				    NULL, NULL, NULL);
+	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
+	MLX5_SET(mkc, mkc, umr_en, 1);
+
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err)
 		goto err_destroy_psv;
...
@@ -1633,8 +1652,10 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 			       struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_create_mkey_mbox_in *in = NULL;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_ib_mw *mw = NULL;
+	u32 *in = NULL;
+	void *mkc;
 	int ndescs;
 	int err;
 	struct mlx5_ib_alloc_mw req = {};
...
@@ -1658,23 +1679,24 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	in = kzalloc(inlen, GFP_KERNEL);
 	if (!mw || !in) {
 		err = -ENOMEM;
 		goto free;
 	}

-	in->seg.status = MLX5_MKEY_STATUS_FREE;
-	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
-	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
-	in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
-		MLX5_PERM_LOCAL_READ;
-	if (type == IB_MW_TYPE_2)
-		in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
-	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-
-	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
-				    NULL, NULL, NULL);
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+	MLX5_SET(mkc, mkc, free, 1);
+	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, umr_en, 1);
+	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
+	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
 	if (err)
 		goto free;
...
@@ -1811,7 +1833,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 				   mr->desc_size * mr->max_descs,
 				   DMA_TO_DEVICE);

-	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
 		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
 	else
 		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
...
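After this conversion, all four mkey-creation paths in mr.c (the MR cache, the DMA MR, reg_create(), and memory windows) share one shape. Condensed from the hunks above into a single hedged sketch (error handling and per-path fields trimmed):

/* Common shape of mkey creation after the conversion: allocate the
 * layout-derived size, address the mkey context sub-structure, set
 * fields symbolically, then issue one call. Sketch only — variable
 * names follow the hunks above, not a complete function. */
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
u32 *in = kzalloc(inlen, GFP_KERNEL);
void *mkc;

if (!in)
	return -ENOMEM;

mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
kfree(in);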
drivers/infiniband/hw/mlx5/qp.c
浏览文件 @
d68478da
...
@@ -726,7 +726,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
...
@@ -726,7 +726,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
static
int
create_user_qp
(
struct
mlx5_ib_dev
*
dev
,
struct
ib_pd
*
pd
,
static
int
create_user_qp
(
struct
mlx5_ib_dev
*
dev
,
struct
ib_pd
*
pd
,
struct
mlx5_ib_qp
*
qp
,
struct
ib_udata
*
udata
,
struct
mlx5_ib_qp
*
qp
,
struct
ib_udata
*
udata
,
struct
ib_qp_init_attr
*
attr
,
struct
ib_qp_init_attr
*
attr
,
struct
mlx5_create_qp_mbox_in
**
in
,
u32
**
in
,
struct
mlx5_ib_create_qp_resp
*
resp
,
int
*
inlen
,
struct
mlx5_ib_create_qp_resp
*
resp
,
int
*
inlen
,
struct
mlx5_ib_qp_base
*
base
)
struct
mlx5_ib_qp_base
*
base
)
{
{
...
@@ -739,6 +739,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
...
@@ -739,6 +739,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32
offset
=
0
;
u32
offset
=
0
;
int
uuarn
;
int
uuarn
;
int
ncont
=
0
;
int
ncont
=
0
;
__be64
*
pas
;
void
*
qpc
;
int
err
;
int
err
;
err
=
ib_copy_from_udata
(
&
ucmd
,
udata
,
sizeof
(
ucmd
));
err
=
ib_copy_from_udata
(
&
ucmd
,
udata
,
sizeof
(
ucmd
));
...
@@ -795,20 +797,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
...
@@ -795,20 +797,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
ubuffer
->
umem
=
NULL
;
ubuffer
->
umem
=
NULL
;
}
}
*
inlen
=
sizeof
(
**
in
)
+
sizeof
(
*
(
*
in
)
->
pas
)
*
ncont
;
*
inlen
=
MLX5_ST_SZ_BYTES
(
create_qp_in
)
+
MLX5_FLD_SZ_BYTES
(
create_qp_in
,
pas
[
0
])
*
ncont
;
*
in
=
mlx5_vzalloc
(
*
inlen
);
*
in
=
mlx5_vzalloc
(
*
inlen
);
if
(
!*
in
)
{
if
(
!*
in
)
{
err
=
-
ENOMEM
;
err
=
-
ENOMEM
;
goto
err_umem
;
goto
err_umem
;
}
}
pas
=
(
__be64
*
)
MLX5_ADDR_OF
(
create_qp_in
,
*
in
,
pas
);
if
(
ubuffer
->
umem
)
if
(
ubuffer
->
umem
)
mlx5_ib_populate_pas
(
dev
,
ubuffer
->
umem
,
page_shift
,
mlx5_ib_populate_pas
(
dev
,
ubuffer
->
umem
,
page_shift
,
pas
,
0
);
(
*
in
)
->
pas
,
0
);
(
*
in
)
->
ctx
.
log_pg_sz_remote_qpn
=
qpc
=
MLX5_ADDR_OF
(
create_qp_in
,
*
in
,
qpc
);
cpu_to_be32
((
page_shift
-
MLX5_ADAPTER_PAGE_SHIFT
)
<<
24
);
(
*
in
)
->
ctx
.
params2
=
cpu_to_be32
(
offset
<<
6
);
(
*
in
)
->
ctx
.
qp_counter_set_usr_page
=
cpu_to_be32
(
uar_index
);
MLX5_SET
(
qpc
,
qpc
,
log_page_size
,
page_shift
-
MLX5_ADAPTER_PAGE_SHIFT
);
MLX5_SET
(
qpc
,
qpc
,
page_offset
,
offset
);
MLX5_SET
(
qpc
,
qpc
,
uar_page
,
uar_index
);
resp
->
uuar_index
=
uuarn
;
resp
->
uuar_index
=
uuarn
;
qp
->
uuarn
=
uuarn
;
qp
->
uuarn
=
uuarn
;
...
@@ -857,12 +863,13 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
...
@@ -857,12 +863,13 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
static
int
create_kernel_qp
(
struct
mlx5_ib_dev
*
dev
,
static
int
create_kernel_qp
(
struct
mlx5_ib_dev
*
dev
,
struct
ib_qp_init_attr
*
init_attr
,
struct
ib_qp_init_attr
*
init_attr
,
struct
mlx5_ib_qp
*
qp
,
struct
mlx5_ib_qp
*
qp
,
struct
mlx5_create_qp_mbox_in
**
in
,
int
*
inlen
,
u32
**
in
,
int
*
inlen
,
struct
mlx5_ib_qp_base
*
base
)
struct
mlx5_ib_qp_base
*
base
)
{
{
enum
mlx5_ib_latency_class
lc
=
MLX5_IB_LATENCY_CLASS_LOW
;
enum
mlx5_ib_latency_class
lc
=
MLX5_IB_LATENCY_CLASS_LOW
;
struct
mlx5_uuar_info
*
uuari
;
struct
mlx5_uuar_info
*
uuari
;
int
uar_index
;
int
uar_index
;
void
*
qpc
;
int
uuarn
;
int
uuarn
;
int
err
;
int
err
;
...
@@ -902,25 +909,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
...
@@ -902,25 +909,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
}
}
qp
->
sq
.
qend
=
mlx5_get_send_wqe
(
qp
,
qp
->
sq
.
wqe_cnt
);
qp
->
sq
.
qend
=
mlx5_get_send_wqe
(
qp
,
qp
->
sq
.
wqe_cnt
);
*
inlen
=
sizeof
(
**
in
)
+
sizeof
(
*
(
*
in
)
->
pas
)
*
qp
->
buf
.
npages
;
*
inlen
=
MLX5_ST_SZ_BYTES
(
create_qp_in
)
+
MLX5_FLD_SZ_BYTES
(
create_qp_in
,
pas
[
0
])
*
qp
->
buf
.
npages
;
*
in
=
mlx5_vzalloc
(
*
inlen
);
*
in
=
mlx5_vzalloc
(
*
inlen
);
if
(
!*
in
)
{
if
(
!*
in
)
{
err
=
-
ENOMEM
;
err
=
-
ENOMEM
;
goto
err_buf
;
goto
err_buf
;
}
}
(
*
in
)
->
ctx
.
qp_counter_set_usr_page
=
cpu_to_be32
(
uar_index
);
(
*
in
)
->
ctx
.
log_pg_sz_remote_qpn
=
qpc
=
MLX5_ADDR_OF
(
create_qp_in
,
*
in
,
qpc
);
cpu_to_be32
((
qp
->
buf
.
page_shift
-
MLX5_ADAPTER_PAGE_SHIFT
)
<<
24
);
MLX5_SET
(
qpc
,
qpc
,
uar_page
,
uar_index
);
MLX5_SET
(
qpc
,
qpc
,
log_page_size
,
qp
->
buf
.
page_shift
-
MLX5_ADAPTER_PAGE_SHIFT
);
/* Set "fast registration enabled" for all kernel QPs */
/* Set "fast registration enabled" for all kernel QPs */
(
*
in
)
->
ctx
.
params1
|=
cpu_to_be32
(
1
<<
1
1
);
MLX5_SET
(
qpc
,
qpc
,
fre
,
1
);
(
*
in
)
->
ctx
.
sq_crq_size
|=
cpu_to_be16
(
1
<<
4
);
MLX5_SET
(
qpc
,
qpc
,
rlky
,
1
);
if
(
init_attr
->
create_flags
&
mlx5_ib_create_qp_sqpn_qp1
())
{
if
(
init_attr
->
create_flags
&
mlx5_ib_create_qp_sqpn_qp1
())
{
(
*
in
)
->
ctx
.
deth_sqpn
=
cpu_to_be32
(
1
);
MLX5_SET
(
qpc
,
qpc
,
deth_sqpn
,
1
);
qp
->
flags
|=
MLX5_IB_QP_SQPN_QP1
;
qp
->
flags
|=
MLX5_IB_QP_SQPN_QP1
;
}
}
mlx5_fill_page_array
(
&
qp
->
buf
,
(
*
in
)
->
pas
);
mlx5_fill_page_array
(
&
qp
->
buf
,
(
__be64
*
)
MLX5_ADDR_OF
(
create_qp_in
,
*
in
,
pas
));
err
=
mlx5_db_alloc
(
dev
->
mdev
,
&
qp
->
db
);
err
=
mlx5_db_alloc
(
dev
->
mdev
,
&
qp
->
db
);
if
(
err
)
{
if
(
err
)
{
...
@@ -974,15 +985,15 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
...
@@ -974,15 +985,15 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
free_uuar
(
&
dev
->
mdev
->
priv
.
uuari
,
qp
->
bf
->
uuarn
);
free_uuar
(
&
dev
->
mdev
->
priv
.
uuari
,
qp
->
bf
->
uuarn
);
}
}
static
__be
32
get_rx_type
(
struct
mlx5_ib_qp
*
qp
,
struct
ib_qp_init_attr
*
attr
)
static
u
32
get_rx_type
(
struct
mlx5_ib_qp
*
qp
,
struct
ib_qp_init_attr
*
attr
)
{
{
if
(
attr
->
srq
||
(
attr
->
qp_type
==
IB_QPT_XRC_TGT
)
||
if
(
attr
->
srq
||
(
attr
->
qp_type
==
IB_QPT_XRC_TGT
)
||
(
attr
->
qp_type
==
IB_QPT_XRC_INI
))
(
attr
->
qp_type
==
IB_QPT_XRC_INI
))
return
cpu_to_be32
(
MLX5_SRQ_RQ
)
;
return
MLX5_SRQ_RQ
;
else
if
(
!
qp
->
has_rq
)
else
if
(
!
qp
->
has_rq
)
return
cpu_to_be32
(
MLX5_ZERO_LEN_RQ
)
;
return
MLX5_ZERO_LEN_RQ
;
else
else
return
cpu_to_be32
(
MLX5_NON_ZERO_RQ
)
;
return
MLX5_NON_ZERO_RQ
;
}
}
static
int
is_connected
(
enum
ib_qp_type
qp_type
)
static
int
is_connected
(
enum
ib_qp_type
qp_type
)
...
@@ -996,13 +1007,10 @@ static int is_connected(enum ib_qp_type qp_type)
...
@@ -996,13 +1007,10 @@ static int is_connected(enum ib_qp_type qp_type)
static
int
create_raw_packet_qp_tis
(
struct
mlx5_ib_dev
*
dev
,
static
int
create_raw_packet_qp_tis
(
struct
mlx5_ib_dev
*
dev
,
struct
mlx5_ib_sq
*
sq
,
u32
tdn
)
struct
mlx5_ib_sq
*
sq
,
u32
tdn
)
{
{
u32
in
[
MLX5_ST_SZ_DW
(
create_tis_in
)];
u32
in
[
MLX5_ST_SZ_DW
(
create_tis_in
)]
=
{
0
}
;
void
*
tisc
=
MLX5_ADDR_OF
(
create_tis_in
,
in
,
ctx
);
void
*
tisc
=
MLX5_ADDR_OF
(
create_tis_in
,
in
,
ctx
);
memset
(
in
,
0
,
sizeof
(
in
));
MLX5_SET
(
tisc
,
tisc
,
transport_domain
,
tdn
);
MLX5_SET
(
tisc
,
tisc
,
transport_domain
,
tdn
);
return
mlx5_core_create_tis
(
dev
->
mdev
,
in
,
sizeof
(
in
),
&
sq
->
tisn
);
return
mlx5_core_create_tis
(
dev
->
mdev
,
in
,
sizeof
(
in
),
&
sq
->
tisn
);
}
}
...
@@ -1191,7 +1199,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
...
@@ -1191,7 +1199,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
}
static
int
create_raw_packet_qp
(
struct
mlx5_ib_dev
*
dev
,
struct
mlx5_ib_qp
*
qp
,
static
int
create_raw_packet_qp
(
struct
mlx5_ib_dev
*
dev
,
struct
mlx5_ib_qp
*
qp
,
struct
mlx5_create_qp_mbox_in
*
in
,
u32
*
in
,
struct
ib_pd
*
pd
)
struct
ib_pd
*
pd
)
{
{
struct
mlx5_ib_raw_packet_qp
*
raw_packet_qp
=
&
qp
->
raw_packet_qp
;
struct
mlx5_ib_raw_packet_qp
*
raw_packet_qp
=
&
qp
->
raw_packet_qp
;
...
@@ -1461,18 +1469,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
...
@@ -1461,18 +1469,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct
ib_udata
*
udata
,
struct
mlx5_ib_qp
*
qp
)
struct
ib_udata
*
udata
,
struct
mlx5_ib_qp
*
qp
)
{
{
struct
mlx5_ib_resources
*
devr
=
&
dev
->
devr
;
struct
mlx5_ib_resources
*
devr
=
&
dev
->
devr
;
int
inlen
=
MLX5_ST_SZ_BYTES
(
create_qp_in
);
struct
mlx5_core_dev
*
mdev
=
dev
->
mdev
;
struct
mlx5_core_dev
*
mdev
=
dev
->
mdev
;
struct
mlx5_ib_qp_base
*
base
;
struct
mlx5_ib_create_qp_resp
resp
;
struct
mlx5_ib_create_qp_resp
resp
;
struct
mlx5_create_qp_mbox_in
*
in
;
struct
mlx5_ib_create_qp
ucmd
;
struct
mlx5_ib_cq
*
send_cq
;
struct
mlx5_ib_cq
*
send_cq
;
struct
mlx5_ib_cq
*
recv_cq
;
struct
mlx5_ib_cq
*
recv_cq
;
unsigned
long
flags
;
unsigned
long
flags
;
int
inlen
=
sizeof
(
*
in
);
int
err
;
u32
uidx
=
MLX5_IB_DEFAULT_UIDX
;
u32
uidx
=
MLX5_IB_DEFAULT_UIDX
;
struct
mlx5_ib_create_qp
ucmd
;
struct
mlx5_ib_qp_base
*
base
;
void
*
qpc
;
void
*
qpc
;
u32
*
in
;
int
err
;
base
=
init_attr
->
qp_type
==
IB_QPT_RAW_PACKET
?
base
=
init_attr
->
qp_type
==
IB_QPT_RAW_PACKET
?
&
qp
->
raw_packet_qp
.
rq
.
base
:
&
qp
->
raw_packet_qp
.
rq
.
base
:
...
@@ -1600,7 +1608,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
...
@@ -1600,7 +1608,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if
(
err
)
if
(
err
)
return
err
;
return
err
;
}
else
{
}
else
{
in
=
mlx5_vzalloc
(
sizeof
(
*
in
)
);
in
=
mlx5_vzalloc
(
inlen
);
if
(
!
in
)
if
(
!
in
)
return
-
ENOMEM
;
return
-
ENOMEM
;
...
@@ -1610,26 +1618,29 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
...
@@ -1610,26 +1618,29 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if
(
is_sqp
(
init_attr
->
qp_type
))
if
(
is_sqp
(
init_attr
->
qp_type
))
qp
->
port
=
init_attr
->
port_num
;
qp
->
port
=
init_attr
->
port_num
;
in
->
ctx
.
flags
=
cpu_to_be32
(
to_mlx5_st
(
init_attr
->
qp_type
)
<<
16
|
qpc
=
MLX5_ADDR_OF
(
create_qp_in
,
in
,
qpc
);
MLX5_QP_PM_MIGRATED
<<
11
);
MLX5_SET
(
qpc
,
qpc
,
st
,
to_mlx5_st
(
init_attr
->
qp_type
));
MLX5_SET
(
qpc
,
qpc
,
pm_state
,
MLX5_QP_PM_MIGRATED
);
if
(
init_attr
->
qp_type
!=
MLX5_IB_QPT_REG_UMR
)
if
(
init_attr
->
qp_type
!=
MLX5_IB_QPT_REG_UMR
)
in
->
ctx
.
flags_pd
=
cpu_to_be32
(
to_mpd
(
pd
?
pd
:
devr
->
p0
)
->
pdn
);
MLX5_SET
(
qpc
,
qpc
,
pd
,
to_mpd
(
pd
?
pd
:
devr
->
p0
)
->
pdn
);
else
else
in
->
ctx
.
flags_pd
=
cpu_to_be32
(
MLX5_QP_LAT_SENSITIVE
);
MLX5_SET
(
qpc
,
qpc
,
latency_sensitive
,
1
);
if
(
qp
->
wq_sig
)
if
(
qp
->
wq_sig
)
in
->
ctx
.
flags_pd
|=
cpu_to_be32
(
MLX5_QP_ENABLE_SIG
);
MLX5_SET
(
qpc
,
qpc
,
wq_signature
,
1
);
if
(
qp
->
flags
&
MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
if
(
qp
->
flags
&
MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
in
->
ctx
.
flags_pd
|=
cpu_to_be32
(
MLX5_QP_BLOCK_MCAST
);
MLX5_SET
(
qpc
,
qpc
,
block_lb_mc
,
1
);
if
(
qp
->
flags
&
MLX5_IB_QP_CROSS_CHANNEL
)
if
(
qp
->
flags
&
MLX5_IB_QP_CROSS_CHANNEL
)
in
->
ctx
.
params2
|=
cpu_to_be32
(
MLX5_QP_BIT_CC_MASTER
);
MLX5_SET
(
qpc
,
qpc
,
cd_master
,
1
);
if
(
qp
->
flags
&
MLX5_IB_QP_MANAGED_SEND
)
if
(
qp
->
flags
&
MLX5_IB_QP_MANAGED_SEND
)
in
->
ctx
.
params2
|=
cpu_to_be32
(
MLX5_QP_BIT_CC_SLAVE_SEND
);
MLX5_SET
(
qpc
,
qpc
,
cd_slave_send
,
1
);
if
(
qp
->
flags
&
MLX5_IB_QP_MANAGED_RECV
)
if
(
qp
->
flags
&
MLX5_IB_QP_MANAGED_RECV
)
in
->
ctx
.
params2
|=
cpu_to_be32
(
MLX5_QP_BIT_CC_SLAVE_RECV
);
MLX5_SET
(
qpc
,
qpc
,
cd_slave_receive
,
1
);
if
(
qp
->
scat_cqe
&&
is_connected
(
init_attr
->
qp_type
))
{
if
(
qp
->
scat_cqe
&&
is_connected
(
init_attr
->
qp_type
))
{
int
rcqe_sz
;
int
rcqe_sz
;
...
@@ -1639,71 +1650,68 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
...
@@ -1639,71 +1650,68 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
scqe_sz
=
mlx5_ib_get_cqe_size
(
dev
,
init_attr
->
send_cq
);
scqe_sz
=
mlx5_ib_get_cqe_size
(
dev
,
init_attr
->
send_cq
);
if
(
rcqe_sz
==
128
)
if
(
rcqe_sz
==
128
)
in
->
ctx
.
cs_res
=
MLX5_RES_SCAT_DATA64_CQE
;
MLX5_SET
(
qpc
,
qpc
,
cs_res
,
MLX5_RES_SCAT_DATA64_CQE
)
;
else
else
in
->
ctx
.
cs_res
=
MLX5_RES_SCAT_DATA32_CQE
;
MLX5_SET
(
qpc
,
qpc
,
cs_res
,
MLX5_RES_SCAT_DATA32_CQE
)
;
if
(
init_attr
->
sq_sig_type
==
IB_SIGNAL_ALL_WR
)
{
if
(
init_attr
->
sq_sig_type
==
IB_SIGNAL_ALL_WR
)
{
if
(
scqe_sz
==
128
)
if
(
scqe_sz
==
128
)
in
->
ctx
.
cs_req
=
MLX5_REQ_SCAT_DATA64_CQE
;
MLX5_SET
(
qpc
,
qpc
,
cs_req
,
MLX5_REQ_SCAT_DATA64_CQE
)
;
else
else
in
->
ctx
.
cs_req
=
MLX5_REQ_SCAT_DATA32_CQE
;
MLX5_SET
(
qpc
,
qpc
,
cs_req
,
MLX5_REQ_SCAT_DATA32_CQE
)
;
}
}
}
}
if
(
qp
->
rq
.
wqe_cnt
)
{
if
(
qp
->
rq
.
wqe_cnt
)
{
in
->
ctx
.
rq_size_stride
=
(
qp
->
rq
.
wqe_shift
-
4
);
MLX5_SET
(
qpc
,
qpc
,
log_rq_stride
,
qp
->
rq
.
wqe_shift
-
4
);
in
->
ctx
.
rq_size_stride
|=
ilog2
(
qp
->
rq
.
wqe_cnt
)
<<
3
;
MLX5_SET
(
qpc
,
qpc
,
log_rq_size
,
ilog2
(
qp
->
rq
.
wqe_cnt
))
;
}
}
in
->
ctx
.
rq_type_srqn
=
get_rx_type
(
qp
,
init_attr
);
MLX5_SET
(
qpc
,
qpc
,
rq_type
,
get_rx_type
(
qp
,
init_attr
)
);
if
(
qp
->
sq
.
wqe_cnt
)
if
(
qp
->
sq
.
wqe_cnt
)
in
->
ctx
.
sq_crq_size
|=
cpu_to_be16
(
ilog2
(
qp
->
sq
.
wqe_cnt
)
<<
11
);
MLX5_SET
(
qpc
,
qpc
,
log_sq_size
,
ilog2
(
qp
->
sq
.
wqe_cnt
)
);
else
else
in
->
ctx
.
sq_crq_size
|=
cpu_to_be16
(
0x8000
);
MLX5_SET
(
qpc
,
qpc
,
no_sq
,
1
);
/* Set default resources */
/* Set default resources */
switch
(
init_attr
->
qp_type
)
{
switch
(
init_attr
->
qp_type
)
{
case
IB_QPT_XRC_TGT
:
case
IB_QPT_XRC_TGT
:
in
->
ctx
.
cqn_recv
=
cpu_to_be32
(
to_mcq
(
devr
->
c0
)
->
mcq
.
cqn
);
MLX5_SET
(
qpc
,
qpc
,
cqn_rcv
,
to_mcq
(
devr
->
c0
)
->
mcq
.
cqn
);
in
->
ctx
.
cqn_send
=
cpu_to_be32
(
to_mcq
(
devr
->
c0
)
->
mcq
.
cqn
);
MLX5_SET
(
qpc
,
qpc
,
cqn_snd
,
to_mcq
(
devr
->
c0
)
->
mcq
.
cqn
);
in
->
ctx
.
rq_type_srqn
|=
cpu_to_be32
(
to_msrq
(
devr
->
s0
)
->
msrq
.
srqn
);
MLX5_SET
(
qpc
,
qpc
,
srqn_rmpn_xrqn
,
to_msrq
(
devr
->
s0
)
->
msrq
.
srqn
);
in
->
ctx
.
xrcd
=
cpu_to_be32
(
to_mxrcd
(
init_attr
->
xrcd
)
->
xrcdn
);
MLX5_SET
(
qpc
,
qpc
,
xrcd
,
to_mxrcd
(
init_attr
->
xrcd
)
->
xrcdn
);
break
;
break
;
case
IB_QPT_XRC_INI
:
case
IB_QPT_XRC_INI
:
in
->
ctx
.
cqn_recv
=
cpu_to_be32
(
to_mcq
(
devr
->
c0
)
->
mcq
.
cqn
);
MLX5_SET
(
qpc
,
qpc
,
cqn_rcv
,
to_mcq
(
devr
->
c0
)
->
mcq
.
cqn
);
in
->
ctx
.
xrcd
=
cpu_to_be32
(
to_mxrcd
(
devr
->
x1
)
->
xrcdn
);
MLX5_SET
(
qpc
,
qpc
,
xrcd
,
to_mxrcd
(
devr
->
x1
)
->
xrcdn
);
in
->
ctx
.
rq_type_srqn
|=
cpu_to_be32
(
to_msrq
(
devr
->
s0
)
->
msrq
.
srqn
);
MLX5_SET
(
qpc
,
qpc
,
srqn_rmpn_xrqn
,
to_msrq
(
devr
->
s0
)
->
msrq
.
srqn
);
break
;
break
;
default:
default:
if
(
init_attr
->
srq
)
{
if
(
init_attr
->
srq
)
{
in
->
ctx
.
xrcd
=
cpu_to_be32
(
to_mxrcd
(
devr
->
x0
)
->
xrcdn
);
MLX5_SET
(
qpc
,
qpc
,
xrcd
,
to_mxrcd
(
devr
->
x0
)
->
xrcdn
);
in
->
ctx
.
rq_type_srqn
|=
cpu_to_be32
(
to_msrq
(
init_attr
->
srq
)
->
msrq
.
srqn
);
MLX5_SET
(
qpc
,
qpc
,
srqn_rmpn_xrqn
,
to_msrq
(
init_attr
->
srq
)
->
msrq
.
srqn
);
}
else
{
}
else
{
in
->
ctx
.
xrcd
=
cpu_to_be32
(
to_mxrcd
(
devr
->
x1
)
->
xrcdn
);
MLX5_SET
(
qpc
,
qpc
,
xrcd
,
to_mxrcd
(
devr
->
x1
)
->
xrcdn
);
in
->
ctx
.
rq_type_srqn
|=
MLX5_SET
(
qpc
,
qpc
,
srqn_rmpn_xrqn
,
to_msrq
(
devr
->
s1
)
->
msrq
.
srqn
);
cpu_to_be32
(
to_msrq
(
devr
->
s1
)
->
msrq
.
srqn
);
}
}
}
}
if
(
init_attr
->
send_cq
)
if
(
init_attr
->
send_cq
)
in
->
ctx
.
cqn_send
=
cpu_to_be32
(
to_mcq
(
init_attr
->
send_cq
)
->
mcq
.
cqn
);
MLX5_SET
(
qpc
,
qpc
,
cqn_snd
,
to_mcq
(
init_attr
->
send_cq
)
->
mcq
.
cqn
);
if
(
init_attr
->
recv_cq
)
if
(
init_attr
->
recv_cq
)
in
->
ctx
.
cqn_recv
=
cpu_to_be32
(
to_mcq
(
init_attr
->
recv_cq
)
->
mcq
.
cqn
);
MLX5_SET
(
qpc
,
qpc
,
cqn_rcv
,
to_mcq
(
init_attr
->
recv_cq
)
->
mcq
.
cqn
);
in
->
ctx
.
db_rec_addr
=
cpu_to_be64
(
qp
->
db
.
dma
);
MLX5_SET64
(
qpc
,
qpc
,
dbr_addr
,
qp
->
db
.
dma
);
if
(
MLX5_CAP_GEN
(
mdev
,
cqe_version
)
==
MLX5_CQE_VERSION_V1
)
{
/* 0xffffff means we ask to work with cqe version 0 */
qpc
=
MLX5_ADDR_OF
(
create_qp_in
,
in
,
qpc
);
if
(
MLX5_CAP_GEN
(
mdev
,
cqe_version
)
==
MLX5_CQE_VERSION_V1
)
/* 0xffffff means we ask to work with cqe version 0 */
MLX5_SET
(
qpc
,
qpc
,
user_index
,
uidx
);
MLX5_SET
(
qpc
,
qpc
,
user_index
,
uidx
);
}
/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
if
(
init_attr
->
qp_type
==
IB_QPT_UD
&&
if
(
init_attr
->
qp_type
==
IB_QPT_UD
&&
(
init_attr
->
create_flags
&
IB_QP_CREATE_IPOIB_UD_LSO
))
{
(
init_attr
->
create_flags
&
IB_QP_CREATE_IPOIB_UD_LSO
))
{
qpc
=
MLX5_ADDR_OF
(
create_qp_in
,
in
,
qpc
);
MLX5_SET
(
qpc
,
qpc
,
ulp_stateless_offload_mode
,
1
);
MLX5_SET
(
qpc
,
qpc
,
ulp_stateless_offload_mode
,
1
);
qp
->
flags
|=
MLX5_IB_QP_LSO
;
qp
->
flags
|=
MLX5_IB_QP_LSO
;
}
}
...
@@ -1860,7 +1868,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
...
@@ -1860,7 +1868,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
{
struct
mlx5_ib_cq
*
send_cq
,
*
recv_cq
;
struct
mlx5_ib_cq
*
send_cq
,
*
recv_cq
;
struct
mlx5_ib_qp_base
*
base
=
&
qp
->
trans_qp
.
base
;
struct
mlx5_ib_qp_base
*
base
=
&
qp
->
trans_qp
.
base
;
struct
mlx5_modify_qp_mbox_in
*
in
;
unsigned
long
flags
;
unsigned
long
flags
;
int
err
;
int
err
;
...
@@ -1873,16 +1880,12 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
...
@@ -1873,16 +1880,12 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
&
qp
->
raw_packet_qp
.
rq
.
base
:
&
qp
->
raw_packet_qp
.
rq
.
base
:
&
qp
->
trans_qp
.
base
;
&
qp
->
trans_qp
.
base
;
in
=
kzalloc
(
sizeof
(
*
in
),
GFP_KERNEL
);
if
(
!
in
)
return
;
if
(
qp
->
state
!=
IB_QPS_RESET
)
{
if
(
qp
->
state
!=
IB_QPS_RESET
)
{
if
(
qp
->
ibqp
.
qp_type
!=
IB_QPT_RAW_PACKET
)
{
if
(
qp
->
ibqp
.
qp_type
!=
IB_QPT_RAW_PACKET
)
{
mlx5_ib_qp_disable_pagefaults
(
qp
);
mlx5_ib_qp_disable_pagefaults
(
qp
);
err
=
mlx5_core_qp_modify
(
dev
->
mdev
,
err
=
mlx5_core_qp_modify
(
dev
->
mdev
,
MLX5_CMD_OP_2RST_QP
,
in
,
0
,
MLX5_CMD_OP_2RST_QP
,
0
,
&
base
->
mqp
);
NULL
,
&
base
->
mqp
);
}
else
{
}
else
{
err
=
modify_raw_packet_qp
(
dev
,
qp
,
err
=
modify_raw_packet_qp
(
dev
,
qp
,
MLX5_CMD_OP_2RST_QP
);
MLX5_CMD_OP_2RST_QP
);
...
@@ -1924,8 +1927,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
...
@@ -1924,8 +1927,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
base
->
mqp
.
qpn
);
base
->
mqp
.
qpn
);
}
}
kfree
(
in
);
if
(
qp
->
create_type
==
MLX5_QP_KERNEL
)
if
(
qp
->
create_type
==
MLX5_QP_KERNEL
)
destroy_qp_kernel
(
dev
,
qp
);
destroy_qp_kernel
(
dev
,
qp
);
else
if
(
qp
->
create_type
==
MLX5_QP_USER
)
else
if
(
qp
->
create_type
==
MLX5_QP_USER
)
...
@@ -2511,7 +2512,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
...
@@ -2511,7 +2512,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct
mlx5_ib_qp_base
*
base
=
&
qp
->
trans_qp
.
base
;
struct
mlx5_ib_qp_base
*
base
=
&
qp
->
trans_qp
.
base
;
struct
mlx5_ib_cq
*
send_cq
,
*
recv_cq
;
struct
mlx5_ib_cq
*
send_cq
,
*
recv_cq
;
struct
mlx5_qp_context
*
context
;
struct
mlx5_qp_context
*
context
;
struct
mlx5_modify_qp_mbox_in
*
in
;
struct
mlx5_ib_pd
*
pd
;
struct
mlx5_ib_pd
*
pd
;
enum
mlx5_qp_state
mlx5_cur
,
mlx5_new
;
enum
mlx5_qp_state
mlx5_cur
,
mlx5_new
;
enum
mlx5_qp_optpar
optpar
;
enum
mlx5_qp_optpar
optpar
;
...
@@ -2520,11 +2520,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
...
@@ -2520,11 +2520,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
int
err
;
int
err
;
u16
op
;
u16
op
;
in
=
kzalloc
(
sizeof
(
*
in
),
GFP_KERNEL
);
context
=
kzalloc
(
sizeof
(
*
context
),
GFP_KERNEL
);
if
(
!
in
)
if
(
!
context
)
return
-
ENOMEM
;
return
-
ENOMEM
;
context
=
&
in
->
ctx
;
err
=
to_mlx5_st
(
ibqp
->
qp_type
);
err
=
to_mlx5_st
(
ibqp
->
qp_type
);
if
(
err
<
0
)
{
if
(
err
<
0
)
{
mlx5_ib_dbg
(
dev
,
"unsupported qp type %d
\n
"
,
ibqp
->
qp_type
);
mlx5_ib_dbg
(
dev
,
"unsupported qp type %d
\n
"
,
ibqp
->
qp_type
);
...
@@ -2689,12 +2688,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
...
@@ -2689,12 +2688,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
op
=
optab
[
mlx5_cur
][
mlx5_new
];
op
=
optab
[
mlx5_cur
][
mlx5_new
];
optpar
=
ib_mask_to_mlx5_opt
(
attr_mask
);
optpar
=
ib_mask_to_mlx5_opt
(
attr_mask
);
optpar
&=
opt_mask
[
mlx5_cur
][
mlx5_new
][
mlx5_st
];
optpar
&=
opt_mask
[
mlx5_cur
][
mlx5_new
][
mlx5_st
];
in
->
optparam
=
cpu_to_be32
(
optpar
);
if
(
qp
->
ibqp
.
qp_type
==
IB_QPT_RAW_PACKET
)
if
(
qp
->
ibqp
.
qp_type
==
IB_QPT_RAW_PACKET
)
err
=
modify_raw_packet_qp
(
dev
,
qp
,
op
);
err
=
modify_raw_packet_qp
(
dev
,
qp
,
op
);
else
else
err
=
mlx5_core_qp_modify
(
dev
->
mdev
,
op
,
in
,
sqd_even
t
,
err
=
mlx5_core_qp_modify
(
dev
->
mdev
,
op
,
optpar
,
contex
t
,
&
base
->
mqp
);
&
base
->
mqp
);
if
(
err
)
if
(
err
)
goto
out
;
goto
out
;
...
@@ -2735,7 +2733,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
...
@@ -2735,7 +2733,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
}
out:
out:
kfree
(
in
);
kfree
(
context
);
return
err
;
return
err
;
}
}
...
@@ -2968,7 +2966,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
...
@@ -2968,7 +2966,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
 	memset(umr, 0, sizeof(*umr));
 
-	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
 		/* KLMs take twice the size of MTTs */
 		ndescs *= 2;
...
@@ -3111,9 +3109,9 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
 	memset(seg, 0, sizeof(*seg));
 
-	if (mr->access_mode == MLX5_ACCESS_MODE_MTT)
+	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
 		seg->log2_page_size = ilog2(mr->ibmr.page_size);
-	else if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
 		/* KLMs take twice the size of MTTs */
 		ndescs *= 2;
...
@@ -3454,7 +3452,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
 	memset(seg, 0, sizeof(*seg));
 
 	seg->flags = get_umr_flags(wr->access_flags) |
-		     MLX5_ACCESS_MODE_KLM;
+		     MLX5_MKC_ACCESS_MODE_KLMS;
 	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
 				    MLX5_MKEY_BSF_EN | pdn);
...
@@ -4320,21 +4318,24 @@ static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			 struct ib_qp_attr *qp_attr)
 {
-	struct mlx5_query_qp_mbox_out *outb;
+	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
 	struct mlx5_qp_context *context;
 	int mlx5_state;
+	u32 *outb;
 	int err = 0;
 
-	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
+	outb = kzalloc(outlen, GFP_KERNEL);
 	if (!outb)
 		return -ENOMEM;
 
-	context = &outb->ctx;
 	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
-				 sizeof(*outb));
+				 outlen);
 	if (err)
 		goto out;
 
+	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+
 	mlx5_state = be32_to_cpu(context->flags) >> 28;
 
 	qp->state		     = to_ib_qp_state(mlx5_state);
...

drivers/net/ethernet/mellanox/mlx5/core/Makefile
...
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
 		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
-		fs_counters.o rl.o
+		fs_counters.o rl.o lag.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
 		en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
...

drivers/net/ethernet/mellanox/mlx5/core/cmd.c
...
@@ -280,11 +280,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_DEALLOC_PD:
 	case MLX5_CMD_OP_DEALLOC_UAR:
-	case MLX5_CMD_OP_DETTACH_FROM_MCG:
+	case MLX5_CMD_OP_DETACH_FROM_MCG:
 	case MLX5_CMD_OP_DEALLOC_XRCD:
 	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
 	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
 	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_DESTROY_LAG:
+	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
 	case MLX5_CMD_OP_DESTROY_TIR:
 	case MLX5_CMD_OP_DESTROY_SQ:
 	case MLX5_CMD_OP_DESTROY_RQ:
...
@@ -301,6 +303,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
+	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
...
@@ -375,6 +378,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_CREATE_LAG:
+	case MLX5_CMD_OP_MODIFY_LAG:
+	case MLX5_CMD_OP_QUERY_LAG:
+	case MLX5_CMD_OP_CREATE_VPORT_LAG:
 	case MLX5_CMD_OP_CREATE_TIR:
 	case MLX5_CMD_OP_MODIFY_TIR:
 	case MLX5_CMD_OP_QUERY_TIR:
...
@@ -402,6 +409,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd   = MLX5_DRIVER_SYND;
 		return -EIO;
...
@@ -490,7 +498,7 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
 	MLX5_COMMAND_STR_CASE(ACCESS_REG);
 	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
-	MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
 	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
 	MLX5_COMMAND_STR_CASE(MAD_IFC);
 	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
...
@@ -512,6 +520,12 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
 	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
 	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+	MLX5_COMMAND_STR_CASE(CREATE_LAG);
+	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
+	MLX5_COMMAND_STR_CASE(QUERY_LAG);
+	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
+	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
+	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
 	MLX5_COMMAND_STR_CASE(CREATE_TIR);
 	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
 	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
...
@@ -550,15 +564,130 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
 	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
+	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
 	default: return "unknown command opcode";
 	}
 }
 
+static const char *cmd_status_str(u8 status)
+{
+	switch (status) {
+	case MLX5_CMD_STAT_OK:
+		return "OK";
+	case MLX5_CMD_STAT_INT_ERR:
+		return "internal error";
+	case MLX5_CMD_STAT_BAD_OP_ERR:
+		return "bad operation";
+	case MLX5_CMD_STAT_BAD_PARAM_ERR:
+		return "bad parameter";
+	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
+		return "bad system state";
+	case MLX5_CMD_STAT_BAD_RES_ERR:
+		return "bad resource";
+	case MLX5_CMD_STAT_RES_BUSY:
+		return "resource busy";
+	case MLX5_CMD_STAT_LIM_ERR:
+		return "limits exceeded";
+	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
+		return "bad resource state";
+	case MLX5_CMD_STAT_IX_ERR:
+		return "bad index";
+	case MLX5_CMD_STAT_NO_RES_ERR:
+		return "no resources";
+	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
+		return "bad input length";
+	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
+		return "bad output length";
+	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
+		return "bad QP state";
+	case MLX5_CMD_STAT_BAD_PKT_ERR:
+		return "bad packet (discarded)";
+	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
+		return "bad size too many outstanding CQEs";
+	default:
+		return "unknown status";
+	}
+}
+
+static int cmd_status_to_err(u8 status)
+{
+	switch (status) {
+	case MLX5_CMD_STAT_OK:				return 0;
+	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
+	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
+	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
+	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
+	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
+	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
+	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
+	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
+	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
+	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
+	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
+	default:					return -EIO;
+	}
+}
+
+struct mlx5_ifc_mbox_out_bits {
+	u8	   status[0x8];
+	u8	   reserved_at_8[0x18];
+
+	u8	   syndrome[0x20];
+
+	u8	   reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+	u8	   opcode[0x10];
+	u8	   reserved_at_10[0x10];
+
+	u8	   reserved_at_20[0x10];
+	u8	   op_mod[0x10];
+
+	u8	   reserved_at_40[0x40];
+};
+
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+{
+	*status = MLX5_GET(mbox_out, out, status);
+	*syndrome = MLX5_GET(mbox_out, out, syndrome);
+}
+
+static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+{
+	u32 syndrome;
+	u8  status;
+	u16 opcode;
+	u16 op_mod;
+
+	mlx5_cmd_mbox_status(out, &status, &syndrome);
+	if (!status)
+		return 0;
+
+	opcode = MLX5_GET(mbox_in, in, opcode);
+	op_mod = MLX5_GET(mbox_in, in, op_mod);
+
+	mlx5_core_err(dev,
+		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
+		      mlx5_command_str(opcode), opcode, op_mod,
+		      cmd_status_str(status), status, syndrome);
+
+	return cmd_status_to_err(status);
+}
+
 static void dump_command(struct mlx5_core_dev *dev,
 			 struct mlx5_cmd_work_ent *ent, int input)
 {
-	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
 	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
 	struct mlx5_cmd_mailbox *next = msg->next;
 	int data_only;
 	u32 offset = 0;
...
@@ -608,9 +737,7 @@ static void dump_command(struct mlx5_core_dev *dev,
 static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
 {
-	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
-	return be16_to_cpu(hdr->opcode);
+	return MLX5_GET(mbox_in, in->first.data, opcode);
 }
 
 static void cb_timeout_handler(struct work_struct *work)
...
@@ -749,16 +876,6 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 	return err;
 }
 
-static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
-{
-	return &out->syndrome;
-}
-
-static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
-{
-	return &out->status;
-}
-
 /* Notes:
  * 1. Callback functions may not sleep
  * 2. page queue commands do not support asynchrous completion
...
@@ -804,7 +921,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 		goto out_free;
 
 	ds = ent->ts2 - ent->ts1;
-	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+	op = MLX5_GET(mbox_in, in->first.data, opcode);
 	if (op < ARRAY_SIZE(cmd->stats)) {
 		stats = &cmd->stats[op];
 		spin_lock_irq(&stats->lock);
...
@@ -1301,11 +1418,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
 			callback = ent->callback;
 			context = ent->context;
 			err = ent->ret;
-			if (!err)
+			if (!err) {
 				err = mlx5_copy_from_msg(ent->uout,
 							 ent->out,
 							 ent->uout_size);
+				err = err ? err : mlx5_cmd_check(dev,
+								 ent->in->first.data,
+								 ent->uout);
+			}
 
 			mlx5_free_cmd_msg(dev, ent->out);
 			free_msg(dev, ent->in);
...
@@ -1357,14 +1479,9 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
 	return msg;
 }
 
-static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
-{
-	return be16_to_cpu(in->opcode);
-}
-
-static int is_manage_pages(struct mlx5_inbox_hdr *in)
+static int is_manage_pages(void *in)
 {
-	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
 }
 
 static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
...
@@ -1380,9 +1497,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 	if (pci_channel_offline(dev->pdev) ||
 	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-		err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
+		u16 opcode = MLX5_GET(mbox_in, in, opcode);
 
-		*get_synd_ptr(out) = cpu_to_be32(drv_synd);
-		*get_status_ptr(out) = status;
+		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
+		MLX5_SET(mbox_out, out, status, status);
+		MLX5_SET(mbox_out, out, syndrome, drv_synd);
 		return err;
 	}
...
@@ -1434,7 +1553,10 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		  int out_size)
 {
-	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+	int err;
+
+	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+
+	return err ? : mlx5_cmd_check(dev, in, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_exec);
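
Note: with mlx5_cmd_exec() now folding in the mailbox status check (via the mlx5_cmd_check() added above), callers no longer need any mlx5_cmd_status_to_err()-style post-processing. A minimal sketch of the calling convention this series converts commands to (illustrative only; example_destroy_cq is a hypothetical name, while the macros and mlx5_cmd_exec() are the ones used in this diff):

    /* Sketch: inbox/outbox are u32 arrays sized from the mlx5_ifc layouts;
     * a firmware failure now comes back as a plain errno from mlx5_cmd_exec().
     */
    static int example_destroy_cq(struct mlx5_core_dev *dev, u32 cqn)
    {
    	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
    	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)]   = {0};
    
    	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
    	MLX5_SET(destroy_cq_in, in, cqn, cqn);
    	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    }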
...
@@ -1671,96 +1793,3 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
 	pci_pool_destroy(cmd->pool);
 }
 EXPORT_SYMBOL(mlx5_cmd_cleanup);
-
-static const char *cmd_status_str(u8 status)
-{
-	switch (status) {
-	case MLX5_CMD_STAT_OK:
-		return "OK";
-	case MLX5_CMD_STAT_INT_ERR:
-		return "internal error";
-	case MLX5_CMD_STAT_BAD_OP_ERR:
-		return "bad operation";
-	case MLX5_CMD_STAT_BAD_PARAM_ERR:
-		return "bad parameter";
-	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
-		return "bad system state";
-	case MLX5_CMD_STAT_BAD_RES_ERR:
-		return "bad resource";
-	case MLX5_CMD_STAT_RES_BUSY:
-		return "resource busy";
-	case MLX5_CMD_STAT_LIM_ERR:
-		return "limits exceeded";
-	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
-		return "bad resource state";
-	case MLX5_CMD_STAT_IX_ERR:
-		return "bad index";
-	case MLX5_CMD_STAT_NO_RES_ERR:
-		return "no resources";
-	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
-		return "bad input length";
-	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
-		return "bad output length";
-	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
-		return "bad QP state";
-	case MLX5_CMD_STAT_BAD_PKT_ERR:
-		return "bad packet (discarded)";
-	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
-		return "bad size too many outstanding CQEs";
-	default:
-		return "unknown status";
-	}
-}
-
-static int cmd_status_to_err(u8 status)
-{
-	switch (status) {
-	case MLX5_CMD_STAT_OK:				return 0;
-	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
-	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
-	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
-	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
-	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
-	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
-	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
-	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
-	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
-	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
-	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
-	default:					return -EIO;
-	}
-}
-
-/* this will be available till all the commands use set/get macros */
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
-{
-	if (!hdr->status)
-		return 0;
-
-	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
-		cmd_status_str(hdr->status), hdr->status,
-		be32_to_cpu(hdr->syndrome));
-
-	return cmd_status_to_err(hdr->status);
-}
-
-int mlx5_cmd_status_to_err_v2(void *ptr)
-{
-	u32 syndrome;
-	u8  status;
-
-	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
-	if (!status)
-		return 0;
-
-	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
-
-	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
-		cmd_status_str(status), status, syndrome);
-
-	return cmd_status_to_err(status);
-}
drivers/net/ethernet/mellanox/mlx5/core/cq.c
...
@@ -134,33 +134,29 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
 	complete(&cq->free);
 }
 
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_create_cq_mbox_in *in, int inlen)
+			u32 *in, int inlen)
 {
-	int err;
 	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	struct mlx5_create_cq_mbox_out out;
-	struct mlx5_destroy_cq_mbox_in din;
-	struct mlx5_destroy_cq_mbox_out dout;
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
 	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
 			   c_eqn);
 	struct mlx5_eq *eq;
+	int err;
 
 	eq = mlx5_eqn2eq(dev, eqn);
 	if (IS_ERR(eq))
 		return PTR_ERR(eq);
 
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
-	memset(&out, 0, sizeof(out));
-	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+	memset(out, 0, sizeof(out));
+	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (err)
 		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
+	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
 	cq->cons_index = 0;
 	cq->arm_sn     = 0;
 	atomic_set(&cq->refcount, 1);
...
@@ -186,10 +182,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	return 0;
 
 err_cmd:
-	memset(&din, 0, sizeof(din));
-	memset(&dout, 0, sizeof(dout));
-	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
-	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+	memset(din, 0, sizeof(din));
+	memset(dout, 0, sizeof(dout));
+	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
 	return err;
 }
 EXPORT_SYMBOL(mlx5_core_create_cq);
...
@@ -197,8 +194,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 {
 	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	struct mlx5_destroy_cq_mbox_in in;
-	struct mlx5_destroy_cq_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
 	struct mlx5_core_cq *tmp;
 	int err;
...
@@ -214,17 +211,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 		return -EINVAL;
 	}
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
-	in.cqn = cpu_to_be32(cq->cqn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
+	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (err)
 		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
 	synchronize_irq(cq->irqn);
 
 	mlx5_debug_cq_remove(dev, cq);
...
@@ -237,44 +229,23 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 
 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-		       struct mlx5_query_cq_mbox_out *out)
+		       u32 *out, int outlen)
 {
-	struct mlx5_query_cq_mbox_in in;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(out, 0, sizeof(*out));
-
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
-	in.cqn = cpu_to_be32(cq->cqn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-	if (err)
-		return err;
-
-	if (out->hdr.status)
-		return mlx5_cmd_status_to_err(&out->hdr);
-
-	return err;
+	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
+
+	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
+	MLX5_SET(query_cq_in, in, cqn, cq->cqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 EXPORT_SYMBOL(mlx5_core_query_cq);
 
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_modify_cq_mbox_in *in, int in_sz)
+			u32 *in, int inlen)
 {
-	struct mlx5_modify_cq_mbox_out out;
-	int err;
-
-	memset(&out, 0, sizeof(out));
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
-	err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	return 0;
+	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
+
+	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_modify_cq);
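
The accessor macros also reach into nested contexts, as the moderation helper below shows: MLX5_ADDR_OF() returns a pointer to the cq_context inside the inbox and MLX5_SET() then writes fields relative to it. A hedged usage sketch (example_tune_cq is a hypothetical name; the helper it calls is the one defined just below):

    /* Illustrative caller of mlx5_core_modify_cq_moderation(); the two
     * values land in the cqc cq_period / cq_max_count fields set below.
     */
    static int example_tune_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
    {
    	return mlx5_core_modify_cq_moderation(dev, cq, 16, 64);
    }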
...
@@ -283,18 +254,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
 				   u16 cq_period,
 				   u16 cq_max_count)
 {
-	struct mlx5_modify_cq_mbox_in in;
-
-	memset(&in, 0, sizeof(in));
-
-	in.cqn              = cpu_to_be32(cq->cqn);
-	in.ctx.cq_period    = cpu_to_be16(cq_period);
-	in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
-	in.field_select     = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
-					  MLX5_CQ_MODIFY_COUNT);
-
-	return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+	void *cqc;
+
+	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+	MLX5_SET(cqc, cqc, cq_period, cq_period);
+	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+	MLX5_SET(modify_cq_in, in,
+		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+
+	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
 }
 EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
 
 int mlx5_init_cq_table(struct mlx5_core_dev *dev)
 {
...
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
...
@@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
 static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
 			 int index, int *is_str)
 {
-	struct mlx5_query_qp_mbox_out *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
 	struct mlx5_qp_context *ctx;
 	u64 param = 0;
+	u32 *out;
 	int err;
 	int no_sq;
 
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	out = kzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return param;
 
-	err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
+	err = mlx5_core_qp_query(dev, qp, out, outlen);
 	if (err) {
-		mlx5_core_warn(dev, "failed to query qp\n");
+		mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
 		goto out;
 	}
 
 	*is_str = 0;
-	ctx = &out->ctx;
+
+	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+	ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
+
 	switch (index) {
 	case QP_PID:
 		param = qp->pid;
...
@@ -358,32 +362,32 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
 static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 			 int index)
 {
-	struct mlx5_query_eq_mbox_out *out;
-	struct mlx5_eq_context *ctx;
+	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
 	u64 param = 0;
+	void *ctx;
+	u32 *out;
 	int err;
 
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	out = kzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return param;
 
-	ctx = &out->ctx;
-
-	err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
+	err = mlx5_core_eq_query(dev, eq, out, outlen);
 	if (err) {
 		mlx5_core_warn(dev, "failed to query eq\n");
 		goto out;
 	}
+	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
 
 	switch (index) {
 	case EQ_NUM_EQES:
-		param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
 		break;
 	case EQ_INTR:
-		param = ctx->intr;
+		param = MLX5_GET(eqc, ctx, intr);
 		break;
 	case EQ_LOG_PG_SZ:
-		param = (ctx->log_page_size & 0x1f) + 12;
+		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
 		break;
 	}
...
@@ -395,37 +399,37 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			 int index)
 {
-	struct mlx5_query_cq_mbox_out *out;
-	struct mlx5_cq_context *ctx;
+	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
 	u64 param = 0;
+	void *ctx;
+	u32 *out;
 	int err;
 
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	out = mlx5_vzalloc(outlen);
 	if (!out)
 		return param;
 
-	ctx = &out->ctx;
-
-	err = mlx5_core_query_cq(dev, cq, out);
+	err = mlx5_core_query_cq(dev, cq, out, outlen);
 	if (err) {
 		mlx5_core_warn(dev, "failed to query cq\n");
 		goto out;
 	}
+	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
 
 	switch (index) {
 	case CQ_PID:
 		param = cq->pid;
 		break;
 	case CQ_NUM_CQES:
-		param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
 		break;
 	case CQ_LOG_PG_SZ:
-		param = (ctx->log_pg_sz & 0x1f) + 12;
+		param = MLX5_GET(cqc, ctx, log_page_size);
 		break;
 	}
 
 out:
-	kfree(out);
+	kvfree(out);
 	return param;
 }
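
Reads are symmetric to writes: MLX5_ADDR_OF() locates the context inside the queried outbox and MLX5_GET() extracts fields without any manual be32_to_cpu() shifting, as the three *_read_field() conversions above show. A small sketch (example_cq_nent is a hypothetical helper; query_cq_out/cqc are the layouts used above):

    /* Hypothetical helper: number of CQEs from a query_cq outbox. */
    static u32 example_cq_nent(void *out)
    {
    	void *ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
    
    	return 1 << MLX5_GET(cqc, ctx, log_cq_size);
    }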
...

drivers/net/ethernet/mellanox/mlx5/core/en.h
...
@@ -651,40 +651,6 @@ struct mlx5e_priv {
 	void                      *ppriv;
 };
 
-enum mlx5e_link_mode {
-	MLX5E_1000BASE_CX_SGMII	 = 0,
-	MLX5E_1000BASE_KX	 = 1,
-	MLX5E_10GBASE_CX4	 = 2,
-	MLX5E_10GBASE_KX4	 = 3,
-	MLX5E_10GBASE_KR	 = 4,
-	MLX5E_20GBASE_KR2	 = 5,
-	MLX5E_40GBASE_CR4	 = 6,
-	MLX5E_40GBASE_KR4	 = 7,
-	MLX5E_56GBASE_R4	 = 8,
-	MLX5E_10GBASE_CR	 = 12,
-	MLX5E_10GBASE_SR	 = 13,
-	MLX5E_10GBASE_ER	 = 14,
-	MLX5E_40GBASE_SR4	 = 15,
-	MLX5E_40GBASE_LR4	 = 16,
-	MLX5E_50GBASE_SR2	 = 18,
-	MLX5E_100GBASE_CR4	 = 20,
-	MLX5E_100GBASE_SR4	 = 21,
-	MLX5E_100GBASE_KR4	 = 22,
-	MLX5E_100GBASE_LR4	 = 23,
-	MLX5E_100BASE_TX	 = 24,
-	MLX5E_1000BASE_T	 = 25,
-	MLX5E_10GBASE_T		 = 26,
-	MLX5E_25GBASE_CR	 = 27,
-	MLX5E_25GBASE_KR	 = 28,
-	MLX5E_25GBASE_SR	 = 29,
-	MLX5E_50GBASE_CR2	 = 30,
-	MLX5E_50GBASE_KR2	 = 31,
-	MLX5E_LINK_MODES_NUMBER,
-};
-
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-
 void mlx5e_build_ptys2ethtool_map(void);
 void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
...

drivers/net/ethernet/mellanox/mlx5/core/en_common.c
...
@@ -60,24 +60,27 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
 			     struct mlx5_core_mkey *mkey)
 {
-	struct mlx5_create_mkey_mbox_in *in;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	void *mkc;
+	u32 *in;
 	int err;
 
-	in = mlx5_vzalloc(sizeof(*in));
+	in = mlx5_vzalloc(inlen);
 	if (!in)
 		return -ENOMEM;
 
-	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
-			MLX5_PERM_LOCAL_READ  |
-			MLX5_ACCESS_MODE_PA;
-	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
-	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+	MLX5_SET(mkc, mkc, lw, 1);
+	MLX5_SET(mkc, mkc, lr, 1);
 
-	err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
-				    NULL);
+	MLX5_SET(mkc, mkc, pd, pdn);
+	MLX5_SET(mkc, mkc, length64, 1);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 
-	kvfree(in);
+	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
 
+	kvfree(in);
 	return err;
 }
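
The mkey conversion follows the same shape: memory_key_mkey_entry addresses the mkc sub-structure, and the old flags/flags_pd bit-packing becomes named fields. A consolidated sketch of the post-conversion call sequence (example_create_pa_mkey is a hypothetical name; the calls mirror the + lines above):

    static int example_create_pa_mkey(struct mlx5_core_dev *mdev, u32 pdn,
    				      struct mlx5_core_mkey *mkey)
    {
    	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
    	void *mkc;
    	u32 *in;
    	int err;
    
    	in = mlx5_vzalloc(inlen);
    	if (!in)
    		return -ENOMEM;
    
    	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
    	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
    	MLX5_SET(mkc, mkc, lw, 1);		/* local write */
    	MLX5_SET(mkc, mkc, lr, 1);		/* local read */
    	MLX5_SET(mkc, mkc, pd, pdn);
    	MLX5_SET(mkc, mkc, length64, 1);	/* key spans the whole address space */
    	MLX5_SET(mkc, mkc, qpn, 0xffffff);	/* not bound to a specific QP */
    
    	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
    	kvfree(in);
    	return err;
    }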
...

drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
...
@@ -726,7 +726,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv    = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
 	u32 eth_proto_cap;
 	u32 eth_proto_admin;
 	u32 eth_proto_lp;
...
@@ -736,7 +736,6 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
 	int err;
 
 	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
 	if (err) {
 		netdev_err(netdev, "%s: query port ptys failed: %d\n",
 			   __func__, err);
...

drivers/net/ethernet/mellanox/mlx5/core/en_main.c
...
@@ -180,18 +180,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
 {
 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
-	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
 	struct mlx5_core_dev *mdev = priv->mdev;
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(query_vport_counter_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
 	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 
 	memset(out, 0, outlen);
 	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
...
@@ -492,7 +489,8 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
 
 	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
-	MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+	MLX5_SET64(modify_rq_in, in, modify_bitmask,
+		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
 	MLX5_SET(rqc, rqc, vsd, vsd);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
...
@@ -2022,14 +2020,15 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
 static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
 	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(tisc, tisc, prio, tc << 1);
 	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
 
+	if (mlx5_lag_is_lacp_owner(mdev))
+		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
 	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
 }
...
@@ -3228,35 +3227,34 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
 static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_create_mkey_mbox_in *in;
-	struct mlx5_mkey_seg *mkc;
-	int inlen = sizeof(*in);
-	u64 npages =
-		priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+	u64 npages = priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	void *mkc;
+	u32 *in;
 	int err;
 
 	in = mlx5_vzalloc(inlen);
 	if (!in)
 		return -ENOMEM;
 
-	mkc = &in->seg;
-	mkc->status = MLX5_MKEY_STATUS_FREE;
-	mkc->flags = MLX5_PERM_UMR_EN |
-		     MLX5_PERM_LOCAL_READ |
-		     MLX5_PERM_LOCAL_WRITE |
-		     MLX5_ACCESS_MODE_MTT;
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
-	mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
-	mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
-	mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
-	mkc->log2_page_size = PAGE_SHIFT;
+	MLX5_SET(mkc, mkc, free, 1);
+	MLX5_SET(mkc, mkc, umr_en, 1);
+	MLX5_SET(mkc, mkc, lw, 1);
+	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
 
-	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
-				    NULL, NULL);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+	MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
+	MLX5_SET(mkc, mkc, translations_octword_size,
+		 mlx5e_get_mtt_octw(npages));
+	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
 
-	kvfree(in);
+	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
 
+	kvfree(in);
 	return err;
 }
...
@@ -3375,6 +3373,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 	struct mlx5_eswitch_rep rep;
 
+	mlx5_lag_add(mdev, netdev);
+
 	if (mlx5e_vxlan_allowed(mdev)) {
 		rtnl_lock();
 		udp_tunnel_get_rx_info(netdev);
...
@@ -3397,6 +3397,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 	mlx5e_disable_async_events(priv);
+	mlx5_lag_remove(priv->mdev);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
...

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
...
@@ -416,8 +416,8 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
 {
 	rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
 	if (!rep->priv_data) {
-		pr_warn("Failed to create representor for vport %d\n",
-			rep->vport);
+		mlx5_core_warn(esw->dev, "Failed to create representor for vport %d\n",
+			       rep->vport);
 		return -EINVAL;
 	}
 	return 0;
...

drivers/net/ethernet/mellanox/mlx5/core/eq.c
...
@@ -86,23 +86,12 @@ struct cre_des_eq {
 
 static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
 {
-	struct mlx5_destroy_eq_mbox_in in;
-	struct mlx5_destroy_eq_mbox_out out;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
-	in.eqn = eqn;
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (!err)
-		goto ex;
+	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};
 
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-ex:
-	return err;
+	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
+	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
...
@@ -351,11 +340,13 @@ static void init_eq_buf(struct mlx5_eq *eq)
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
 {
+	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
 	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_create_eq_mbox_in *in;
-	struct mlx5_create_eq_mbox_out out;
-	int err;
+	__be64 *pas;
+	void *eqc;
 	int inlen;
+	u32 *in;
+	int err;
 
 	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
 	eq->cons_index = 0;
...
@@ -365,35 +356,36 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 	init_eq_buf(eq);
 
-	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
+	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
+		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_buf;
 	}
 
-	memset(&out, 0, sizeof(out));
-	mlx5_fill_page_array(&eq->buf, in->pas);
+	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
+	mlx5_fill_page_array(&eq->buf, pas);
 
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
-	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
-	in->ctx.intr = vecidx;
-	in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	in->events_mask = cpu_to_be64(mask);
+	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
+	MLX5_SET64(create_eq_in, in, event_bitmask, mask);
 
-	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
-	if (err)
-		goto err_in;
+	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
+	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+	MLX5_SET(eqc, eqc, uar_page, uar->index);
+	MLX5_SET(eqc, eqc, intr, vecidx);
+	MLX5_SET(eqc, eqc, log_page_size,
+		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
-	if (out.hdr.status) {
-		err = mlx5_cmd_status_to_err(&out.hdr);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (err)
 		goto err_in;
-	}
 
 	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
 		 name, pci_name(dev->pdev));
 
-	eq->eqn = out.eq_number;
+	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
 	eq->irqn = priv->msix_arr[vecidx].vector;
 	eq->dev = dev;
 	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
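
Commands that carry a trailing page-address array size their inbox as the fixed mlx5_ifc layout plus one pas[] element per buffer page, exactly as the inlen computation above does. Restated compactly (example_create_eq_inlen is a hypothetical helper; the macros are the ones used above):

    /* Illustrative: inbox length for CREATE_EQ with npages PAS entries. */
    static inline int example_create_eq_inlen(int npages)
    {
    	return MLX5_ST_SZ_BYTES(create_eq_in) +
    	       MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * npages;
    }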
...
@@ -547,22 +539,12 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
 }
 
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
-		       struct mlx5_query_eq_mbox_out *out, int outlen)
+		       u32 *out, int outlen)
 {
-	struct mlx5_query_eq_mbox_in in;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(out, 0, outlen);
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
-	in.eqn = eq->eqn;
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
-	if (err)
-		return err;
-
-	if (out->hdr.status)
-		err = mlx5_cmd_status_to_err(&out->hdr);
+	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
 
-	return err;
+	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

...
@@ -87,13 +87,9 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 					u32 events_mask)
 {
-	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
-	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
+	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
 	void *nic_vport_ctx;
-	int err;
-
-	memset(out, 0, sizeof(out));
-	memset(in, 0, sizeof(in));
 
 	MLX5_SET(modify_nic_vport_context_in, in,
 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
...
@@ -116,45 +112,31 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 	MLX5_SET(nic_vport_context, nic_vport_ctx,
 		 event_on_promisc_change, 1);
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		goto ex;
-	err = mlx5_cmd_status_to_err_v2(out);
-	if (err)
-		goto ex;
-	return 0;
-ex:
-	return err;
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 /* E-Switch vport context HW commands */
 static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
 				       u32 *out, int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};
 
 	MLX5_SET(query_nic_vport_context_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
 	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
 	if (vport)
 		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
-
-	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
 
 static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 				 u16 *vlan, u8 *qos)
 {
-	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
+	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
 	int err;
 	bool cvlan_strip;
 	bool cvlan_insert;
 
-	memset(out, 0, sizeof(out));
-
 	*vlan = 0;
 	*qos = 0;
...
@@ -188,27 +170,20 @@ static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
 					void *in, int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
-
-	memset(out, 0, sizeof(out));
+	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
 
-	MLX5_SET(modify_esw_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
 	if (vport)
 		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
-
-	return mlx5_cmd_exec_check_status(dev, in, inlen,
-					  out, sizeof(out));
+	MLX5_SET(modify_esw_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 				  u16 vlan, u8 qos, bool set)
 {
-	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
 
 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
...
@@ -216,7 +191,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
 		  vport, vlan, qos, set);
 	if (set) {
 		MLX5_SET(modify_esw_vport_context_in, in,
 			 esw_vport_context.vport_cvlan_strip, 1);
...
@@ -241,13 +215,10 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
 				  u8 *mac, u8 vlan_valid, u16 vlan)
 {
-	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
-	u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
+	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
 	u8 *in_mac_addr;
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
 	MLX5_SET(set_l2_table_entry_in, in, opcode,
 		 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
 	MLX5_SET(set_l2_table_entry_in, in, table_index, index);
...
@@ -257,23 +228,18 @@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
 	in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
 	ether_addr_copy(&in_mac_addr[2], mac);
 
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
-					  out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in),
+			     out, sizeof(out));
 }
 
 static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
 {
-	u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
-	u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
+	u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
 
 	MLX5_SET(delete_l2_table_entry_in, in, opcode,
 		 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
 	MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
-					  out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in),
+			     out, sizeof(out));
 }
 
 static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
...
@@ -340,7 +306,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
...
@@ -340,7 +306,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
spec
=
mlx5_vzalloc
(
sizeof
(
*
spec
));
spec
=
mlx5_vzalloc
(
sizeof
(
*
spec
));
if
(
!
spec
)
{
if
(
!
spec
)
{
pr_warn
(
"FDB: Failed to alloc match parameters
\n
"
);
esw_warn
(
esw
->
dev
,
"FDB: Failed to alloc match parameters
\n
"
);
return
NULL
;
return
NULL
;
}
}
dmac_v
=
MLX5_ADDR_OF
(
fte_match_param
,
spec
->
match_value
,
dmac_v
=
MLX5_ADDR_OF
(
fte_match_param
,
spec
->
match_value
,
...
@@ -374,8 +340,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
...
@@ -374,8 +340,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
0
,
&
dest
);
0
,
&
dest
);
if
(
IS_ERR
(
flow_rule
))
{
if
(
IS_ERR
(
flow_rule
))
{
pr_warn
(
esw_warn
(
esw
->
dev
,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)
\n
"
,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)
\n
"
,
dmac_v
,
dmac_c
,
vport
,
PTR_ERR
(
flow_rule
));
dmac_v
,
dmac_c
,
vport
,
PTR_ERR
(
flow_rule
));
flow_rule
=
NULL
;
flow_rule
=
NULL
;
}
}
...
@@ -1352,8 +1318,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
...
@@ -1352,8 +1318,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0
,
NULL
);
0
,
NULL
);
if
(
IS_ERR
(
vport
->
ingress
.
allow_rule
))
{
if
(
IS_ERR
(
vport
->
ingress
.
allow_rule
))
{
err
=
PTR_ERR
(
vport
->
ingress
.
allow_rule
);
err
=
PTR_ERR
(
vport
->
ingress
.
allow_rule
);
pr_warn
(
"vport[%d] configure ingress allow rule, err(%d)
\n
"
,
esw_warn
(
esw
->
dev
,
vport
->
vport
,
err
);
"vport[%d] configure ingress allow rule, err(%d)
\n
"
,
vport
->
vport
,
err
);
vport
->
ingress
.
allow_rule
=
NULL
;
vport
->
ingress
.
allow_rule
=
NULL
;
goto
out
;
goto
out
;
}
}
...
@@ -1365,8 +1332,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
...
@@ -1365,8 +1332,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0
,
NULL
);
0
,
NULL
);
if
(
IS_ERR
(
vport
->
ingress
.
drop_rule
))
{
if
(
IS_ERR
(
vport
->
ingress
.
drop_rule
))
{
err
=
PTR_ERR
(
vport
->
ingress
.
drop_rule
);
err
=
PTR_ERR
(
vport
->
ingress
.
drop_rule
);
pr_warn
(
"vport[%d] configure ingress drop rule, err(%d)
\n
"
,
esw_warn
(
esw
->
dev
,
vport
->
vport
,
err
);
"vport[%d] configure ingress drop rule, err(%d)
\n
"
,
vport
->
vport
,
err
);
vport
->
ingress
.
drop_rule
=
NULL
;
vport
->
ingress
.
drop_rule
=
NULL
;
goto
out
;
goto
out
;
}
}
...
@@ -1418,8 +1386,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
...
@@ -1418,8 +1386,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0
,
NULL
);
0
,
NULL
);
if
(
IS_ERR
(
vport
->
egress
.
allowed_vlan
))
{
if
(
IS_ERR
(
vport
->
egress
.
allowed_vlan
))
{
err
=
PTR_ERR
(
vport
->
egress
.
allowed_vlan
);
err
=
PTR_ERR
(
vport
->
egress
.
allowed_vlan
);
pr_warn
(
"vport[%d] configure egress allowed vlan rule failed, err(%d)
\n
"
,
esw_warn
(
esw
->
dev
,
vport
->
vport
,
err
);
"vport[%d] configure egress allowed vlan rule failed, err(%d)
\n
"
,
vport
->
vport
,
err
);
vport
->
egress
.
allowed_vlan
=
NULL
;
vport
->
egress
.
allowed_vlan
=
NULL
;
goto
out
;
goto
out
;
}
}
...
@@ -1432,8 +1401,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
...
@@ -1432,8 +1401,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0
,
NULL
);
0
,
NULL
);
if
(
IS_ERR
(
vport
->
egress
.
drop_rule
))
{
if
(
IS_ERR
(
vport
->
egress
.
drop_rule
))
{
err
=
PTR_ERR
(
vport
->
egress
.
drop_rule
);
err
=
PTR_ERR
(
vport
->
egress
.
drop_rule
);
pr_warn
(
"vport[%d] configure egress drop rule failed, err(%d)
\n
"
,
esw_warn
(
esw
->
dev
,
vport
->
vport
,
err
);
"vport[%d] configure egress drop rule failed, err(%d)
\n
"
,
vport
->
vport
,
err
);
vport
->
egress
.
drop_rule
=
NULL
;
vport
->
egress
.
drop_rule
=
NULL
;
}
}
out:
out:
...
@@ -1903,7 +1873,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
...
@@ -1903,7 +1873,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct
ifla_vf_stats
*
vf_stats
)
struct
ifla_vf_stats
*
vf_stats
)
{
{
int
outlen
=
MLX5_ST_SZ_BYTES
(
query_vport_counter_out
);
int
outlen
=
MLX5_ST_SZ_BYTES
(
query_vport_counter_out
);
u32
in
[
MLX5_ST_SZ_DW
(
query_vport_counter_in
)];
u32
in
[
MLX5_ST_SZ_DW
(
query_vport_counter_in
)]
=
{
0
}
;
int
err
=
0
;
int
err
=
0
;
u32
*
out
;
u32
*
out
;
...
@@ -1916,8 +1886,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
...
@@ -1916,8 +1886,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
if
(
!
out
)
if
(
!
out
)
return
-
ENOMEM
;
return
-
ENOMEM
;
memset
(
in
,
0
,
sizeof
(
in
));
MLX5_SET
(
query_vport_counter_in
,
in
,
opcode
,
MLX5_SET
(
query_vport_counter_in
,
in
,
opcode
,
MLX5_CMD_OP_QUERY_VPORT_COUNTER
);
MLX5_CMD_OP_QUERY_VPORT_COUNTER
);
MLX5_SET
(
query_vport_counter_in
,
in
,
op_mod
,
0
);
MLX5_SET
(
query_vport_counter_in
,
in
,
op_mod
,
0
);
...
...
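Most of the eswitch.c hunks above repeat two mechanical cleanups: command mailbox arrays move from a memset() to a zero initializer at the point of definition, and bare pr_warn() calls become device-scoped esw_warn() so the warning names the failing device. The initializer half is pure C semantics; a minimal userspace sketch (not mlx5 code, array size chosen arbitrarily) of why the two forms are equivalent:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int in_new[8] = {0};	/* zeroed at definition, as in the patch */

		unsigned int in_old[8];		/* the replaced two-step form */
		memset(in_old, 0, sizeof(in_old));

		/* Both arrays are fully zeroed: "= {0}" zero-initializes every element. */
		printf("%u %u\n", in_new[5], in_old[5]);
		return 0;
	}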
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -41,10 +41,8 @@
 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
 			    struct mlx5_flow_table *ft)
 {
-	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
-	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
+	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(set_flow_table_root_in, in, opcode,
 		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
...
@@ -55,30 +53,23 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
 		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
 	}
 
-	memset(out, 0, sizeof(out));
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 			       u16 vport,
+			       enum fs_flow_table_op_mod op_mod,
 			       enum fs_flow_table_type type, unsigned int level,
 			       unsigned int log_size, struct mlx5_flow_table
 			       *next_ft, unsigned int *table_id)
 {
-	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
-	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
 	int err;
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(create_flow_table_in, in, opcode,
 		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
 
-	if (next_ft) {
-		MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
-		MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
-	}
 	MLX5_SET(create_flow_table_in, in, table_type, type);
 	MLX5_SET(create_flow_table_in, in, level, level);
 	MLX5_SET(create_flow_table_in, in, log_size, log_size);
...
@@ -87,10 +78,23 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 		MLX5_SET(create_flow_table_in, in, other_vport, 1);
 	}
 
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					 sizeof(out));
+	switch (op_mod) {
+	case FS_FT_OP_MOD_NORMAL:
+		if (next_ft) {
+			MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
+			MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
+		}
+		break;
+
+	case FS_FT_OP_MOD_LAG_DEMUX:
+		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
+		if (next_ft)
+			MLX5_SET(create_flow_table_in, in,
+				 lag_master_next_table_id, next_ft->id);
+		break;
+	}
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (!err)
 		*table_id = MLX5_GET(create_flow_table_out, out,
 				     table_id);
...
@@ -100,11 +104,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
 				struct mlx5_flow_table *ft)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
 	MLX5_SET(destroy_flow_table_in, in, opcode,
 		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
...
@@ -115,39 +116,49 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
 		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
 	}
 
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out,
+			     sizeof(out));
 }
 
 int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
 			       struct mlx5_flow_table *ft,
 			       struct mlx5_flow_table *next_ft)
 {
-	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)];
-	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)];
+	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
 	MLX5_SET(modify_flow_table_in, in, opcode,
 		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
 	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
 	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
-	if (ft->vport) {
-		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
-		MLX5_SET(modify_flow_table_in, in, other_vport, 1);
-	}
-	MLX5_SET(modify_flow_table_in, in, modify_field_select,
-		 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
-	if (next_ft) {
-		MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
-		MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id);
+
+	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
+		MLX5_SET(modify_flow_table_in, in, modify_field_select,
+			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
+		if (next_ft) {
+			MLX5_SET(modify_flow_table_in, in,
+				 lag_master_next_table_id, next_ft->id);
+		} else {
+			MLX5_SET(modify_flow_table_in, in,
+				 lag_master_next_table_id, 0);
+		}
 	} else {
-		MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+		if (ft->vport) {
+			MLX5_SET(modify_flow_table_in, in, vport_number,
+				 ft->vport);
+			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+		}
+		MLX5_SET(modify_flow_table_in, in, modify_field_select,
+			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
+		if (next_ft) {
+			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
+			MLX5_SET(modify_flow_table_in, in, table_miss_id,
+				 next_ft->id);
+		} else {
+			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+		}
 	}
 
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out,
+			     sizeof(out));
 }
 
 int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
...
@@ -155,12 +166,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
 			       u32 *in,
 			       unsigned int *group_id)
 {
+	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
 	int err;
 
-	memset(out, 0, sizeof(out));
 	MLX5_SET(create_flow_group_in, in, opcode,
 		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
 	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
...
@@ -170,13 +179,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
 		MLX5_SET(create_flow_group_in, in, other_vport, 1);
 	}
 
-	err = mlx5_cmd_exec_check_status(dev, in,
-					 inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in,
+			    inlen, out, sizeof(out));
 	if (!err)
 		*group_id = MLX5_GET(create_flow_group_out, out,
 				     group_id);
 	return err;
 }
...
@@ -184,11 +190,8 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
 				struct mlx5_flow_table *ft,
 				unsigned int group_id)
 {
-	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
-	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
 	MLX5_SET(destroy_flow_group_in, in, opcode,
 		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
...
@@ -200,8 +203,7 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
 		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
 	}
 
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out,
+			     sizeof(out));
 }
 
 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
...
@@ -212,7 +214,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 {
 	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
 		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
-	u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
 	struct mlx5_flow_rule *dst;
 	void *in_flow_context;
 	void *in_match_value;
...
@@ -290,11 +292,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			 list_size);
 	}
 
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
-					 sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	kvfree(in);
 	return err;
 }
...
@@ -303,7 +302,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
 			unsigned group_id,
 			struct fs_fte *fte)
 {
 	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
 }
 
 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
...
@@ -327,12 +326,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
 			struct mlx5_flow_table *ft,
 			unsigned int index)
 {
-	u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
-	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
-	int err;
+	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
 	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
 	MLX5_SET(delete_fte_in, in, table_type, ft->type);
...
@@ -343,74 +338,55 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
 		MLX5_SET(delete_fte_in, in, other_vport, 1);
 	}
 
-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
-
-	return err;
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
 {
-	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
-	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
 	int err;
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
 	MLX5_SET(alloc_flow_counter_in, in, opcode,
 		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
 
-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					 sizeof(out));
-	if (!err)
-		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
-
-	return err;
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+	return 0;
 }
 
 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
 {
-	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
-	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
+	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
 	MLX5_SET(dealloc_flow_counter_in, in, opcode,
 		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
 	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
 		      u64 *packets, u64 *bytes)
 {
 	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
-		MLX5_ST_SZ_BYTES(traffic_counter)];
-	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+		MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
+	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
 	void *stats;
 	int err = 0;
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
 	MLX5_SET(query_flow_counter_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
 	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
 	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (err)
 		return err;
 
 	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
 	*packets = MLX5_GET64(traffic_counter, stats, packets);
 	*bytes = MLX5_GET64(traffic_counter, stats, octets);
 	return 0;
 }
...
@@ -448,18 +424,14 @@ void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
 int
 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
 {
-	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(query_flow_counter_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
 	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
 	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
 	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
-					  b->out, b->outlen);
+	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
 }
 
 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
...
@@ -480,3 +452,51 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
 	*packets = MLX5_GET64(traffic_counter, stats, packets);
 	*bytes = MLX5_GET64(traffic_counter, stats, octets);
 }
+
+#define MAX_ENCAP_SIZE (128)
+
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+			 int header_type,
+			 size_t size,
+			 void *encap_header,
+			 u32 *encap_id)
+{
+	u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
+	u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
+	      (MAX_ENCAP_SIZE / sizeof(u32))];
+	void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
+					     encap_header);
+	void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
+				    encap_header);
+	int inlen = header - (void *)in + size;
+	int err;
+
+	if (size > MAX_ENCAP_SIZE)
+		return -EINVAL;
+
+	memset(in, 0, inlen);
+	MLX5_SET(alloc_encap_header_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
+	MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
+	MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
+	memcpy(header, encap_header, size);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+	*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+	return err;
+}
+
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
+	u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(dealloc_encap_header_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+	MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
+
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
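The two encap helpers added at the bottom of fs_cmd.c are thin wrappers over the ALLOC/DEALLOC_ENCAP_HEADER firmware commands. A hypothetical caller sketch (not part of this patch; the header bytes and the header_type value 0 are placeholders for whatever the mlx5_ifc definitions require):

	u8 encap_buf[64] = {0};	/* image of the tunnel header to prepend */
	u32 encap_id;
	int err;

	/* header_type 0 is a placeholder; real values come from mlx5_ifc. */
	err = mlx5_cmd_alloc_encap(dev, 0, sizeof(encap_buf), encap_buf,
				   &encap_id);
	if (err)
		return err;

	/* ... reference encap_id from a flow steering rule ... */

	mlx5_cmd_dealloc_encap(dev, encap_id);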
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -35,6 +35,7 @@
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
 			       u16 vport,
+			       enum fs_flow_table_op_mod op_mod,
 			       enum fs_flow_table_type type, unsigned int level,
 			       unsigned int log_size, struct mlx5_flow_table
 			       *next_ft, unsigned int *table_id);
...
@@ -88,4 +89,11 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
 			  struct mlx5_cmd_fc_bulk *b, u16 id,
 			  u64 *packets, u64 *bytes);
+
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+			 int header_type,
+			 size_t size,
+			 void *encap_header,
+			 u32 *encap_id);
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id);
+
 #endif
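Because the mlx5_cmd_create_flow_table() prototype grows an op_mod argument, every caller that wants the old miss-table behavior now passes FS_FT_OP_MOD_NORMAL; the fs_core.c hunks below thread the value through ft->op_mod. Illustrative call shape only (mirrors __mlx5_create_flow_table() below):

	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, FS_FT_OP_MOD_NORMAL,
					 ft->type, ft->level, log_table_sz,
					 next_ft, &ft->id);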
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -96,6 +96,10 @@
 #define OFFLOADS_NUM_PRIOS 1
 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
 
+#define LAG_PRIO_NUM_LEVELS 1
+#define LAG_NUM_PRIOS 1
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
 struct node_caps {
 	size_t	arr_sz;
 	long	*caps;
...
@@ -111,12 +115,16 @@ static struct init_tree_node {
 	int num_levels;
 } root_fs = {
 	.type = FS_TYPE_NAMESPACE,
-	.ar_size = 6,
+	.ar_size = 7,
 	.children = (struct init_tree_node[]) {
 		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
 			 FS_CHAINING_CAPS,
 			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
 						  BY_PASS_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+						  LAG_PRIO_NUM_LEVELS))),
 		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
 			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
 		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
...
@@ -345,7 +353,7 @@ static void del_flow_table(struct fs_node *node)
 	err = mlx5_cmd_destroy_flow_table(dev, ft);
 	if (err)
-		pr_warn("flow steering can't destroy ft\n");
+		mlx5_core_warn(dev, "flow steering can't destroy ft\n");
 	fs_get_obj(prio, ft->node.parent);
 	prio->num_ft--;
 }
...
@@ -364,7 +372,7 @@ static void del_rule(struct fs_node *node)
 	match_value = mlx5_vzalloc(match_len);
 	if (!match_value) {
-		pr_warn("failed to allocate inbox\n");
+		mlx5_core_warn(dev, "failed to allocate inbox\n");
 		return;
 	}
...
@@ -387,8 +395,9 @@ static void del_rule(struct fs_node *node)
 					  modify_mask,
 					  fte);
 		if (err)
-			pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
-				__func__, fg->id, fte->index);
+			mlx5_core_warn(dev,
+				       "%s can't del rule fg id=%d fte_index=%d\n",
+				       __func__, fg->id, fte->index);
 	}
 	kvfree(match_value);
 }
...
@@ -409,8 +418,9 @@ static void del_fte(struct fs_node *node)
 	err = mlx5_cmd_delete_fte(dev, ft,
 				  fte->index);
 	if (err)
-		pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
-			fte->index, fg->id);
+		mlx5_core_warn(dev,
+			       "flow steering can't delete fte in index %d of flow group id %d\n",
+			       fte->index, fg->id);
 
 	fte->status = 0;
 	fg->num_ftes--;
...
@@ -427,8 +437,8 @@ static void del_flow_group(struct fs_node *node)
 	dev = get_dev(&ft->node);
 
 	if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
-		pr_warn("flow steering can't destroy fg %d of ft %d\n",
-			fg->id, ft->id);
+		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+			       fg->id, ft->id);
 }
 
 static struct fs_fte *alloc_fte(u8 action,
...
@@ -475,7 +485,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
 }
 
 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
-						enum fs_flow_table_type table_type)
+						enum fs_flow_table_type table_type,
+						enum fs_flow_table_op_mod op_mod)
 {
 	struct mlx5_flow_table *ft;
...
@@ -485,6 +496,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
 	ft->level = level;
 	ft->node.type  = FS_TYPE_FLOW_TABLE;
+	ft->op_mod = op_mod;
 	ft->type = table_type;
 	ft->vport = vport;
 	ft->max_fte = max_fte;
...
@@ -722,6 +734,7 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
 }
 
 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+							enum fs_flow_table_op_mod op_mod,
 							u16 vport, int prio,
 							int max_fte, u32 level)
 {
...
@@ -754,18 +767,19 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 	level += fs_prio->start_level;
 	ft = alloc_flow_table(level,
 			      vport,
-			      roundup_pow_of_two(max_fte),
-			      root->table_type);
+			      max_fte ? roundup_pow_of_two(max_fte) : 0,
+			      root->table_type,
+			      op_mod);
 	if (!ft) {
 		err = -ENOMEM;
 		goto unlock_root;
 	}
 
 	tree_init_node(&ft->node, 1, del_flow_table);
-	log_table_sz = ilog2(ft->max_fte);
+	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
 	next_ft = find_next_chained_ft(fs_prio);
-	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
-					 log_table_sz, next_ft, &ft->id);
+	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
+					 ft->level, log_table_sz, next_ft, &ft->id);
 	if (err)
 		goto free_ft;
...
@@ -792,15 +806,26 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 					       int prio, int max_fte,
 					       u32 level)
 {
-	return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
+					max_fte, level);
 }
 
 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
 						     int prio, int max_fte,
 						     u32 level, u16 vport)
 {
-	return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
+					max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+					       struct mlx5_flow_namespace *ns,
+					       int prio, u32 level)
+{
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
+					level);
 }
+EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
 
 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 							    int prio,
...
@@ -1379,6 +1404,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 	switch (type) {
 	case MLX5_FLOW_NAMESPACE_BYPASS:
+	case MLX5_FLOW_NAMESPACE_LAG:
 	case MLX5_FLOW_NAMESPACE_OFFLOADS:
 	case MLX5_FLOW_NAMESPACE_ETHTOOL:
 	case MLX5_FLOW_NAMESPACE_KERNEL:
...
@@ -1401,6 +1427,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			return &steering->esw_ingress_root_ns->ns;
 		else
 			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
+		if (steering->sniffer_rx_root_ns)
+			return &steering->sniffer_rx_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
+		if (steering->sniffer_tx_root_ns)
+			return &steering->sniffer_tx_root_ns->ns;
+		else
+			return NULL;
 	default:
 		return NULL;
 	}
...
@@ -1700,10 +1736,46 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 	cleanup_root_ns(steering->esw_egress_root_ns);
 	cleanup_root_ns(steering->esw_ingress_root_ns);
 	cleanup_root_ns(steering->fdb_root_ns);
+	cleanup_root_ns(steering->sniffer_rx_root_ns);
+	cleanup_root_ns(steering->sniffer_tx_root_ns);
 	mlx5_cleanup_fc_stats(dev);
 	kfree(steering);
 }
 
+static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
+	if (!steering->sniffer_tx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_tx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
+static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
+	if (!steering->sniffer_rx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_rx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 {
 	struct fs_prio *prio;
...
@@ -1800,6 +1872,18 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		}
 	}
 
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
+		err = init_sniffer_rx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
+		err = init_sniffer_tx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
 	return 0;
 err:
 	mlx5_cleanup_fs(dev);
...
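The newly exported entry point is the piece a RoCE-bonding consumer would use: look up the new LAG namespace, then create a demux table in it. A hypothetical consumer sketch (not from this patch; error handling trimmed, priority and level values illustrative):

	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_LAG);
	if (!ns)
		return -EOPNOTSUPP;

	/* max_fte is forced to 0 internally for LAG demux tables. */
	ft = mlx5_create_lag_demux_flow_table(ns, 0 /* prio */, 0 /* level */);
	if (IS_ERR(ft))
		return PTR_ERR(ft);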
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -49,6 +49,13 @@ enum fs_flow_table_type {
 	FS_FT_ESW_EGRESS_ACL  = 0x2,
 	FS_FT_ESW_INGRESS_ACL = 0x3,
 	FS_FT_FDB             = 0X4,
+	FS_FT_SNIFFER_RX      = 0X5,
+	FS_FT_SNIFFER_TX      = 0X6,
+};
+
+enum fs_flow_table_op_mod {
+	FS_FT_OP_MOD_NORMAL,
+	FS_FT_OP_MOD_LAG_DEMUX,
 };
 
 enum fs_fte_status {
...
@@ -61,6 +68,8 @@ struct mlx5_flow_steering {
 	struct mlx5_flow_root_namespace *fdb_root_ns;
 	struct mlx5_flow_root_namespace *esw_egress_root_ns;
 	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace	*sniffer_tx_root_ns;
+	struct mlx5_flow_root_namespace	*sniffer_rx_root_ns;
 };
 
 struct fs_node {
...
@@ -93,6 +102,7 @@ struct mlx5_flow_table {
 	unsigned int			max_fte;
 	unsigned int			level;
 	enum fs_flow_table_type		type;
+	enum fs_flow_table_op_mod	op_mod;
 	struct {
 		bool			active;
 		unsigned int		required_groups;
...
drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -38,13 +38,10 @@
 static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
 				  int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
 int mlx5_query_board_id(struct mlx5_core_dev *dev)
...
@@ -162,38 +159,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
 {
-	struct mlx5_cmd_init_hca_mbox_in in;
-	struct mlx5_cmd_init_hca_mbox_out out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
 {
-	struct mlx5_cmd_teardown_hca_mbox_in in;
-	struct mlx5_cmd_teardown_hca_mbox_out out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
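For context on the series-wide conversion visible here and in fs_cmd.c: mlx5_cmd_exec_check_status() was a wrapper that ran the command and then translated the status/syndrome words of the output mailbox into an errno. Once mlx5_cmd_exec() itself folds that check in, the wrapper becomes dead weight and every caller loses a line. Roughly (a sketch of the pre-4.9 helper from the older driver, so treat the exact body as approximate):

	int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
				       int in_size, u32 *out, int out_size)
	{
		int err;

		err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
		if (err)
			return err;

		/* Translate the FW status/syndrome in 'out' into an errno. */
		return mlx5_cmd_status_to_err_v2(out);
	}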
drivers/net/ethernet/mellanox/mlx5/core/lag.c (new file, mode 100644)
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
enum
{
MLX5_LAG_FLAG_BONDED
=
1
<<
0
,
};
struct
lag_func
{
struct
mlx5_core_dev
*
dev
;
struct
net_device
*
netdev
;
};
/* Used for collection of netdev event info. */
struct
lag_tracker
{
enum
netdev_lag_tx_type
tx_type
;
struct
netdev_lag_lower_state_info
netdev_state
[
MLX5_MAX_PORTS
];
bool
is_bonded
;
};
/* LAG data of a ConnectX card.
* It serves both its phys functions.
*/
struct
mlx5_lag
{
u8
flags
;
u8
v2p_map
[
MLX5_MAX_PORTS
];
struct
lag_func
pf
[
MLX5_MAX_PORTS
];
struct
lag_tracker
tracker
;
struct
delayed_work
bond_work
;
struct
notifier_block
nb
;
};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
*/
static
DEFINE_MUTEX
(
lag_mutex
);
static
int
mlx5_cmd_create_lag
(
struct
mlx5_core_dev
*
dev
,
u8
remap_port1
,
u8
remap_port2
)
{
u32
in
[
MLX5_ST_SZ_DW
(
create_lag_in
)]
=
{
0
};
u32
out
[
MLX5_ST_SZ_DW
(
create_lag_out
)]
=
{
0
};
void
*
lag_ctx
=
MLX5_ADDR_OF
(
create_lag_in
,
in
,
ctx
);
MLX5_SET
(
create_lag_in
,
in
,
opcode
,
MLX5_CMD_OP_CREATE_LAG
);
MLX5_SET
(
lagc
,
lag_ctx
,
tx_remap_affinity_1
,
remap_port1
);
MLX5_SET
(
lagc
,
lag_ctx
,
tx_remap_affinity_2
,
remap_port2
);
return
mlx5_cmd_exec
(
dev
,
in
,
sizeof
(
in
),
out
,
sizeof
(
out
));
}
static
int
mlx5_cmd_modify_lag
(
struct
mlx5_core_dev
*
dev
,
u8
remap_port1
,
u8
remap_port2
)
{
u32
in
[
MLX5_ST_SZ_DW
(
modify_lag_in
)]
=
{
0
};
u32
out
[
MLX5_ST_SZ_DW
(
modify_lag_out
)]
=
{
0
};
void
*
lag_ctx
=
MLX5_ADDR_OF
(
modify_lag_in
,
in
,
ctx
);
MLX5_SET
(
modify_lag_in
,
in
,
opcode
,
MLX5_CMD_OP_MODIFY_LAG
);
MLX5_SET
(
modify_lag_in
,
in
,
field_select
,
0x1
);
MLX5_SET
(
lagc
,
lag_ctx
,
tx_remap_affinity_1
,
remap_port1
);
MLX5_SET
(
lagc
,
lag_ctx
,
tx_remap_affinity_2
,
remap_port2
);
return
mlx5_cmd_exec
(
dev
,
in
,
sizeof
(
in
),
out
,
sizeof
(
out
));
}
static
int
mlx5_cmd_destroy_lag
(
struct
mlx5_core_dev
*
dev
)
{
u32
in
[
MLX5_ST_SZ_DW
(
destroy_lag_in
)]
=
{
0
};
u32
out
[
MLX5_ST_SZ_DW
(
destroy_lag_out
)]
=
{
0
};
MLX5_SET
(
destroy_lag_in
,
in
,
opcode
,
MLX5_CMD_OP_DESTROY_LAG
);
return
mlx5_cmd_exec
(
dev
,
in
,
sizeof
(
in
),
out
,
sizeof
(
out
));
}
int
mlx5_cmd_create_vport_lag
(
struct
mlx5_core_dev
*
dev
)
{
u32
in
[
MLX5_ST_SZ_DW
(
create_vport_lag_in
)]
=
{
0
};
u32
out
[
MLX5_ST_SZ_DW
(
create_vport_lag_out
)]
=
{
0
};
MLX5_SET
(
create_vport_lag_in
,
in
,
opcode
,
MLX5_CMD_OP_CREATE_VPORT_LAG
);
return
mlx5_cmd_exec
(
dev
,
in
,
sizeof
(
in
),
out
,
sizeof
(
out
));
}
EXPORT_SYMBOL
(
mlx5_cmd_create_vport_lag
);
int
mlx5_cmd_destroy_vport_lag
(
struct
mlx5_core_dev
*
dev
)
{
u32
in
[
MLX5_ST_SZ_DW
(
destroy_vport_lag_in
)]
=
{
0
};
u32
out
[
MLX5_ST_SZ_DW
(
destroy_vport_lag_out
)]
=
{
0
};
MLX5_SET
(
destroy_vport_lag_in
,
in
,
opcode
,
MLX5_CMD_OP_DESTROY_VPORT_LAG
);
return
mlx5_cmd_exec
(
dev
,
in
,
sizeof
(
in
),
out
,
sizeof
(
out
));
}
EXPORT_SYMBOL
(
mlx5_cmd_destroy_vport_lag
);
static
struct
mlx5_lag
*
mlx5_lag_dev_get
(
struct
mlx5_core_dev
*
dev
)
{
return
dev
->
priv
.
lag
;
}
static
int
mlx5_lag_dev_get_netdev_idx
(
struct
mlx5_lag
*
ldev
,
struct
net_device
*
ndev
)
{
int
i
;
for
(
i
=
0
;
i
<
MLX5_MAX_PORTS
;
i
++
)
if
(
ldev
->
pf
[
i
].
netdev
==
ndev
)
return
i
;
return
-
1
;
}
static
bool
mlx5_lag_is_bonded
(
struct
mlx5_lag
*
ldev
)
{
return
!!
(
ldev
->
flags
&
MLX5_LAG_FLAG_BONDED
);
}
static
void
mlx5_infer_tx_affinity_mapping
(
struct
lag_tracker
*
tracker
,
u8
*
port1
,
u8
*
port2
)
{
if
(
tracker
->
tx_type
==
NETDEV_LAG_TX_TYPE_ACTIVEBACKUP
)
{
if
(
tracker
->
netdev_state
[
0
].
tx_enabled
)
{
*
port1
=
1
;
*
port2
=
1
;
}
else
{
*
port1
=
2
;
*
port2
=
2
;
}
}
else
{
*
port1
=
1
;
*
port2
=
2
;
if
(
!
tracker
->
netdev_state
[
0
].
link_up
)
*
port1
=
2
;
else
if
(
!
tracker
->
netdev_state
[
1
].
link_up
)
*
port2
=
1
;
}
}
static
void
mlx5_activate_lag
(
struct
mlx5_lag
*
ldev
,
struct
lag_tracker
*
tracker
)
{
struct
mlx5_core_dev
*
dev0
=
ldev
->
pf
[
0
].
dev
;
int
err
;
ldev
->
flags
|=
MLX5_LAG_FLAG_BONDED
;
mlx5_infer_tx_affinity_mapping
(
tracker
,
&
ldev
->
v2p_map
[
0
],
&
ldev
->
v2p_map
[
1
]);
err
=
mlx5_cmd_create_lag
(
dev0
,
ldev
->
v2p_map
[
0
],
ldev
->
v2p_map
[
1
]);
if
(
err
)
mlx5_core_err
(
dev0
,
"Failed to create LAG (%d)
\n
"
,
err
);
}
static
void
mlx5_deactivate_lag
(
struct
mlx5_lag
*
ldev
)
{
struct
mlx5_core_dev
*
dev0
=
ldev
->
pf
[
0
].
dev
;
int
err
;
ldev
->
flags
&=
~
MLX5_LAG_FLAG_BONDED
;
err
=
mlx5_cmd_destroy_lag
(
dev0
);
if
(
err
)
mlx5_core_err
(
dev0
,
"Failed to destroy LAG (%d)
\n
"
,
err
);
}
static
void
mlx5_do_bond
(
struct
mlx5_lag
*
ldev
)
{
struct
mlx5_core_dev
*
dev0
=
ldev
->
pf
[
0
].
dev
;
struct
mlx5_core_dev
*
dev1
=
ldev
->
pf
[
1
].
dev
;
struct
lag_tracker
tracker
;
u8
v2p_port1
,
v2p_port2
;
int
i
,
err
;
if
(
!
dev0
||
!
dev1
)
return
;
mutex_lock
(
&
lag_mutex
);
tracker
=
ldev
->
tracker
;
mutex_unlock
(
&
lag_mutex
);
if
(
tracker
.
is_bonded
&&
!
mlx5_lag_is_bonded
(
ldev
))
{
if
(
mlx5_sriov_is_enabled
(
dev0
)
||
mlx5_sriov_is_enabled
(
dev1
))
{
mlx5_core_warn
(
dev0
,
"LAG is not supported with SRIOV"
);
return
;
}
for
(
i
=
0
;
i
<
MLX5_MAX_PORTS
;
i
++
)
mlx5_remove_dev_by_protocol
(
ldev
->
pf
[
i
].
dev
,
MLX5_INTERFACE_PROTOCOL_IB
);
mlx5_activate_lag
(
ldev
,
&
tracker
);
mlx5_add_dev_by_protocol
(
dev0
,
MLX5_INTERFACE_PROTOCOL_IB
);
mlx5_nic_vport_enable_roce
(
dev1
);
}
else
if
(
tracker
.
is_bonded
&&
mlx5_lag_is_bonded
(
ldev
))
{
mlx5_infer_tx_affinity_mapping
(
&
tracker
,
&
v2p_port1
,
&
v2p_port2
);
if
((
v2p_port1
!=
ldev
->
v2p_map
[
0
])
||
(
v2p_port2
!=
ldev
->
v2p_map
[
1
]))
{
ldev
->
v2p_map
[
0
]
=
v2p_port1
;
ldev
->
v2p_map
[
1
]
=
v2p_port2
;
err
=
mlx5_cmd_modify_lag
(
dev0
,
v2p_port1
,
v2p_port2
);
if
(
err
)
mlx5_core_err
(
dev0
,
"Failed to modify LAG (%d)
\n
"
,
err
);
}
}
else
if
(
!
tracker
.
is_bonded
&&
mlx5_lag_is_bonded
(
ldev
))
{
mlx5_remove_dev_by_protocol
(
dev0
,
MLX5_INTERFACE_PROTOCOL_IB
);
mlx5_nic_vport_disable_roce
(
dev1
);
mlx5_deactivate_lag
(
ldev
);
for
(
i
=
0
;
i
<
MLX5_MAX_PORTS
;
i
++
)
if
(
ldev
->
pf
[
i
].
dev
)
mlx5_add_dev_by_protocol
(
ldev
->
pf
[
i
].
dev
,
MLX5_INTERFACE_PROTOCOL_IB
);
}
}
static
void
mlx5_queue_bond_work
(
struct
mlx5_lag
*
ldev
,
unsigned
long
delay
)
{
schedule_delayed_work
(
&
ldev
->
bond_work
,
delay
);
}
static
void
mlx5_do_bond_work
(
struct
work_struct
*
work
)
{
struct
delayed_work
*
delayed_work
=
to_delayed_work
(
work
);
struct
mlx5_lag
*
ldev
=
container_of
(
delayed_work
,
struct
mlx5_lag
,
bond_work
);
int
status
;
status
=
mutex_trylock
(
&
mlx5_intf_mutex
);
if
(
!
status
)
{
/* 1 sec delay. */
mlx5_queue_bond_work
(
ldev
,
HZ
);
return
;
}
mlx5_do_bond
(
ldev
);
mutex_unlock
(
&
mlx5_intf_mutex
);
}
static
int
mlx5_handle_changeupper_event
(
struct
mlx5_lag
*
ldev
,
struct
lag_tracker
*
tracker
,
struct
net_device
*
ndev
,
struct
netdev_notifier_changeupper_info
*
info
)
{
struct
net_device
*
upper
=
info
->
upper_dev
,
*
ndev_tmp
;
struct
netdev_lag_upper_info
*
lag_upper_info
;
bool
is_bonded
;
int
bond_status
=
0
;
int
num_slaves
=
0
;
int
idx
;
if
(
!
netif_is_lag_master
(
upper
))
return
0
;
lag_upper_info
=
info
->
upper_info
;
/* The event may still be of interest if the slave does not belong to
* us, but is enslaved to a master which has one or more of our netdevs
* as slaves (e.g., if a new slave is added to a master that bonds two
* of our netdevs, we should unbond).
*/
rcu_read_lock
();
for_each_netdev_in_bond_rcu
(
upper
,
ndev_tmp
)
{
idx
=
mlx5_lag_dev_get_netdev_idx
(
ldev
,
ndev_tmp
);
if
(
idx
>
-
1
)
bond_status
|=
(
1
<<
idx
);
num_slaves
++
;
}
rcu_read_unlock
();
/* None of this lagdev's netdevs are slaves of this master. */
if
(
!
(
bond_status
&
0x3
))
return
0
;
if
(
lag_upper_info
)
tracker
->
tx_type
=
lag_upper_info
->
tx_type
;
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
* of the same lag master, and only them.
* Lag mode must be activebackup or hash.
*/
is_bonded
=
(
num_slaves
==
MLX5_MAX_PORTS
)
&&
(
bond_status
==
0x3
)
&&
((
tracker
->
tx_type
==
NETDEV_LAG_TX_TYPE_ACTIVEBACKUP
)
||
(
tracker
->
tx_type
==
NETDEV_LAG_TX_TYPE_HASH
));
if
(
tracker
->
is_bonded
!=
is_bonded
)
{
tracker
->
is_bonded
=
is_bonded
;
return
1
;
}
return
0
;
}
static
int
mlx5_handle_changelowerstate_event
(
struct
mlx5_lag
*
ldev
,
struct
lag_tracker
*
tracker
,
struct
net_device
*
ndev
,
struct
netdev_notifier_changelowerstate_info
*
info
)
{
struct
netdev_lag_lower_state_info
*
lag_lower_info
;
int
idx
;
if
(
!
netif_is_lag_port
(
ndev
))
return
0
;
idx
=
mlx5_lag_dev_get_netdev_idx
(
ldev
,
ndev
);
if
(
idx
==
-
1
)
return
0
;
/* This information is used to determine virtual to physical
* port mapping.
*/
lag_lower_info
=
info
->
lower_state_info
;
if
(
!
lag_lower_info
)
return
0
;
tracker
->
netdev_state
[
idx
]
=
*
lag_lower_info
;
return
1
;
}
static
int
mlx5_lag_netdev_event
(
struct
notifier_block
*
this
,
unsigned
long
event
,
void
*
ptr
)
{
struct
net_device
*
ndev
=
netdev_notifier_info_to_dev
(
ptr
);
struct
lag_tracker
tracker
;
struct
mlx5_lag
*
ldev
;
int
changed
=
0
;
if
(
!
net_eq
(
dev_net
(
ndev
),
&
init_net
))
return
NOTIFY_DONE
;
if
((
event
!=
NETDEV_CHANGEUPPER
)
&&
(
event
!=
NETDEV_CHANGELOWERSTATE
))
return
NOTIFY_DONE
;
ldev
=
container_of
(
this
,
struct
mlx5_lag
,
nb
);
tracker
=
ldev
->
tracker
;
switch
(
event
)
{
case
NETDEV_CHANGEUPPER
:
changed
=
mlx5_handle_changeupper_event
(
ldev
,
&
tracker
,
ndev
,
ptr
);
break
;
case
NETDEV_CHANGELOWERSTATE
:
changed
=
mlx5_handle_changelowerstate_event
(
ldev
,
&
tracker
,
ndev
,
ptr
		);
		break;
	}

	mutex_lock(&lag_mutex);
	ldev->tracker = tracker;
	mutex_unlock(&lag_mutex);

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
	struct mlx5_lag *ldev;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	return ldev;
}

static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
	kfree(ldev);
}

static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
				struct mlx5_core_dev *dev,
				struct net_device *netdev)
{
	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

	if (fn >= MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	ldev->pf[fn].dev    = dev;
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;

	dev->priv.lag = ldev;
	mutex_unlock(&lag_mutex);
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
				   struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

	dev->priv.lag = NULL;
	mutex_unlock(&lag_mutex);
}

static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u16)((dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;
	u16 pci_id;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
		return;

	pci_id = mlx5_gen_pci_id(dev);

	mlx5_core_for_each_priv(priv) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) &&
		    (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			ldev = tmp_dev->priv.lag;
			break;
		}
	}

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc();
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return;
		}
	}

	mlx5_lag_dev_add_pf(ldev, dev, netdev);

	if (!ldev->nb.notifier_call) {
		ldev->nb.notifier_call = mlx5_lag_netdev_event;
		if (register_netdevice_notifier(&ldev->nb)) {
			ldev->nb.notifier_call = NULL;
			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
		}
	}
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		return;

	if (mlx5_lag_is_bonded(ldev))
		mlx5_deactivate_lag(ldev);

	mlx5_lag_dev_remove_pf(ldev, dev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			break;

	if (i == MLX5_MAX_PORTS) {
		if (ldev->nb.notifier_call)
			unregister_netdevice_notifier(&ldev->nb);
		cancel_delayed_work_sync(&ldev->bond_work);
		mlx5_lag_dev_free(ldev);
	}
}

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && mlx5_lag_is_bonded(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);

	if (!(ldev && mlx5_lag_is_bonded(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
		       ldev->pf[0].netdev : ldev->pf[1].netdev;
	} else {
		ndev = ldev->pf[0].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	mutex_unlock(&lag_mutex);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
						 priv);
	struct mlx5_lag *ldev;

	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
		return true;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
		return true;

	/* If bonded, we do not add an IB device for PF1. */
	return false;
}
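For context, a minimal sketch of how an upper-layer consumer might use the two helpers exported above. my_get_tx_netdev() is hypothetical and not part of this patch; mlx5_lag_is_active() and mlx5_lag_get_roce_netdev() are the real exported symbols:

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>

/* Hypothetical caller: prefer the LAG-selected slave when the two PFs
 * are bonded, otherwise fall back to the function's own netdev.
 * mlx5_lag_get_roce_netdev() returns with a reference already held.
 */
static struct net_device *my_get_tx_netdev(struct mlx5_core_dev *mdev,
					   struct net_device *own_netdev)
{
	struct net_device *ndev = NULL;

	if (mlx5_lag_is_active(mdev))
		ndev = mlx5_lag_get_roce_netdev(mdev);
	if (!ndev) {
		dev_hold(own_netdev);
		ndev = own_netdev;
	}
	return ndev;	/* caller must dev_put() when done */
}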
drivers/net/ethernet/mellanox/mlx5/core/mad.c
...
@@ -39,36 +39,33 @@
 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
 		      u16 opmod, u8 port)
 {
-	struct mlx5_mad_ifc_mbox_in *in = NULL;
-	struct mlx5_mad_ifc_mbox_out *out = NULL;
-	int err;
+	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+	int err = -ENOMEM;
+	void *data;
+	void *resp;
+	u32 *out;
+	u32 *in;
 
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
-	if (!out) {
-		err = -ENOMEM;
+	in = kzalloc(inlen, GFP_KERNEL);
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!in || !out)
 		goto out;
-	}
 
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
-	in->hdr.opmod = cpu_to_be16(opmod);
-	in->port = port;
+	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+	MLX5_SET(mad_ifc_in, in, port, port);
 
-	memcpy(in->data, inb, sizeof(in->data));
+	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
 
-	err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
 	if (err)
 		goto out;
 
-	if (out->hdr.status) {
-		err = mlx5_cmd_status_to_err(&out->hdr);
-		goto out;
-	}
-
-	memcpy(outb, out->data, sizeof(out->data));
+	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+	memcpy(outb, resp,
+	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
 
 out:
 	kfree(out);
...
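The conversion above replaces hand-rolled mailbox structs with the mlx5_ifc accessors. A minimal sketch of the pattern, assuming only the real mad_ifc_in layout from include/linux/mlx5/mlx5_ifc.h (the wrapper function itself is illustrative, not from the patch):

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

static void fill_mad_ifc_hdr_example(u32 *in, u16 opmod, u8 port)
{
	/* MLX5_SET() writes a named field at the bit offset mlx5_ifc
	 * declares for it and performs the endianness conversion, so
	 * no struct layout or cpu_to_be*() calls are needed here.
	 */
	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
	MLX5_SET(mad_ifc_in, in, port, port);
}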
drivers/net/ethernet/mellanox/mlx5/core/main.c
...
@@ -73,8 +73,9 @@ module_param_named(prof_sel, prof_sel, int, 0444);
 MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
 
 static LIST_HEAD(intf_list);
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(intf_mutex);
+
+LIST_HEAD(mlx5_dev_list);
+DEFINE_MUTEX(mlx5_intf_mutex);
 
 struct mlx5_device_context {
 	struct list_head	list;
...
@@ -324,7 +325,7 @@ enum {
 	MLX5_DEV_CAP_FLAG_DCT,
 };
 
-static u16 to_fw_pkey_sz(u32 size)
+static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
 {
 	switch (size) {
 	case 128:
...
@@ -340,7 +341,7 @@ static u16 to_fw_pkey_sz(u32 size)
 	case 4096:
 		return 5;
 	default:
-		pr_warn("invalid pkey table size %d\n", size);
+		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
 		return 0;
 	}
 }
...
@@ -363,10 +364,6 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
-	if (err)
-		goto query_ex;
-
-	err = mlx5_cmd_status_to_err_v2(out);
 	if (err) {
 		mlx5_core_warn(dev,
 			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
...
@@ -409,20 +406,11 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
 static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
 {
-	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
-	int err;
-
-	memset(out, 0, sizeof(out));
+	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
 
 	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
 	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
-	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
-	if (err)
-		return err;
-
-	err = mlx5_cmd_status_to_err_v2(out);
-
-	return err;
+	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
 }
 
 static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
...
@@ -490,7 +478,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 		      128);
 	/* we limit the size of the pkey table to 128 entries for now */
 	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
-		 to_fw_pkey_sz(128));
+		 to_fw_pkey_sz(dev, 128));
 
 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
 		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
...
@@ -528,37 +516,22 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
 {
-	u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
-	u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
-	int err;
+	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 	MLX5_SET(enable_hca_in, in, function_id, func_id);
-	memset(out, 0, sizeof(out));
-
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	return mlx5_cmd_status_to_err_v2(out);
+	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 }
 
 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
 {
-	u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
-	u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
-	int err;
+	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
 	MLX5_SET(disable_hca_in, in, function_id, func_id);
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	return mlx5_cmd_status_to_err_v2(out);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
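The hunks above show the central idea of this series: mlx5_cmd_exec() now translates a bad firmware mailbox status into a negative errno itself, so the per-command mlx5_cmd_status_to_err_v2() calls disappear. A sketch of the resulting idiom, mirroring the new mlx5_core_enable_hca() (the wrapper name do_enable_hca_example() is illustrative):

static int do_enable_hca_example(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);

	/* One call: executes the command and returns 0 or -errno,
	 * firmware status failures included.
	 */
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}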
...
@@ -758,44 +731,40 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 {
-	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
-	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
-	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
-	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
-	int err;
+	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]   = {0};
+	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
 	u32 sup_issi;
-
-	memset(query_in, 0, sizeof(query_in));
-	memset(query_out, 0, sizeof(query_out));
+	int err;
 
 	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
-	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
-					 query_out, sizeof(query_out));
+	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
+			    query_out, sizeof(query_out));
 	if (err) {
-		if (((struct mlx5_outbox_hdr *)query_out)->status ==
-		    MLX5_CMD_STAT_BAD_OP_ERR) {
+		u32 syndrome;
+		u8  status;
+
+		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
+		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
 			pr_debug("Only ISSI 0 is supported\n");
 			return 0;
 		}
 
-		pr_err("failed to query ISSI\n");
+		pr_err("failed to query ISSI err(%d)\n", err);
 		return err;
 	}
 
 	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
 
 	if (sup_issi & (1 << 1)) {
-		memset(set_in, 0, sizeof(set_in));
-		memset(set_out, 0, sizeof(set_out));
+		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]   = {0};
+		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
 
 		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
 		MLX5_SET(set_issi_in, set_in, current_issi, 1);
-		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
-						 set_out, sizeof(set_out));
+		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
+				    set_out, sizeof(set_out));
 		if (err) {
-			pr_err("failed to set ISSI=1\n");
+			pr_err("failed to set ISSI=1 err(%d)\n", err);
 			return err;
 		}
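mlx5_cmd_mbox_status(), used above, lets a caller that still cares about the raw firmware status recover it after mlx5_cmd_exec() has already folded it into an errno. A sketch under that assumption (query_with_status_example() is illustrative; the three-argument signature is taken from the call in the hunk above):

static int query_with_status_example(struct mlx5_core_dev *dev,
				     u32 *in, int inlen,
				     u32 *out, int outlen)
{
	u32 syndrome;
	u8  status;
	int err;

	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err) {
		/* Distinguish "firmware rejected the opcode" from real
		 * transport failures, as the ISSI probe above does.
		 */
		mlx5_cmd_mbox_status(out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR)
			return -EOPNOTSUPP;
	}
	return err;
}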
...
@@ -814,6 +783,9 @@ static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	struct mlx5_device_context *dev_ctx;
 	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
 
+	if (!mlx5_lag_intf_add(intf, priv))
+		return;
+
 	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
 	if (!dev_ctx)
 		return;
...
@@ -852,11 +824,11 @@ static int mlx5_register_device(struct mlx5_core_dev *dev)
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_interface *intf;
 
-	mutex_lock(&intf_mutex);
-	list_add_tail(&priv->dev_list, &dev_list);
+	mutex_lock(&mlx5_intf_mutex);
+	list_add_tail(&priv->dev_list, &mlx5_dev_list);
 	list_for_each_entry(intf, &intf_list, list)
 		mlx5_add_device(intf, priv);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 
 	return 0;
 }
...
@@ -866,11 +838,11 @@ static void mlx5_unregister_device(struct mlx5_core_dev *dev)
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_interface *intf;
 
-	mutex_lock(&intf_mutex);
+	mutex_lock(&mlx5_intf_mutex);
 	list_for_each_entry(intf, &intf_list, list)
 		mlx5_remove_device(intf, priv);
 	list_del(&priv->dev_list);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 }
 
 int mlx5_register_interface(struct mlx5_interface *intf)
...
@@ -880,11 +852,11 @@ int mlx5_register_interface(struct mlx5_interface *intf)
 	if (!intf->add || !intf->remove)
 		return -EINVAL;
 
-	mutex_lock(&intf_mutex);
+	mutex_lock(&mlx5_intf_mutex);
 	list_add_tail(&intf->list, &intf_list);
-	list_for_each_entry(priv, &dev_list, dev_list)
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
 		mlx5_add_device(intf, priv);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 
 	return 0;
 }
...
@@ -894,11 +866,11 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 {
 	struct mlx5_priv *priv;
 
-	mutex_lock(&intf_mutex);
-	list_for_each_entry(priv, &dev_list, dev_list)
+	mutex_lock(&mlx5_intf_mutex);
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
 		mlx5_remove_device(intf, priv);
 	list_del(&intf->list);
-	mutex_unlock(&intf_mutex);
+	mutex_unlock(&mlx5_intf_mutex);
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
...
@@ -924,6 +896,30 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
 }
 EXPORT_SYMBOL(mlx5_get_protocol_dev);
 
+/* Must be called with intf_mutex held */
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+	struct mlx5_interface *intf;
+
+	list_for_each_entry(intf, &intf_list, list)
+		if (intf->protocol == protocol) {
+			mlx5_add_device(intf, &dev->priv);
+			break;
+		}
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+	struct mlx5_interface *intf;
+
+	list_for_each_entry(intf, &intf_list, list)
+		if (intf->protocol == protocol) {
+			mlx5_remove_device(intf, &dev->priv);
+			break;
+		}
+}
+
 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {
 	struct pci_dev *pdev = dev->pdev;
...
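A sketch of the intended use of the two new helpers: a caller such as the LAG bond worker takes the now-global mlx5_intf_mutex and re-plugs a single protocol interface. rebind_ib_interface_example() is illustrative and not part of the patch:

static void rebind_ib_interface_example(struct mlx5_core_dev *dev)
{
	mutex_lock(&mlx5_intf_mutex);
	/* Drop and re-add only the IB interface, e.g. after the LAG
	 * state changed and mlx5_lag_intf_add() would now answer
	 * differently for this PF.
	 */
	mlx5_remove_dev_by_protocol(dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_add_dev_by_protocol(dev, MLX5_INTERFACE_PROTOCOL_IB);
	mutex_unlock(&mlx5_intf_mutex);
}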
@@ -1344,8 +1340,9 @@ static int init_one(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, dev);
 
 	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
-		pr_warn("selected profile out of range, selecting default (%d)\n",
-			MLX5_DEFAULT_PROF);
+		mlx5_core_warn(dev,
+			       "selected profile out of range, selecting default (%d)\n",
+			       MLX5_DEFAULT_PROF);
 		prof_sel = MLX5_DEFAULT_PROF;
 	}
 	dev->profile = &profile[prof_sel];
...
drivers/net/ethernet/mellanox/mlx5/core/mcg.c
...
@@ -37,70 +37,30 @@
 #include <rdma/ib_verbs.h>
 #include "mlx5_core.h"
 
-struct mlx5_attach_mcg_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			qpn;
-	__be32			rsvd;
-	u8			gid[16];
-};
-
-struct mlx5_attach_mcg_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvf[8];
-};
-
-struct mlx5_detach_mcg_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			qpn;
-	__be32			rsvd;
-	u8			gid[16];
-};
-
-struct mlx5_detach_mcg_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvf[8];
-};
-
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
 {
-	struct mlx5_attach_mcg_mbox_in in;
-	struct mlx5_attach_mcg_mbox_out out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)]   = {0};
+	void *gid;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
-	memcpy(in.gid, mgid, sizeof(*mgid));
-	in.qpn = cpu_to_be32(qpn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+	memcpy(gid, mgid, sizeof(*mgid));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_attach_mcg);
 
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
 {
-	struct mlx5_detach_mcg_mbox_in in;
-	struct mlx5_detach_mcg_mbox_out out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)]   = {0};
+	void *gid;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
-	memcpy(in.gid, mgid, sizeof(*mgid));
-	in.qpn = cpu_to_be32(qpn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+	memcpy(gid, mgid, sizeof(*mgid));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_detach_mcg);
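A minimal usage sketch of the two exported calls above (illustrative only; in practice the GID and QPN come from the verbs layer and the detach happens on a separate teardown path):

static int mcg_join_leave_example(struct mlx5_core_dev *dev,
				  union ib_gid *gid, u32 qpn)
{
	int err;

	err = mlx5_core_attach_mcg(dev, gid, qpn);
	if (err)
		return err;
	/* ... receive multicast traffic on the QP ... */
	return mlx5_core_detach_mcg(dev, gid, qpn);
}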
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
...
@@ -46,6 +46,9 @@
 extern int mlx5_core_debug_mask;
 
+extern struct list_head mlx5_dev_list;
+extern struct mutex mlx5_intf_mutex;
+
 #define mlx5_core_dbg(__dev, format, ...)				\
 	dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
 		(__dev)->priv.name, __func__, __LINE__, current->pid,	\
...
@@ -58,8 +61,8 @@ do {									\
 } while (0)
 
 #define mlx5_core_err(__dev, format, ...)				\
-	dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
-		(__dev)->priv.name, __func__, __LINE__, current->pid,	\
+	dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,		\
+		__func__, __LINE__, current->pid,			\
 	       ##__VA_ARGS__)
 
 #define mlx5_core_warn(__dev, format, ...)				\
...
@@ -70,24 +73,14 @@ do {									\
 #define mlx5_core_info(__dev, format, ...)				\
 	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
 
+#define mlx5_core_for_each_priv(__priv)					\
+	list_for_each_entry(__priv, &mlx5_dev_list, dev_list)
+
 enum {
 	MLX5_CMD_DATA, /* print command payload only */
 	MLX5_CMD_TIME, /* print command execution time */
 };
 
-static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
-					     int in_size, u32 *out,
-					     int out_size)
-{
-	int err;
-
-	err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
-	if (err)
-		return err;
-
-	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
-}
-
 int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
 int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
...
@@ -97,6 +90,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 void mlx5_enter_error_state(struct mlx5_core_dev *dev);
 void mlx5_disable_device(struct mlx5_core_dev *dev);
 int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
...
@@ -105,7 +99,27 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
 void mlx5_cq_tasklet_cb(unsigned long data);
 
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev);
+
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
+
 void mlx5e_init(void);
 void mlx5e_cleanup(void);
 
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+	/* LACP owner conditions:
+	 * 1) Function is physical.
+	 * 2) LAG is supported by FW.
+	 * 3) LAG is managed by driver (currently the only option).
+	 */
+	return  MLX5_CAP_GEN(dev, vport_group_manager) &&
+		   (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+		    MLX5_CAP_GEN(dev, lag_master);
+}
+
 #endif /* __MLX5_CORE_H__ */
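Illustrative use of the new mlx5_lag_is_lacp_owner() helper (the caller is hypothetical, not from the patch): a receive path that must not process LACP PDUs on the non-owner function can gate on it, matching the three conditions documented in the inline comment above.

#include <linux/skbuff.h>

static int handle_lacp_pdu_example(struct mlx5_core_dev *dev,
				   struct sk_buff *skb)
{
	if (!mlx5_lag_is_lacp_owner(dev))
		return -EPERM;	/* only the LACP owner handles PDUs */
	/* ... hand the PDU to the bonding driver ... */
	return 0;
}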
drivers/net/ethernet/mellanox/mlx5/core/mr.c
...
@@ -49,48 +49,43 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
 {
 }
 
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
-			  struct mlx5_core_mkey *mkey,
-			  struct mlx5_create_mkey_mbox_in *in, int inlen,
-			  mlx5_cmd_cbk_t callback, void *context,
-			  struct mlx5_create_mkey_mbox_out *out)
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+			     struct mlx5_core_mkey *mkey,
+			     u32 *in, int inlen,
+			     u32 *out, int outlen,
+			     mlx5_cmd_cbk_t callback, void *context)
 {
 	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
-	struct mlx5_create_mkey_mbox_out lout;
+	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+	u32 mkey_index;
+	void *mkc;
 	int err;
 	u8 key;
 
-	memset(&lout, 0, sizeof(lout));
 	spin_lock_irq(&dev->priv.mkey_lock);
 	key = dev->priv.mkey_key++;
 	spin_unlock_irq(&dev->priv.mkey_lock);
-	in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
-	if (callback) {
-		err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
-				       callback, context);
-		return err;
-	} else {
-		err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
-	}
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
-	if (err) {
-		mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
-		return err;
-	}
+	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+	MLX5_SET(mkc, mkc, mkey_7_0, key);
 
-	if (lout.hdr.status) {
-		mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
-		return mlx5_cmd_status_to_err(&lout.hdr);
-	}
+	if (callback)
+		return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
+					callback, context);
+
+	err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
+	if (err)
+		return err;
 
-	mkey->iova = be64_to_cpu(in->seg.start_addr);
-	mkey->size = be64_to_cpu(in->seg.len);
-	mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
-	mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+	mkey->size = MLX5_GET64(mkc, mkc, len);
+	mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
+	mkey->pd = MLX5_GET(mkc, mkc, pd);
 
 	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
-		      be32_to_cpu(lout.mkey), key, mkey->key);
+		      mkey_index, key, mkey->key);
 
 	/* connect to mkey tree */
 	write_lock_irq(&table->lock);
...
@@ -104,20 +99,25 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
 	return err;
 }
+EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
+
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+			  struct mlx5_core_mkey *mkey,
+			  u32 *in, int inlen)
+{
+	return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
+					NULL, 0, NULL, NULL);
+}
 EXPORT_SYMBOL(mlx5_core_create_mkey);
 
 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
 			   struct mlx5_core_mkey *mkey)
 {
 	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
-	struct mlx5_destroy_mkey_mbox_in in;
-	struct mlx5_destroy_mkey_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)]   = {0};
 	struct mlx5_core_mkey *deleted_mkey;
 	unsigned long flags;
-	int err;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-
 	write_lock_irqsave(&table->lock, flags);
 	deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
...
@@ -128,94 +128,71 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
 		return -ENOENT;
 	}
 
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
-	in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_destroy_mkey);
 
 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
-			 struct mlx5_query_mkey_mbox_out *out, int outlen)
+			 u32 *out, int outlen)
 {
-	struct mlx5_query_mkey_mbox_in in;
-	int err;
+	u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
 
-	memset(&in, 0, sizeof(in));
 	memset(out, 0, outlen);
-
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
-	in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
-	if (err)
-		return err;
-
-	if (out->hdr.status)
-		return mlx5_cmd_status_to_err(&out->hdr);
-
-	return err;
+	MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
+	MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 EXPORT_SYMBOL(mlx5_core_query_mkey);
 
 int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
 			     u32 *mkey)
 {
-	struct mlx5_query_special_ctxs_mbox_in in;
-	struct mlx5_query_special_ctxs_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)]   = {0};
 	int err;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	*mkey = be32_to_cpu(out.dump_fill_mkey);
-
+	MLX5_SET(query_special_contexts_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*mkey = MLX5_GET(query_special_contexts_out, out,
+				 dump_fill_mkey);
 	return err;
 }
 EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
 
+static inline u32 mlx5_get_psv(u32 *out, int psv_index)
+{
+	switch (psv_index) {
+	case 1: return MLX5_GET(create_psv_out, out, psv1_index);
+	case 2: return MLX5_GET(create_psv_out, out, psv2_index);
+	case 3: return MLX5_GET(create_psv_out, out, psv3_index);
+	default: return MLX5_GET(create_psv_out, out, psv0_index);
+	}
+}
+
 int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
 			 int npsvs, u32 *sig_index)
 {
-	struct mlx5_allocate_psv_in in;
-	struct mlx5_allocate_psv_out out;
+	u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(create_psv_in)]   = {0};
 	int i, err;
 
 	if (npsvs > MLX5_MAX_PSVS)
 		return -EINVAL;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
+	MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
+	MLX5_SET(create_psv_in, in, pd, pdn);
+	MLX5_SET(create_psv_in, in, num_psv, npsvs);
 
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
-	in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err) {
-		mlx5_core_err(dev, "cmd exec failed %d\n", err);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
 		return err;
-	}
-
-	if (out.hdr.status) {
-		mlx5_core_err(dev, "create_psv bad status %d\n",
-			      out.hdr.status);
-		return mlx5_cmd_status_to_err(&out.hdr);
-	}
 
 	for (i = 0; i < npsvs; i++)
-		sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
+		sig_index[i] = mlx5_get_psv(out, i);
 
 	return err;
 }
...
@@ -223,29 +200,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
 int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
 {
-	struct mlx5_destroy_psv_in in;
-	struct mlx5_destroy_psv_out out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_psv_in)]   = {0};
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-
-	in.psv_number = cpu_to_be32(psv_num);
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err) {
-		mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
-		goto out;
-	}
-
-	if (out.hdr.status) {
-		mlx5_core_err(dev, "destroy_psv bad status %d\n",
-			      out.hdr.status);
-		err = mlx5_cmd_status_to_err(&out.hdr);
-		goto out;
-	}
-
-out:
-	return err;
+	MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
+	MLX5_SET(destroy_psv_in, in, psvn, psv_num);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_destroy_psv);
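A sketch of the new caller side of mlx5_core_create_mkey(): build a create_mkey input with the mlx5_ifc layout and pass the raw u32 buffer. The mkc field names follow mlx5_ifc; the specific access-mode flags chosen here and the wrapper function are illustrative assumptions, and error handling is trimmed:

static int create_pa_mkey_example(struct mlx5_core_dev *dev, u32 pdn,
				  struct mlx5_core_mkey *mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, lw, 1);		/* local write */
	MLX5_SET(mkc, mkc, lr, 1);		/* local read  */
	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev, mkey, in, inlen);
	kvfree(in);
	return err;
}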
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
...
@@ -44,12 +44,6 @@ enum {
 	MLX5_PAGES_TAKE		= 2
 };
 
-enum {
-	MLX5_BOOT_PAGES		= 1,
-	MLX5_INIT_PAGES		= 2,
-	MLX5_POST_INIT_PAGES	= 3
-};
-
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
 	u16	func_id;
...
@@ -67,33 +61,6 @@ struct fw_page {
 	unsigned		free_count;
 };
 
-struct mlx5_query_pages_inbox {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_pages_outbox {
-	struct mlx5_outbox_hdr	hdr;
-	__be16			rsvd;
-	__be16			func_id;
-	__be32			num_pages;
-};
-
-struct mlx5_manage_pages_inbox {
-	struct mlx5_inbox_hdr	hdr;
-	__be16			rsvd;
-	__be16			func_id;
-	__be32			num_entries;
-	__be64			pas[0];
-};
-
-struct mlx5_manage_pages_outbox {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			num_entries;
-	u8			rsvd[4];
-	__be64			pas[0];
-};
-
 enum {
 	MAX_RECLAIM_TIME_MSECS	= 5000,
 	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
...
@@ -167,24 +134,21 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 				s32 *npages, int boot)
 {
-	struct mlx5_query_pages_inbox	in;
-	struct mlx5_query_pages_outbox	out;
+	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(query_pages_in)]   = {0};
 	int err;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
-	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) :
-			      cpu_to_be16(MLX5_INIT_PAGES);
-
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
+	MLX5_SET(query_pages_in, in, op_mod, boot ?
+		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
+		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (err)
 		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	*npages = be32_to_cpu(out.num_pages);
-	*func_id = be16_to_cpu(out.func_id);
+	*npages = MLX5_GET(query_pages_out, out, num_pages);
+	*func_id = MLX5_GET(query_pages_out, out, function_id);
 
 	return err;
 }
...
@@ -280,46 +244,37 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
 static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
 {
-	struct mlx5_manage_pages_inbox *in;
-	struct mlx5_manage_pages_outbox out;
+	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)]   = {0};
 	int err;
 
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return;
-
-	memset(&out, 0, sizeof(out));
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
-	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
-	in->func_id = cpu_to_be16(func_id);
-	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
-	if (!err)
-		err = mlx5_cmd_status_to_err(&out.hdr);
+	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
+	MLX5_SET(manage_pages_in, in, function_id, func_id);
 
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (err)
-		mlx5_core_warn(dev, "page notify failed\n");
-
-	kfree(in);
+		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
+			       func_id, err);
 }
 
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 		      int notify_fail)
 {
-	struct mlx5_manage_pages_inbox *in;
-	struct mlx5_manage_pages_outbox out;
-	int inlen;
+	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
 	u64 addr;
 	int err;
+	u32 *in;
 	int i;
 
-	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
+	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
 		err = -ENOMEM;
 		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
 		goto out_free;
 	}
-	memset(&out, 0, sizeof(out));
 
 	for (i = 0; i < npages; i++) {
 retry:
...
@@ -332,27 +287,21 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 			goto retry;
 		}
-		in->pas[i] = cpu_to_be64(addr);
+		MLX5_SET64(manage_pages_in, in, pas[i], addr);
 	}
 
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
-	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
-	in->func_id = cpu_to_be16(func_id);
-	in->num_entries = cpu_to_be32(npages);
-	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
+	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (err) {
 		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
 			       func_id, npages, err);
 		goto out_4k;
 	}
-
-	err = mlx5_cmd_status_to_err(&out.hdr);
-	if (err) {
-		mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
-			       func_id, npages, out.hdr.status);
-		goto out_4k;
-	}
 
 	dev->priv.fw_pages += npages;
 	if (func_id)
 		dev->priv.vfs_pages += npages;
...
@@ -364,7 +313,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 out_4k:
 	for (i--; i >= 0; i--)
-		free_4k(dev, be64_to_cpu(in->pas[i]));
+		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
 out_free:
 	kvfree(in);
 	if (notify_fail)
...
@@ -373,8 +322,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 }
 
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
-			     struct mlx5_manage_pages_inbox *in, int in_size,
-			     struct mlx5_manage_pages_outbox *out, int out_size)
+			     u32 *in, int in_size, u32 *out, int out_size)
 {
 	struct fw_page *fwp;
 	struct rb_node *p;
...
@@ -382,55 +330,54 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 	u32 i = 0;
 
 	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
-		return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
-						  (u32 *)out, out_size);
+		return mlx5_cmd_exec(dev, in, in_size, out, out_size);
 
-	npages = be32_to_cpu(in->num_entries);
+	/* No hard feelings, we want our pages back! */
+	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
 
 	p = rb_first(&dev->priv.page_root);
 	while (p && i < npages) {
 		fwp = rb_entry(p, struct fw_page, rb_node);
-		out->pas[i] = cpu_to_be64(fwp->addr);
+		MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
 		p = rb_next(p);
 		i++;
 	}
 
-	out->num_entries = cpu_to_be32(i);
+	MLX5_SET(manage_pages_out, out, output_num_entries, i);
 	return 0;
 }
 
 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 			 int *nclaimed)
 {
-	struct mlx5_manage_pages_inbox   in;
-	struct mlx5_manage_pages_outbox *out;
+	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
 	int num_claimed;
-	int outlen;
-	u64 addr;
+	u32 *out;
 	int err;
 	int i;
 
 	if (nclaimed)
 		*nclaimed = 0;
 
-	memset(&in, 0, sizeof(in));
-	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
+	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
 	out = mlx5_vzalloc(outlen);
 	if (!out)
 		return -ENOMEM;
 
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
-	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
-	in.func_id = cpu_to_be16(func_id);
-	in.num_entries = cpu_to_be32(npages);
+	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
+	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
 	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
-	err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
+	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
 	if (err) {
 		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
 		goto out_free;
 	}
 
-	num_claimed = be32_to_cpu(out->num_entries);
+	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
 	if (num_claimed > npages) {
 		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
 			       num_claimed, npages);
...
@@ -438,10 +385,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 		goto out_free;
 	}
 
-	for (i = 0; i < num_claimed; i++) {
-		addr = be64_to_cpu(out->pas[i]);
-		free_4k(dev, addr);
-	}
+	for (i = 0; i < num_claimed; i++)
+		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+
 	if (nclaimed)
 		*nclaimed = num_claimed;
...
@@ -518,8 +464,8 @@ static int optimal_reclaimed_pages(void)
 	int ret;
 
 	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
-	       sizeof(struct mlx5_manage_pages_outbox)) /
-	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
+	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
+	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
 
 	return ret;
 }
...
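The give_pages() conversion above shows the variable-length mailbox idiom: the fixed header size comes from MLX5_ST_SZ_BYTES() and each page address extends it by one pas[] element, written with MLX5_SET64(). A sketch of just that allocation and fill step (fill_give_pages_example() is illustrative, not part of the patch):

static u32 *fill_give_pages_example(u16 func_id, u64 *addrs, int npages,
				    int *inlen)
{
	u32 *in;
	int i;

	*inlen = MLX5_ST_SZ_BYTES(manage_pages_in) +
		 npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = mlx5_vzalloc(*inlen);
	if (!in)
		return NULL;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	for (i = 0; i < npages; i++)
		MLX5_SET64(manage_pages_in, in, pas[i], addrs[i]);

	return in;	/* caller: mlx5_cmd_exec(), then kvfree() */
}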
drivers/net/ethernet/mellanox/mlx5/core/pd.c
...
@@ -36,66 +36,27 @@
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
 
-struct mlx5_alloc_pd_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_alloc_pd_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			pdn;
-	u8			rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			pdn;
-	u8			rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
 {
-	struct mlx5_alloc_pd_mbox_in	in;
-	struct mlx5_alloc_pd_mbox_out	out;
+	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {0};
 	int err;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	*pdn = be32_to_cpu(out.pdn) & 0xffffff;
+	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*pdn = MLX5_GET(alloc_pd_out, out, pd);
 	return err;
 }
 EXPORT_SYMBOL(mlx5_core_alloc_pd);
 
 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
 {
-	struct mlx5_dealloc_pd_mbox_in	in;
-	struct mlx5_dealloc_pd_mbox_out	out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)]   = {0};
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
-	in.pdn = cpu_to_be32(pdn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+	MLX5_SET(dealloc_pd_in, in, pd, pdn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_dealloc_pd);
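Minimal usage sketch of the two calls above (illustrative; a real caller keeps the PD alive while mkeys and QPs reference it):

static int pd_roundtrip_example(struct mlx5_core_dev *dev)
{
	u32 pdn;
	int err;

	err = mlx5_core_alloc_pd(dev, &pdn);
	if (err)
		return err;
	/* ... create mkeys/QPs referencing pdn ... */
	return mlx5_core_dealloc_pd(dev, pdn);
}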
drivers/net/ethernet/mellanox/mlx5/core/port.c
...
@@ -38,45 +38,42 @@
...
@@ -38,45 +38,42 @@
int
mlx5_core_access_reg
(
struct
mlx5_core_dev
*
dev
,
void
*
data_in
,
int
mlx5_core_access_reg
(
struct
mlx5_core_dev
*
dev
,
void
*
data_in
,
int
size_in
,
void
*
data_out
,
int
size_out
,
int
size_in
,
void
*
data_out
,
int
size_out
,
u16
reg_
num
,
int
arg
,
int
write
)
u16
reg_
id
,
int
arg
,
int
write
)
{
{
struct
mlx5_access_reg_mbox_in
*
in
=
NULL
;
int
outlen
=
MLX5_ST_SZ_BYTES
(
access_register_out
)
+
size_out
;
struct
mlx5_access_reg_mbox_out
*
out
=
NULL
;
int
inlen
=
MLX5_ST_SZ_BYTES
(
access_register_in
)
+
size_in
;
int
err
=
-
ENOMEM
;
int
err
=
-
ENOMEM
;
u32
*
out
=
NULL
;
u32
*
in
=
NULL
;
void
*
data
;
in
=
mlx5_vzalloc
(
sizeof
(
*
in
)
+
size_in
);
in
=
mlx5_vzalloc
(
inlen
);
if
(
!
in
)
out
=
mlx5_vzalloc
(
outlen
);
return
-
ENOMEM
;
if
(
!
in
||
!
out
)
goto
out
;
out
=
mlx5_vzalloc
(
sizeof
(
*
out
)
+
size_out
);
if
(
!
out
)
goto
ex1
;
memcpy
(
in
->
data
,
data_in
,
size_in
);
in
->
hdr
.
opcode
=
cpu_to_be16
(
MLX5_CMD_OP_ACCESS_REG
);
in
->
hdr
.
opmod
=
cpu_to_be16
(
!
write
);
in
->
arg
=
cpu_to_be32
(
arg
);
in
->
register_id
=
cpu_to_be16
(
reg_num
);
err
=
mlx5_cmd_exec
(
dev
,
in
,
sizeof
(
*
in
)
+
size_in
,
out
,
sizeof
(
*
out
)
+
size_out
);
if
(
err
)
goto
ex2
;
if
(
out
->
hdr
.
status
)
data
=
MLX5_ADDR_OF
(
access_register_in
,
in
,
register_data
);
err
=
mlx5_cmd_status_to_err
(
&
out
->
hdr
);
memcpy
(
data
,
data_in
,
size_in
);
if
(
!
err
)
MLX5_SET
(
access_register_in
,
in
,
opcode
,
MLX5_CMD_OP_ACCESS_REG
);
memcpy
(
data_out
,
out
->
data
,
size_out
);
MLX5_SET
(
access_register_in
,
in
,
op_mod
,
!
write
);
MLX5_SET
(
access_register_in
,
in
,
argument
,
arg
);
MLX5_SET
(
access_register_in
,
in
,
register_id
,
reg_id
);
err
=
mlx5_cmd_exec
(
dev
,
in
,
inlen
,
out
,
outlen
);
if
(
err
)
goto
out
;
data
=
MLX5_ADDR_OF
(
access_register_out
,
out
,
register_data
);
memcpy
(
data_out
,
data
,
size_out
);
ex2
:
out
:
kvfree
(
out
);
kvfree
(
out
);
ex1:
kvfree
(
in
);
kvfree
(
in
);
return
err
;
return
err
;
}
}
EXPORT_SYMBOL_GPL
(
mlx5_core_access_reg
);
EXPORT_SYMBOL_GPL
(
mlx5_core_access_reg
);
struct
mlx5_reg_pcap
{
struct
mlx5_reg_pcap
{
u8
rsvd0
;
u8
rsvd0
;
u8
port_num
;
u8
port_num
;
...
@@ -104,12 +101,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
...
@@ -104,12 +101,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int
mlx5_query_port_ptys
(
struct
mlx5_core_dev
*
dev
,
u32
*
ptys
,
int
mlx5_query_port_ptys
(
struct
mlx5_core_dev
*
dev
,
u32
*
ptys
,
int
ptys_size
,
int
proto_mask
,
u8
local_port
)
int
ptys_size
,
int
proto_mask
,
u8
local_port
)
{
{
u32
in
[
MLX5_ST_SZ_DW
(
ptys_reg
)];
u32
in
[
MLX5_ST_SZ_DW
(
ptys_reg
)]
=
{
0
}
;
memset
(
in
,
0
,
sizeof
(
in
));
MLX5_SET
(
ptys_reg
,
in
,
local_port
,
local_port
);
MLX5_SET
(
ptys_reg
,
in
,
local_port
,
local_port
);
MLX5_SET
(
ptys_reg
,
in
,
proto_mask
,
proto_mask
);
MLX5_SET
(
ptys_reg
,
in
,
proto_mask
,
proto_mask
);
return
mlx5_core_access_reg
(
dev
,
in
,
sizeof
(
in
),
ptys
,
return
mlx5_core_access_reg
(
dev
,
in
,
sizeof
(
in
),
ptys
,
ptys_size
,
MLX5_REG_PTYS
,
0
,
0
);
ptys_size
,
MLX5_REG_PTYS
,
0
,
0
);
}
}
...
@@ -117,13 +112,11 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
...
@@ -117,13 +112,11 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
int
mlx5_set_port_beacon
(
struct
mlx5_core_dev
*
dev
,
u16
beacon_duration
)
int
mlx5_set_port_beacon
(
struct
mlx5_core_dev
*
dev
,
u16
beacon_duration
)
{
{
u32
in
[
MLX5_ST_SZ_DW
(
mlcr_reg
)]
=
{
0
};
u32
out
[
MLX5_ST_SZ_DW
(
mlcr_reg
)];
u32
out
[
MLX5_ST_SZ_DW
(
mlcr_reg
)];
u32
in
[
MLX5_ST_SZ_DW
(
mlcr_reg
)];
memset
(
in
,
0
,
sizeof
(
in
));
MLX5_SET
(
mlcr_reg
,
in
,
local_port
,
1
);
MLX5_SET
(
mlcr_reg
,
in
,
local_port
,
1
);
MLX5_SET
(
mlcr_reg
,
in
,
beacon_duration
,
beacon_duration
);
MLX5_SET
(
mlcr_reg
,
in
,
beacon_duration
,
beacon_duration
);
return
mlx5_core_access_reg
(
dev
,
in
,
sizeof
(
in
),
out
,
return
mlx5_core_access_reg
(
dev
,
in
,
sizeof
(
in
),
out
,
sizeof
(
out
),
MLX5_REG_MLCR
,
0
,
1
);
sizeof
(
out
),
MLX5_REG_MLCR
,
0
,
1
);
}
}
...
@@ -182,25 +175,39 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
...
@@ -182,25 +175,39 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
}
}
EXPORT_SYMBOL_GPL
(
mlx5_query_port_link_width_oper
);
EXPORT_SYMBOL_GPL
(
mlx5_query_port_link_width_oper
);
int
mlx5_query_port_proto_oper
(
struct
mlx5_core_dev
*
dev
,
int
mlx5_query_port_eth_proto_oper
(
struct
mlx5_core_dev
*
dev
,
u8
*
proto_oper
,
int
proto_mask
,
u32
*
proto_oper
,
u8
local_port
)
u8
local_port
)
{
{
u32
out
[
MLX5_ST_SZ_DW
(
ptys_reg
)];
u32
out
[
MLX5_ST_SZ_DW
(
ptys_reg
)];
int
err
;
int
err
;
err
=
mlx5_query_port_ptys
(
dev
,
out
,
sizeof
(
out
),
proto_mask
,
local_port
);
err
=
mlx5_query_port_ptys
(
dev
,
out
,
sizeof
(
out
),
MLX5_PTYS_EN
,
local_port
);
if
(
err
)
if
(
err
)
return
err
;
return
err
;
if
(
proto_mask
==
MLX5_PTYS_EN
)
*
proto_oper
=
MLX5_GET
(
ptys_reg
,
out
,
eth_proto_oper
);
*
proto_oper
=
MLX5_GET
(
ptys_reg
,
out
,
eth_proto_oper
);
else
return
0
;
*
proto_oper
=
MLX5_GET
(
ptys_reg
,
out
,
ib_proto_oper
);
}
EXPORT_SYMBOL
(
mlx5_query_port_eth_proto_oper
);
int
mlx5_query_port_ib_proto_oper
(
struct
mlx5_core_dev
*
dev
,
u8
*
proto_oper
,
u8
local_port
)
{
u32
out
[
MLX5_ST_SZ_DW
(
ptys_reg
)];
int
err
;
err
=
mlx5_query_port_ptys
(
dev
,
out
,
sizeof
(
out
),
MLX5_PTYS_IB
,
local_port
);
if
(
err
)
return
err
;
*
proto_oper
=
MLX5_GET
(
ptys_reg
,
out
,
ib_proto_oper
);
return
0
;
return
0
;
}
}
EXPORT_SYMBOL
_GPL
(
mlx5_query_port
_proto_oper
);
EXPORT_SYMBOL
(
mlx5_query_port_ib
_proto_oper
);
int
mlx5_set_port_ptys
(
struct
mlx5_core_dev
*
dev
,
bool
an_disable
,
int
mlx5_set_port_ptys
(
struct
mlx5_core_dev
*
dev
,
bool
an_disable
,
u32
proto_admin
,
int
proto_mask
)
u32
proto_admin
,
int
proto_mask
)
...
@@ -246,15 +253,12 @@ EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
...
@@ -246,15 +253,12 @@ EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int
mlx5_set_port_admin_status
(
struct
mlx5_core_dev
*
dev
,
int
mlx5_set_port_admin_status
(
struct
mlx5_core_dev
*
dev
,
enum
mlx5_port_status
status
)
enum
mlx5_port_status
status
)
{
{
u32
in
[
MLX5_ST_SZ_DW
(
paos_reg
)];
u32
in
[
MLX5_ST_SZ_DW
(
paos_reg
)]
=
{
0
}
;
u32
out
[
MLX5_ST_SZ_DW
(
paos_reg
)];
u32
out
[
MLX5_ST_SZ_DW
(
paos_reg
)];
memset
(
in
,
0
,
sizeof
(
in
));
MLX5_SET
(
paos_reg
,
in
,
local_port
,
1
);
MLX5_SET
(
paos_reg
,
in
,
local_port
,
1
);
MLX5_SET
(
paos_reg
,
in
,
admin_status
,
status
);
MLX5_SET
(
paos_reg
,
in
,
admin_status
,
status
);
MLX5_SET
(
paos_reg
,
in
,
ase
,
1
);
MLX5_SET
(
paos_reg
,
in
,
ase
,
1
);
return
mlx5_core_access_reg
(
dev
,
in
,
sizeof
(
in
),
out
,
return
mlx5_core_access_reg
(
dev
,
in
,
sizeof
(
in
),
out
,
sizeof
(
out
),
MLX5_REG_PAOS
,
0
,
1
);
sizeof
(
out
),
MLX5_REG_PAOS
,
0
,
1
);
}
}
...
@@ -263,19 +267,15 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 				 enum mlx5_port_status *status)
 {
-	u32 in[MLX5_ST_SZ_DW(paos_reg)];
+	u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(paos_reg)];
 	int err;
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(paos_reg, in, local_port, 1);
 	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
 				   sizeof(out), MLX5_REG_PAOS, 0, 0);
 	if (err)
 		return err;
 
 	*status = MLX5_GET(paos_reg, out, admin_status);
 	return 0;
 }
...
@@ -284,13 +284,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
 				u16 *max_mtu, u16 *oper_mtu, u8 port)
 {
-	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(pmtu_reg, in, local_port, port);
-
 	mlx5_core_access_reg(dev, in, sizeof(in), out,
 			     sizeof(out), MLX5_REG_PMTU, 0, 0);
...
@@ -304,14 +301,11 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
 {
-	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
 	MLX5_SET(pmtu_reg, in, local_port, port);
-
 	return mlx5_core_access_reg(dev, in, sizeof(in), out,
 				    sizeof(out), MLX5_REG_PMTU, 0, 1);
 }
...
@@ -333,15 +327,12 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
 static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
 {
+	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
-	u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
 	int module_mapping;
 	int err;
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(pmlp_reg, in, local_port, 1);
 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 				   MLX5_REG_PMLP, 0, 0);
 	if (err)
...
@@ -410,11 +401,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
 static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
 				int pvlc_size, u8 local_port)
 {
-	u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+	u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0};
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(pvlc_reg, in, local_port, local_port);
 	return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
 				    pvlc_size, MLX5_REG_PVLC, 0, 0);
 }
...
@@ -460,10 +449,9 @@ EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
 int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
 {
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(pfcc_reg, in, local_port, 1);
 	MLX5_SET(pfcc_reg, in, pptx, tx_pause);
 	MLX5_SET(pfcc_reg, in, pprx, rx_pause);
...
@@ -476,13 +464,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
 int mlx5_query_port_pause(struct mlx5_core_dev *dev,
 			  u32 *rx_pause, u32 *tx_pause)
 {
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
 	int err;
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(pfcc_reg, in, local_port, 1);
-
 	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
 				   sizeof(out), MLX5_REG_PFCC, 0, 0);
 	if (err)
...
@@ -500,10 +486,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
 int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
 {
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(pfcc_reg, in, local_port, 1);
 	MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
 	MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
...
@@ -517,13 +502,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
 int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
 {
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
 	int err;
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(pfcc_reg, in, local_port, 1);
-
 	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
 				   sizeof(out), MLX5_REG_PFCC, 0, 0);
 	if (err)
...
@@ -567,12 +550,11 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev)
 int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
 {
-	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
 	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 	int err;
 	int i;
 
-	memset(in, 0, sizeof(in));
 	for (i = 0; i < 8; i++) {
 		if (prio_tc[i] > mlx5_max_tc(mdev))
 			return -EINVAL;
...
@@ -617,11 +599,9 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
 {
-	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
 	int i;
 
-	memset(in, 0, sizeof(in));
-
 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
 		MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
 		MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
...
@@ -633,11 +613,9 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
 {
-	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
 	int i;
 
-	memset(in, 0, sizeof(in));
-
 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
 		MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
 		MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
...
@@ -651,12 +629,10 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
 				    u8 *max_bw_value,
 				    u8 *max_bw_units)
 {
-	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
 	void *ets_tcn_conf;
 	int i;
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(qetc_reg, in, port_number, 1);
 
 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
...
@@ -701,35 +677,24 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
 int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
 {
-	u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
-	u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
+	u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
 
 	MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
 	MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
 	MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
-	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
 
 int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
 {
-	u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
-	u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
+	u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
 	int err;
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
 	MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
-	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
 	if (!err)
 		*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
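The WOL hunks show the other recurring change of the series: mlx5_cmd_exec_check_status() disappears because the reworked mlx5_cmd_exec() folds the firmware-status check into its own return value. Roughly, as a paraphrase (not the exact removed source), the old wrapper did:

/* Paraphrase of the removed wrapper: execute the command, then translate
 * the status in the output mailbox header into an errno.  After this
 * series mlx5_cmd_exec() returns that translated error itself, so every
 * caller below simply switches to it.
 */
static int cmd_exec_check_status_sketch(struct mlx5_core_dev *dev, u32 *in,
					int in_size, u32 *out, int out_size)
{
	int err = mlx5_cmd_exec(dev, in, in_size, out, out_size);

	if (err)
		return err;
	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}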
...
@@ -740,11 +705,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
 static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
 				  int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(pcmr_reg, in, local_port, 1);
-
 	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
 				    outlen, MLX5_REG_PCMR, 0, 0);
 }
...
@@ -759,12 +722,10 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
 int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
 {
-	u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(pcmr_reg, in, local_port, 1);
 	MLX5_SET(pcmr_reg, in, fcs_chk, enable);
-
 	return mlx5_set_ports_check(mdev, in, sizeof(in));
 }
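mlx5_set_port_fcs() keeps its small surface; a hypothetical netdev-feature caller (not in this commit) only decides the flag:

/* Hypothetical caller sketch: let hardware check (and report on) the
 * Ethernet FCS according to a feature toggle.
 */
static int apply_fcs_feature(struct mlx5_core_dev *mdev, bool check_fcs)
{
	return mlx5_set_port_fcs(mdev, check_fcs ? 1 : 0);
}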
...

drivers/net/ethernet/mellanox/mlx5/core/qp.c
...
@@ -271,30 +271,20 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			struct mlx5_core_qp *qp,
-			struct mlx5_create_qp_mbox_in *in,
-			int inlen)
+			u32 *in, int inlen)
 {
-	struct mlx5_create_qp_mbox_out out;
-	struct mlx5_destroy_qp_mbox_in din;
-	struct mlx5_destroy_qp_mbox_out dout;
+	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
+	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
+	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
 	int err;
 
-	memset(&out, 0, sizeof(out));
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
 
-	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
-	if (err) {
-		mlx5_core_warn(dev, "ret %d\n", err);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (err)
 		return err;
-	}
-
-	if (out.hdr.status) {
-		mlx5_core_warn(dev, "current num of QPs 0x%x\n",
-			       atomic_read(&dev->num_qps));
-		return mlx5_cmd_status_to_err(&out.hdr);
-	}
 
-	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
+	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
 	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
 
 	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
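This rewrite shows the third recurring pattern: hand-written mailbox structs with explicit cpu_to_be*() conversions give way to flat u32 arrays sized by MLX5_ST_SZ_DW() and addressed by field name via MLX5_SET()/MLX5_GET(). Conceptually (an illustration only, runnable in userspace; the real macros compute bit offsets from the generated mlx5_ifc.h layouts):

#include <arpa/inet.h>	/* htonl/ntohl stand in for the kernel be32 helpers */
#include <stdint.h>
#include <stdio.h>

/* Illustration: store/read one 32-bit field at dword 'dw' of a flat,
 * big-endian command buffer, the way the accessor macros do for named
 * fields at arbitrary bit offsets.
 */
static void set_dw(uint32_t *buf, int dw, uint32_t val)
{
	buf[dw] = htonl(val);
}

static uint32_t get_dw(const uint32_t *buf, int dw)
{
	return ntohl(buf[dw]);
}

int main(void)
{
	uint32_t in[4] = {0};	/* like u32 in[MLX5_ST_SZ_DW(...)] = {0} */

	set_dw(in, 0, 0x0500 << 16);	/* an opcode-style field in dword 0 */
	printf("dword 0 = 0x%08x\n", get_dw(in, 0));
	return 0;
}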
...
@@ -311,12 +301,11 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 	return 0;
 
 err_cmd:
-	memset(&din, 0, sizeof(din));
-	memset(&dout, 0, sizeof(dout));
-	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
-	din.qpn = cpu_to_be32(qp->qpn);
-	mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
+	memset(din, 0, sizeof(din));
+	memset(dout, 0, sizeof(dout));
+	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
...
@@ -324,45 +313,145 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
 			 struct mlx5_core_qp *qp)
 {
-	struct mlx5_destroy_qp_mbox_in in;
-	struct mlx5_destroy_qp_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
 	int err;
 
 	mlx5_debug_qp_remove(dev, qp);
 
 	destroy_qprqsq_common(dev, qp);
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
-	in.qpn = cpu_to_be32(qp->qpn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (err)
 		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
 	atomic_dec(&dev->num_qps);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
 
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
-			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+struct mbox_info {
+	u32 *in;
+	u32 *out;
+	int inlen;
+	int outlen;
+};
+
+static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
+{
+	mbox->inlen  = inlen;
+	mbox->outlen = outlen;
+	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
+	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
+	if (!mbox->in || !mbox->out) {
+		kfree(mbox->in);
+		kfree(mbox->out);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mbox_free(struct mbox_info *mbox)
+{
+	kfree(mbox->in);
+	kfree(mbox->out);
+}
+
+static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
+				u32 opt_param_mask, void *qpc,
+				struct mbox_info *mbox)
+{
+	mbox->out = NULL;
+	mbox->in = NULL;
+
+#define MBOX_ALLOC(mbox, typ)  \
+	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
+
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
+	MLX5_SET(typ##_in, in, opcode, _opcode); \
+	MLX5_SET(typ##_in, in, qpn, _qpn)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
+	MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
+	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
+
+	switch (opcode) {
+	/* 2RST & 2ERR */
+	case MLX5_CMD_OP_2RST_QP:
+		if (MBOX_ALLOC(mbox, qp_2rst))
+			return -ENOMEM;
+		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
+		break;
+	case MLX5_CMD_OP_2ERR_QP:
+		if (MBOX_ALLOC(mbox, qp_2err))
+			return -ENOMEM;
+		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
+		break;
+
+	/* MODIFY with QPC */
+	case MLX5_CMD_OP_RST2INIT_QP:
+		if (MBOX_ALLOC(mbox, rst2init_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_INIT2RTR_QP:
+		if (MBOX_ALLOC(mbox, init2rtr_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_RTR2RTS_QP:
+		if (MBOX_ALLOC(mbox, rtr2rts_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_RTS2RTS_QP:
+		if (MBOX_ALLOC(mbox, rts2rts_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_SQERR2RTS_QP:
+		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_INIT2INIT_QP:
+		if (MBOX_ALLOC(mbox, init2init_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	default:
+		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
+			      opcode, qpn);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+			u32 opt_param_mask, void *qpc,
 			struct mlx5_core_qp *qp)
 {
-	struct mlx5_modify_qp_mbox_out out;
-	int err = 0;
+	struct mbox_info mbox;
+	int err;
 
-	memset(&out, 0, sizeof(out));
-	in->hdr.opcode = cpu_to_be16(operation);
-	in->qpn = cpu_to_be32(qp->qpn);
-	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
+				   opt_param_mask, qpc, &mbox);
 	if (err)
 		return err;
 
-	return mlx5_cmd_status_to_err(&out.hdr);
+	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+	mbox_free(&mbox);
+	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
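With the new signature, a caller names the transition opcode and hands over a filled QPC instead of a transition-specific mailbox; the helper above builds the matching mlx5_ifc input. A hypothetical caller sketch (the qpc buffer is assumed to be built elsewhere):

/* Hypothetical caller: drive an INIT->RTR transition through the new entry
 * point.  An opt_param_mask of 0 means no optional QPC fields are modified.
 */
static int move_to_rtr(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       void *qpc)
{
	return mlx5_core_qp_modify(dev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, qp);
}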
...
@@ -382,66 +471,38 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
 }
 
 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
-		       struct mlx5_query_qp_mbox_out *out, int outlen)
+		       u32 *out, int outlen)
 {
-	struct mlx5_query_qp_mbox_in in;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(out, 0, outlen);
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
-	in.qpn = cpu_to_be32(qp->qpn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
-	if (err)
-		return err;
-
-	if (out->hdr.status)
-		return mlx5_cmd_status_to_err(&out->hdr);
+	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
 
-	return err;
+	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
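The query now returns a flat query_qp_out buffer that the caller owns. A hypothetical sketch of pulling the QP state out of it with the mlx5_ifc accessors:

/* Hypothetical caller sketch: query a QP and extract the state field from
 * the qpc section of the output (field names are from mlx5_ifc.h).
 */
static int read_qp_state(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	u32 out[MLX5_ST_SZ_DW(query_qp_out)] = {0};
	void *qpc;
	int err;

	err = mlx5_core_qp_query(dev, qp, out, sizeof(out));
	if (err)
		return err;

	qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
	return MLX5_GET(qpc, qpc, state);
}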
 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
 {
-	struct mlx5_alloc_xrcd_mbox_in in;
-	struct mlx5_alloc_xrcd_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {0};
 	int err;
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-	else
-		*xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
-
+	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
 
 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
 {
-	struct mlx5_dealloc_xrcd_mbox_in in;
-	struct mlx5_dealloc_xrcd_mbox_out out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {0};
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
-	in.xrcdn = cpu_to_be32(xrcdn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-	return err;
+	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
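Both XRCD calls shrink to a SET/exec pair. A hypothetical alloc/dealloc pairing sketch:

/* Hypothetical usage: allocate an XRC domain, use it, release it. */
static int xrcd_roundtrip(struct mlx5_core_dev *dev)
{
	u32 xrcdn;
	int err;

	err = mlx5_core_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	/* ... attach SRQs/QPs to xrcdn here ... */

	return mlx5_core_xrcd_dealloc(dev, xrcdn);
}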
...
@@ -449,28 +510,23 @@ EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
 int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
 				u8 flags, int error)
 {
-	struct mlx5_page_fault_resume_mbox_in in;
-	struct mlx5_page_fault_resume_mbox_out out;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
-	in.hdr.opmod = 0;
-	flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
-		  MLX5_PAGE_FAULT_RESUME_WRITE	   |
-		  MLX5_PAGE_FAULT_RESUME_RDMA);
-	flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
-	in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
-				   (flags << MLX5_QPN_BITS));
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
+	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};
 
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
+	MLX5_SET(page_fault_resume_in, in, opcode,
+		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
+	MLX5_SET(page_fault_resume_in, in, qpn, qpn);
 
-	return err;
+	if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
+		MLX5_SET(page_fault_resume_in, in, req_res, 1);
+	if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
+		MLX5_SET(page_fault_resume_in, in, read_write, 1);
+	if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
+		MLX5_SET(page_fault_resume_in, in, rdma, 1);
+	if (error)
+		MLX5_SET(page_fault_resume_in, in, error, 1);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
 #endif
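The masked-and-shifted flags_qpn word becomes one named field per flag. A hypothetical ODP-path caller (not in this commit) maps its flags the same way:

/* Hypothetical caller sketch: resume a QP after a requestor-side write
 * fault, with no error reported back to the device.
 */
static int resume_after_write_fault(struct mlx5_core_dev *dev, u32 qpn)
{
	u8 flags = MLX5_PAGE_FAULT_RESUME_REQUESTOR |
		   MLX5_PAGE_FAULT_RESUME_WRITE;

	return mlx5_core_page_fault_resume(dev, qpn, flags, 0);
}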
...
@@ -541,15 +597,12 @@ EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
 int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
 {
-	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
-	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
 	int err;
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
 	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (!err)
 		*counter_id = MLX5_GET(alloc_q_counter_out, out,
 				       counter_set_id);
...
@@ -559,31 +612,25 @@ EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
 {
-	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
-	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
 	MLX5_SET(dealloc_q_counter_in, in, opcode,
 		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
 	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
 
 int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
 			      int reset, void *out, int out_size)
 {
-	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
+	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
 
-	memset(in, 0, sizeof(in));
-
 	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
 	MLX5_SET(query_q_counter_in, in, clear, reset);
 	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
 }
 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
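A hypothetical end-to-end sketch of the counter API as it stands after these hunks (out_of_buffer is one of the fields defined for query_q_counter_out in mlx5_ifc.h):

/* Hypothetical usage: allocate a queue counter, read it once with the
 * clear flag set, then free it.
 */
static int sample_q_counter(struct mlx5_core_dev *dev, u32 *out_of_buffer)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
	u16 counter_id;
	int err;

	err = mlx5_core_alloc_q_counter(dev, &counter_id);
	if (err)
		return err;

	err = mlx5_core_query_q_counter(dev, counter_id, 1 /* reset */,
					out, sizeof(out));
	if (!err)
		*out_of_buffer = MLX5_GET(query_q_counter_out, out,
					  out_of_buffer);

	mlx5_core_dealloc_q_counter(dev, counter_id);
	return err;
}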
...

drivers/net/ethernet/mellanox/mlx5/core/rl.c
...
@@ -63,19 +63,14 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
 static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
 				   u32 rate, u16 index)
 {
-	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
-	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
+	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
 
 	MLX5_SET(set_rate_limit_in, in, opcode,
 		 MLX5_CMD_OP_SET_RATE_LIMIT);
 	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
 	MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
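A hypothetical in-file sketch of how the static command helper is meant to be wrapped, using the public range check visible above:

/* Hypothetical sketch (as if inside rl.c): validate a rate against the
 * device's supported range before programming a rate-limit table entry.
 */
static int program_rate(struct mlx5_core_dev *dev, u32 rate, u16 index)
{
	if (!mlx5_rl_is_in_range(dev, rate))
		return -ERANGE;

	return mlx5_set_rate_limit_cmd(dev, rate, index);
}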
...

drivers/net/ethernet/mellanox/mlx5/core/sriov.c
...
@@ -37,6 +37,13 @@
 #include "eswitch.h"
 #endif
 
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+	return !!sriov->num_vfs;
+}
+
 static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
...
@@ -144,6 +151,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	if (!mlx5_core_is_pf(dev))
 		return -EPERM;
 
+	if (num_vfs && mlx5_lag_is_active(dev)) {
+		mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
+		return -EINVAL;
+	}
+
 	mlx5_core_cleanup_vfs(dev);
 
 	if (!num_vfs) {
...
@@ -155,13 +167,13 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 		if (!pci_vfs_assigned(pdev))
 			pci_disable_sriov(pdev);
 		else
-			pr_info("unloading PF driver while leaving orphan VFs\n");
+			mlx5_core_info(dev, "unloading PF driver while leaving orphan VFs\n");
 
 		return 0;
 	}
 
 	err = mlx5_core_sriov_enable(pdev, num_vfs);
 	if (err) {
-		dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
+		mlx5_core_warn(dev, "mlx5_core_sriov_enable failed %d\n", err);
 		return err;
 	}
...
@@ -180,7 +192,8 @@ static int sync_required(struct pci_dev *pdev)
 	int cur_vfs = pci_num_vf(pdev);
 
 	if (cur_vfs != sriov->num_vfs) {
-		pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs);
+		mlx5_core_warn(dev, "current VFs %d, registered %d - sync needed\n",
+			       cur_vfs, sriov->num_vfs);
 		return 1;
 	}
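The new guard makes SR-IOV and LAG mutually exclusive. A hypothetical sketch of the mirror-image check on the LAG side, built on the mlx5_sriov_is_enabled() helper added above:

/* Hypothetical sketch: a LAG-side guard refusing to bond while SR-IOV is
 * enabled, the counterpart of the sriov-side check in this file.
 */
static int lag_create_allowed(struct mlx5_core_dev *dev)
{
	if (mlx5_sriov_is_enabled(dev))
		return -EINVAL;

	return 0;
}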
...

drivers/net/ethernet/mellanox/mlx5/core/srq.c
...
@@ -175,8 +175,8 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 	MLX5_SET(create_srq_in, create_in, opcode,
 		 MLX5_CMD_OP_CREATE_SRQ);
 
-	err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
-					 sizeof(create_out));
+	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+			    sizeof(create_out));
 	kvfree(create_in);
 	if (!err)
 		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
...
@@ -194,8 +194,8 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
 		 MLX5_CMD_OP_DESTROY_SRQ);
 	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
 
-	return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
-					  srq_out, sizeof(srq_out));
+	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+			     srq_out, sizeof(srq_out));
 }
 
 static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
...
@@ -209,8 +209,8 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 	MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
 	MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
 
-	return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
-					  srq_out, sizeof(srq_out));
+	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+			     srq_out, sizeof(srq_out));
 }
 
 static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
...
@@ -228,9 +228,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 	MLX5_SET(query_srq_in, srq_in, opcode,
 		 MLX5_CMD_OP_QUERY_SRQ);
 	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
-	err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
-					 srq_out,
-					 MLX5_ST_SZ_BYTES(query_srq_out));
+	err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out,
+			    MLX5_ST_SZ_BYTES(query_srq_out));
 	if (err)
 		goto out;
...
@@ -272,8 +271,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
 		 MLX5_CMD_OP_CREATE_XRC_SRQ);
 
 	memset(create_out, 0, sizeof(create_out));
-	err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
-					 sizeof(create_out));
+	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+			    sizeof(create_out));
 	if (err)
 		goto out;
...
@@ -286,36 +285,30 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
 static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
 			       struct mlx5_core_srq *srq)
 {
-	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
-	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
-
-	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
-	memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
+	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
 
 	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
 		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
 	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
 
-	return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
-					  xrcsrq_out, sizeof(xrcsrq_out));
+	return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+			     xrcsrq_out, sizeof(xrcsrq_out));
 }
 
 static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
 			   struct mlx5_core_srq *srq, u16 lwm)
 {
-	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
-	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
-
-	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
-	memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
+	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
 
 	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
 	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
 	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
 	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
 
-	return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
-					  xrcsrq_out, sizeof(xrcsrq_out));
+	return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+			     xrcsrq_out, sizeof(xrcsrq_out));
 }
 
 static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
...
@@ -335,9 +328,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
 	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
 		 MLX5_CMD_OP_QUERY_XRC_SRQ);
 	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
-	err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
-					 MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
+			    MLX5_ST_SZ_BYTES(query_xrc_srq_out));
 	if (err)
 		goto out;
...

drivers/net/ethernet/mellanox/mlx5/core/transobj.c
...
@@ -36,17 +36,14 @@
 int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
 {
-	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
-	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
 	int err;
 
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
 	MLX5_SET(alloc_transport_domain_in, in, opcode,
 		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
 
-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (!err)
 		*tdn = MLX5_GET(alloc_transport_domain_out, out,
 				transport_domain);
...
@@ -57,29 +54,23 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
 void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
 {
-	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
-	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
+	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
 
 	MLX5_SET(dealloc_transport_domain_in, in, opcode,
 		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
 	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
-
-	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
 
 int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
 {
-	u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+	u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
 	int err;
 
 	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (!err)
 		*rqn = MLX5_GET(create_rq_out, out, rqn);
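A hypothetical lifecycle sketch for the RQ calls above; a real caller fills the wq parameters and appends the page list before create, so the fixed-size buffer here is illustrative only:

/* Hypothetical usage: create an RQ from a (minimally sized) create_rq_in
 * buffer and destroy it again.
 */
static int rq_roundtrip(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	u32 rqn;
	int err;

	err = mlx5_core_create_rq(dev, in, sizeof(in), &rqn);
	if (err)
		return err;

	mlx5_core_destroy_rq(dev, rqn);
	return 0;
}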
...
@@ -95,21 +86,18 @@ int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
 	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
-
-	memset(out, 0, sizeof(out));
-	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_modify_rq);
 
 void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
 
 	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
 	MLX5_SET(destroy_rq_in, in, rqn, rqn);
-
-	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_destroy_rq);
...
@@ -121,19 +109,17 @@ int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
 	MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
 	MLX5_SET(query_rq_in, in, rqn, rqn);
 
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 EXPORT_SYMBOL(mlx5_core_query_rq);
 
 int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
 {
-	u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
 	int err;
 
 	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (!err)
 		*sqn = MLX5_GET(create_sq_out, out, sqn);
...
@@ -142,27 +128,22 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
 int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+	u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
 
 	MLX5_SET(modify_sq_in, in, sqn, sqn);
 	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
-
-	memset(out, 0, sizeof(out));
-	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_modify_sq);
 
 void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
 
 	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
 	MLX5_SET(destroy_sq_in, in, sqn, sqn);
-
-	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
 int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
@@ -172,21 +153,20 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
...
@@ -172,21 +153,20 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
MLX5_SET
(
query_sq_in
,
in
,
opcode
,
MLX5_CMD_OP_QUERY_SQ
);
MLX5_SET
(
query_sq_in
,
in
,
opcode
,
MLX5_CMD_OP_QUERY_SQ
);
MLX5_SET
(
query_sq_in
,
in
,
sqn
,
sqn
);
MLX5_SET
(
query_sq_in
,
in
,
sqn
,
sqn
);
return
mlx5_cmd_exec
(
dev
,
in
,
sizeof
(
in
),
out
,
outlen
);
return
mlx5_cmd_exec_check_status
(
dev
,
in
,
sizeof
(
in
),
out
,
outlen
);
}
}
EXPORT_SYMBOL
(
mlx5_core_query_sq
);
EXPORT_SYMBOL
(
mlx5_core_query_sq
);
int
mlx5_core_create_tir
(
struct
mlx5_core_dev
*
dev
,
u32
*
in
,
int
inlen
,
int
mlx5_core_create_tir
(
struct
mlx5_core_dev
*
dev
,
u32
*
in
,
int
inlen
,
u32
*
tirn
)
u32
*
tirn
)
{
{
u32
out
[
MLX5_ST_SZ_DW
(
create_tir_out
)];
u32
out
[
MLX5_ST_SZ_DW
(
create_tir_out
)]
=
{
0
}
;
int
err
;
int
err
;
MLX5_SET
(
create_tir_in
,
in
,
opcode
,
MLX5_CMD_OP_CREATE_TIR
);
MLX5_SET
(
create_tir_in
,
in
,
opcode
,
MLX5_CMD_OP_CREATE_TIR
);
memset
(
out
,
0
,
sizeof
(
out
));
memset
(
out
,
0
,
sizeof
(
out
));
err
=
mlx5_cmd_exec
_check_status
(
dev
,
in
,
inlen
,
out
,
sizeof
(
out
));
err
=
mlx5_cmd_exec
(
dev
,
in
,
inlen
,
out
,
sizeof
(
out
));
if
(
!
err
)
if
(
!
err
)
*
tirn
=
MLX5_GET
(
create_tir_out
,
out
,
tirn
);
*
tirn
=
MLX5_GET
(
create_tir_out
,
out
,
tirn
);
...
@@ -197,39 +177,32 @@ EXPORT_SYMBOL(mlx5_core_create_tir);
 int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
 			 int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+	u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
 
 	MLX5_SET(modify_tir_in, in, tirn, tirn);
 	MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
-
-	memset(out, 0, sizeof(out));
-	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
 
 	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
 	MLX5_SET(destroy_tir_in, in, tirn, tirn);
-
-	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_destroy_tir);
 
 int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *tisn)
 {
-	u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
 	int err;
 
 	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (!err)
 		*tisn = MLX5_GET(create_tis_out, out, tisn);
...
@@ -245,34 +218,29 @@ int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
 	MLX5_SET(modify_tis_in, in, tisn, tisn);
 	MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
 
-	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_modify_tis);
 
 void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
 
 	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
 	MLX5_SET(destroy_tis_in, in, tisn, tisn);
-
-	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_destroy_tis);
 
 int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *rmpn)
 {
-	u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+	u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
 	int err;
 
 	MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (!err)
 		*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
...
@@ -281,38 +249,31 @@ int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
 int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+	u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
 
 	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
-
-	memset(out, 0, sizeof(out));
-	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
 int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
 
 	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
 	MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
-
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out,
+			     sizeof(out));
 }
 
 int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
 {
-	u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+	u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
 	int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
 
-	memset(in, 0, sizeof(in));
 	MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
 	MLX5_SET(query_rmp_in, in, rmpn, rmpn);
-
-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
 int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
...
@@ -347,13 +308,11 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
 int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			  u32 *xsrqn)
 {
-	u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+	u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
 	int err;
 
 	MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (!err)
 		*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
);
...
@@ -362,33 +321,25 @@ int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,

int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
{
-	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
	MLX5_SET(destroy_xrc_srq_in, in, opcode,   MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);

-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
{
-	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
	void *srqc;
	void *xrc_srqc;
	int err;

-	memset(in, 0, sizeof(in));
	MLX5_SET(query_xrc_srq_in, in, opcode,   MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
-					 out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out,
+			    MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (!err) {
		xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
					xrc_srq_context_entry);
...
@@ -401,32 +352,25 @@ int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)

int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
-	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
-	u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
	MLX5_SET(arm_xrc_srq_in, in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
	MLX5_SET(arm_xrc_srq_in, in, lwm,      lwm);
	MLX5_SET(arm_xrc_srq_in, in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);

-	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *rqtn)
{
-	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	int err;

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
@@ -437,25 +381,20 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
...
@@ -437,25 +381,20 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
int
mlx5_core_modify_rqt
(
struct
mlx5_core_dev
*
dev
,
u32
rqtn
,
u32
*
in
,
int
mlx5_core_modify_rqt
(
struct
mlx5_core_dev
*
dev
,
u32
rqtn
,
u32
*
in
,
int
inlen
)
int
inlen
)
{
{
u32
out
[
MLX5_ST_SZ_DW
(
modify_rqt_out
)];
u32
out
[
MLX5_ST_SZ_DW
(
modify_rqt_out
)]
=
{
0
}
;
MLX5_SET
(
modify_rqt_in
,
in
,
rqtn
,
rqtn
);
MLX5_SET
(
modify_rqt_in
,
in
,
rqtn
,
rqtn
);
MLX5_SET
(
modify_rqt_in
,
in
,
opcode
,
MLX5_CMD_OP_MODIFY_RQT
);
MLX5_SET
(
modify_rqt_in
,
in
,
opcode
,
MLX5_CMD_OP_MODIFY_RQT
);
return
mlx5_cmd_exec
(
dev
,
in
,
inlen
,
out
,
sizeof
(
out
));
memset
(
out
,
0
,
sizeof
(
out
));
return
mlx5_cmd_exec_check_status
(
dev
,
in
,
inlen
,
out
,
sizeof
(
out
));
}
}
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
-	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};

-	memset(in, 0, sizeof(in));
	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
-	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
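Every hunk in this file follows the same conversion: stack command buffers go from declare-then-memset() to a `= {0}` initializer, and mlx5_cmd_exec_check_status() calls collapse into plain mlx5_cmd_exec(), which after this series folds the firmware status check into its return value. A minimal, self-contained userspace sketch of the initializer half of that pattern (ST_SZ_DW is a hypothetical stand-in, not the real MLX5_ST_SZ_DW macro):

	#include <stdio.h>
	#include <string.h>

	#define ST_SZ_DW 4	/* hypothetical stand-in for MLX5_ST_SZ_DW(...) */

	int main(void)
	{
		unsigned int out_old[ST_SZ_DW];		/* old style: declare ... */
		memset(out_old, 0, sizeof(out_old));	/* ... then clear by hand */

		unsigned int out_new[ST_SZ_DW] = {0};	/* new style: zeroed at definition */

		/* the two buffers are byte-identical; prints 0 */
		printf("%d\n", memcmp(out_old, out_new, sizeof(out_new)));
		return 0;
	}

The `= {0}` form zero-initializes the entire array, so the separate memset() line disappears from every call site.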
drivers/net/ethernet/mellanox/mlx5/core/uar.c
...
@@ -42,73 +42,28 @@ enum {
	NUM_LOW_LAT_UUARS	= 4,
};

-struct mlx5_alloc_uar_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_alloc_uar_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			uarn;
-	u8			rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			uarn;
-	u8			rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
-	struct mlx5_alloc_uar_mbox_in	in;
-	struct mlx5_alloc_uar_mbox_out	out;
+	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)]   = {0};
	int err;

-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		goto ex;
-
-	if (out.hdr.status) {
-		err = mlx5_cmd_status_to_err(&out.hdr);
-		goto ex;
-	}
-
-	*uarn = be32_to_cpu(out.uarn) & 0xffffff;
-
-ex:
+	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);

int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
-	struct mlx5_free_uar_mbox_in	in;
-	struct mlx5_free_uar_mbox_out	out;
-	int err;
+	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)]   = {0};

-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
-	in.uarn = cpu_to_be32(uarn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		goto ex;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-ex:
-	return err;
+	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+	MLX5_SET(dealloc_uar_in, in, uar, uarn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
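The UAR conversion is representative of the whole series: the hand-written big-endian mailbox structs disappear and the command payload is described by the mlx5_ifc layouts instead, with MLX5_SET()/MLX5_GET() doing the byte-order and bit-offset work. A hedged sketch of a caller against the converted API (the surrounding context and the `dev` pointer are assumptions, not part of this diff):

	u32 uarn;
	int err;

	err = mlx5_cmd_alloc_uar(dev, &uarn);	/* firmware status already folded into err */
	if (err)
		return err;
	/* ... map and use the UAR page identified by uarn ... */
	err = mlx5_cmd_free_uar(dev, uarn);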
...
drivers/net/ethernet/mellanox/mlx5/core/vport.c
...
@@ -39,10 +39,7 @@
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
-	int err;
-	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
...
@@ -51,11 +48,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

-	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
-	if (err)
-		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
-
-	return err;
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
...
@@ -81,58 +74,43 @@ EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
-	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
-	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
-	int err;
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

-	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
-					 sizeof(out));
-	if (err)
-		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
-
-	return err;
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
-	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
-
-	memset(in, 0, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

-	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
-	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-	memset(out, 0, sizeof(out));
-	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
...
@@ -147,6 +125,26 @@ void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				     u16 vport, u8 min_inline)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *nic_vport_ctx;
+
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 field_select.min_inline, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+				     in, nic_vport_context);
+	MLX5_SET(nic_vport_context, nic_vport_ctx,
+		 min_wqe_inline_mode, min_inline);
+
+	return mlx5_modify_nic_vport_context(mdev, in, inlen);
+}
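mlx5_modify_nic_vport_min_inline() is new in this hunk; it flips the new field_select.min_inline bit (added to mlx5_ifc.h further down in this diff) and writes min_wqe_inline_mode through the regular modify_nic_vport_context command. An illustrative call, where the caller context and the mode value 1 are assumptions, not taken from this diff:

	u8 min_inline = 1;	/* hypothetical inline-mode value */
	int err = mlx5_modify_nic_vport_min_inline(mdev, vport, min_inline);

	if (err)
		mlx5_core_warn(mdev, "failed to set vport %d min inline: %d\n",
			       vport, err);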
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
...
@@ -254,7 +252,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
-	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
...
@@ -278,7 +276,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

-	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;
...
@@ -291,7 +288,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;
...
@@ -361,7 +358,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
		ether_addr_copy(curr_mac, addr_list[i]);
	}

-	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
...
@@ -406,7 +403,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

-	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;
...
@@ -473,7 +470,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

-	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
...
@@ -631,10 +628,6 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
	if (err)
		goto out;

-	err = mlx5_cmd_status_to_err_v2(out);
-	if (err)
-		goto out;
-
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;
...
@@ -700,10 +693,6 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
	if (err)
		goto out;

-	err = mlx5_cmd_status_to_err_v2(out);
-	if (err)
-		goto out;
-
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
...
@@ -721,7 +710,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
-	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
...
@@ -729,7 +718,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
-	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;
...
@@ -750,9 +738,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
	MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
-	if (err)
-		goto ex;
-
-	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto ex;
...
@@ -969,10 +954,6 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
	MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
-	if (err)
-		goto free;
-
-	err = mlx5_cmd_status_to_err_v2(out);
free:
	kvfree(in);
	return err;
...
@@ -1035,11 +1016,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
-	if (err)
-		goto ex;
-
-	err = mlx5_cmd_status_to_err_v2(out);
ex:
	kfree(in);
	return err;
...
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
...
@@ -46,41 +46,24 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv)
static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
-	struct mlx5_outbox_hdr *hdr;
-	int err;
-
-	u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
-	u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
+	u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};

	MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
		 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
	MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-
-	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	hdr = (struct mlx5_outbox_hdr *)out;
-	return hdr->status ? -ENOMEM : 0;
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
-	u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
-	u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
+	u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};

	MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
		 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
	MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
-					  sizeof(out));
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
...
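Worth noting about the add-port hunk above: the old code mapped any nonzero firmware status to a blanket -ENOMEM, while after this change the error translated by mlx5_cmd_exec() propagates as-is. A hedged sketch of what a caller now observes (the caller shown is an assumption, not copied from this commit):

	err = mlx5e_vxlan_core_add_port_cmd(mdev, port);
	if (err)	/* now the translated firmware error, not always -ENOMEM */
		netdev_warn(netdev, "failed to add VXLAN port %d: %d\n",
			    port, err);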
include/linux/mlx5/cq.h
...
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_create_cq_mbox_in *in, int inlen);
+			u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-		       struct mlx5_query_cq_mbox_out *out);
+		       u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_modify_cq_mbox_in *in, int in_sz);
+			u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
...
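With the mailbox structs gone, the CQ entry points take raw u32 arrays sized from the mlx5_ifc layouts. A minimal sketch against the new prototypes, assuming an already-initialized dev and cq (illustrative, not code from this commit):

	u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {0};
	int err;

	err = mlx5_core_query_cq(dev, cq, out, sizeof(out));
	if (!err) {
		/* parse fields out of 'out' with MLX5_GET(query_cq_out, out, ...) */
	}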
include/linux/mlx5/device.h
...
@@ -197,19 +197,6 @@ enum {
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

-enum {
-	MLX5_ACCESS_MODE_PA	= 0,
-	MLX5_ACCESS_MODE_MTT	= 1,
-	MLX5_ACCESS_MODE_KLM	= 2
-};
-
-enum {
-	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
-	MLX5_MKEY_FLAG_SYNC_UMR	= 1 << 29,
-	MLX5_MKEY_BSF_EN	= 1 << 30,
-	MLX5_MKEY_LEN64		= 1 << 31,
-};
-
enum {
	MLX5_EN_RD	= (u64)1,
	MLX5_EN_WR	= (u64)2
...
@@ -411,33 +398,6 @@ enum {
	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
};

-struct mlx5_inbox_hdr {
-	__be16		opcode;
-	u8		rsvd[4];
-	__be16		opmod;
-};
-
-struct mlx5_outbox_hdr {
-	u8		status;
-	u8		rsvd[3];
-	__be32		syndrome;
-};
-
-struct mlx5_cmd_query_adapter_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_cmd_query_adapter_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[24];
-	u8			intapin;
-	u8			rsvd1[13];
-	__be16			vsd_vendor_id;
-	u8			vsd[208];
-	u8			vsd_psid[16];
-};
-
enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
...
@@ -455,30 +415,6 @@ struct mlx5_odp_caps {
	char reserved2[0xe4];
};

-struct mlx5_cmd_init_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			profile;
-	u8			rsvd1[4];
-};
-
-struct mlx5_cmd_init_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			profile;
-	u8			rsvd1[4];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
...
@@ -494,7 +430,6 @@ struct mlx5_cmd_layout {
	u8		status_own;
};

-
struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
...
@@ -856,245 +791,15 @@ struct mlx5_cqe128 {
	struct mlx5_cqe64	cqe64;
};

-struct mlx5_srq_ctx {
-	u8			state_log_sz;
-	u8			rsvd0[3];
-	__be32			flags_xrcd;
-	__be32			pgoff_cqn;
-	u8			rsvd1[4];
-	u8			log_pg_sz;
-	u8			rsvd2[7];
-	__be32			pd;
-	__be16			lwm;
-	__be16			wqe_cnt;
-	u8			rsvd3[8];
-	__be64			db_record;
-};
-
-struct mlx5_create_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_srqn;
-	u8			rsvd0[4];
-	struct mlx5_srq_ctx	ctx;
-	u8			rsvd1[208];
-	__be64			pas[0];
-};
-
-struct mlx5_create_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			srqn;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			srqn;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			srqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_query_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-	struct mlx5_srq_ctx	ctx;
-	u8			rsvd1[32];
-	__be64			pas[0];
-};
-
-struct mlx5_arm_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			srqn;
-	__be16			rsvd;
-	__be16			lwm;
-};
-
-struct mlx5_arm_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_cq_context {
-	u8			status;
-	u8			cqe_sz_flags;
-	u8			st;
-	u8			rsvd3;
-	u8			rsvd4[6];
-	__be16			page_offset;
-	__be32			log_sz_usr_page;
-	__be16			cq_period;
-	__be16			cq_max_count;
-	__be16			rsvd20;
-	__be16			c_eqn;
-	u8			log_pg_sz;
-	u8			rsvd25[7];
-	__be32			last_notified_index;
-	__be32			solicit_producer_index;
-	__be32			consumer_counter;
-	__be32			producer_counter;
-	u8			rsvd48[8];
-	__be64			db_record_addr;
-};
-
-struct mlx5_create_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_cqn;
-	u8			rsvdx[4];
-	struct mlx5_cq_context	ctx;
-	u8			rsvd6[192];
-	__be64			pas[0];
-};
-
-struct mlx5_create_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-};
-
-struct mlx5_query_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_query_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-	struct mlx5_cq_context	ctx;
-	u8			rsvd6[16];
-	__be64			pas[0];
-};
-
-struct mlx5_modify_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	__be32			field_select;
-	struct mlx5_cq_context	ctx;
-	u8			rsvd[192];
-	__be64			pas[0];
-};
-
-struct mlx5_modify_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_eq_context {
-	u8			status;
-	u8			ec_oi;
-	u8			st;
-	u8			rsvd2[7];
-	__be16			page_pffset;
-	__be32			log_sz_usr_page;
-	u8			rsvd3[7];
-	u8			intr;
-	u8			log_page_size;
-	u8			rsvd4[15];
-	__be32			consumer_counter;
-	__be32			produser_counter;
-	u8			rsvd5[16];
-};
-
-struct mlx5_create_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			input_eqn;
-	u8			rsvd1[4];
-	struct mlx5_eq_context	ctx;
-	u8			rsvd2[8];
-	__be64			events_mask;
-	u8			rsvd3[176];
-	__be64			pas[0];
-};
-
-struct mlx5_create_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			eq_number;
-	u8			rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			eqn;
-	u8			rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_map_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be64			mask;
-	u8			mu;
-	u8			rsvd0[2];
-	u8			eqn;
-	u8			rsvd1[24];
-};
-
-struct mlx5_map_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			eqn;
-	u8			rsvd1[4];
-};
-
-struct mlx5_query_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	struct mlx5_eq_context	ctx;
-};
-
enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
+	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
+	MLX5_MKEY_FLAG_SYNC_UMR	= 1 << 29,
+	MLX5_MKEY_BSF_EN	= 1 << 30,
+	MLX5_MKEY_LEN64		= 1 << 31,
};

struct mlx5_mkey_seg {
...
@@ -1119,134 +824,12 @@ struct mlx5_mkey_seg {
	u8	rsvd4[4];
};

-struct mlx5_query_special_ctxs_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_special_ctxs_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			dump_fill_mkey;
-	__be32			reserved_lkey;
-};
-
-struct mlx5_create_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_mkey_index;
-	__be32			flags;
-	struct mlx5_mkey_seg	seg;
-	u8			rsvd1[16];
-	__be32			xlat_oct_act_size;
-	__be32			rsvd2;
-	u8			rsvd3[168];
-	__be64			pas[0];
-};
-
-struct mlx5_create_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			mkey;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			mkey;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			mkey;
-};
-
-struct mlx5_query_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be64			pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			mkey;
-	__be64			pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_dump_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-};
-
-struct mlx5_dump_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			mkey;
-};
-
-struct mlx5_mad_ifc_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be16			remote_lid;
-	u8			rsvd0;
-	u8			port;
-	u8			rsvd1[4];
-	u8			data[256];
-};
-
-struct mlx5_mad_ifc_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	u8			data[256];
-};
-
-struct mlx5_access_reg_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			register_id;
-	__be32			arg;
-	__be32			data[0];
-};
-
-struct mlx5_access_reg_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	__be32			data[0];
-};
-
#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
};

-struct mlx5_allocate_psv_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			npsv_pd;
-	__be32			rsvd_psv0;
-};
-
-struct mlx5_allocate_psv_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	__be32			psv_idx[4];
-};
-
-struct mlx5_destroy_psv_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			psv_number;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_psv_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
enum {
	VPORT_STATE_DOWN		= 0x0,
	VPORT_STATE_UP			= 0x1,
...
@@ -1381,6 +964,18 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
...
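All of the structs removed above encoded command layouts as named __be16/__be32 members that had to be kept in sync with firmware by hand; their replacements are the bit-exact mlx5_ifc.h descriptions accessed through MLX5_SET()/MLX5_GET(). A deliberately simplified userspace model of what such accessors do — NOT the real kernel macros, which also mask and shift sub-dword fields by bit offset:

	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl/ntohl */

	/* model: a field is addressed by its dword index into a big-endian buffer */
	static void set_field(uint32_t *buf, int dw, uint32_t val)
	{
		buf[dw] = htonl(val);
	}

	static uint32_t get_field(const uint32_t *buf, int dw)
	{
		return ntohl(buf[dw]);
	}

The payoff is that one generated header replaces dozens of hand-maintained structs that each needed editing whenever the firmware interface moved.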
include/linux/mlx5/driver.h
...
@@ -48,10 +48,6 @@
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>

-enum {
-	MLX5_RQ_BITMASK_VSD = 1 << 1,
-};
-
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
...
@@ -481,6 +477,7 @@ struct mlx5_fc_stats {
};

struct mlx5_eswitch;
+struct mlx5_lag;

struct mlx5_rl_entry {
	u32			rate;
...
@@ -554,6 +551,7 @@ struct mlx5_priv {
	struct mlx5_flow_steering *steering;
	struct mlx5_eswitch     *eswitch;
	struct mlx5_core_sriov	sriov;
+	struct mlx5_lag		*lag;
	unsigned long		pci_dev_data;
	struct mlx5_fc_stats		fc_stats;
	struct mlx5_rl_table		rl_table;
...
@@ -771,14 +769,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
-int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
...
@@ -807,15 +806,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+			     struct mlx5_core_mkey *mkey,
+			     u32 *in, int inlen,
+			     u32 *out, int outlen,
+			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
-			  struct mlx5_create_mkey_mbox_in *in, int inlen,
-			  mlx5_cmd_cbk_t callback, void *context,
-			  struct mlx5_create_mkey_mbox_out *out);
+			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
-			 struct mlx5_query_mkey_mbox_out *out, int outlen);
+			 u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
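mlx5_cmd_status_to_err()/..._v2() leave the public API; mlx5_cmd_exec() now returns the translated error itself, and the new mlx5_cmd_mbox_status() exposes the raw firmware status and syndrome for callers that still want them, typically for a log line. A hedged sketch of that pattern (local names and the error path are assumptions, not from this diff):

	u8  status;
	u32 syndrome;

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_cmd_mbox_status(out, &status, &syndrome);
		mlx5_core_err(dev, "cmd failed, status 0x%x, syndrome 0x%x\n",
			      status, syndrome);
	}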
...
@@ -865,7 +867,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
-		       struct mlx5_query_eq_mbox_out *out, int outlen);
+		       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
...
@@ -942,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
...
include/linux/mlx5/fs.h
...
@@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
	MLX5_FLOW_NAMESPACE_BYPASS,
+	MLX5_FLOW_NAMESPACE_LAG,
	MLX5_FLOW_NAMESPACE_OFFLOADS,
	MLX5_FLOW_NAMESPACE_ETHTOOL,
	MLX5_FLOW_NAMESPACE_KERNEL,
...
@@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type {
	MLX5_FLOW_NAMESPACE_FDB,
	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
+	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
};

struct mlx5_flow_table;
...
@@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						int prio,
						int num_flow_table_entries,
						u32 level, u16 vport);
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+					       struct mlx5_flow_namespace *ns,
+					       int prio, u32 level);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);

/* inbox should be set with the following values:
...
include/linux/mlx5/mlx5_ifc.h
...
@@ -152,7 +152,7 @@ enum {
	MLX5_CMD_OP_CONFIG_INT_MODERATION         = 0x804,
	MLX5_CMD_OP_ACCESS_REG                    = 0x805,
	MLX5_CMD_OP_ATTACH_TO_MCG                 = 0x806,
-	MLX5_CMD_OP_DETTACH_FROM_MCG              = 0x807,
+	MLX5_CMD_OP_DETACH_FROM_MCG               = 0x807,
	MLX5_CMD_OP_GET_DROPPED_PACKET_LOG        = 0x80a,
	MLX5_CMD_OP_MAD_IFC                       = 0x50d,
	MLX5_CMD_OP_QUERY_MAD_DEMUX               = 0x80b,
...
@@ -174,6 +174,12 @@ enum {
	MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
	MLX5_CMD_OP_SET_WOL_ROL                   = 0x830,
	MLX5_CMD_OP_QUERY_WOL_ROL                 = 0x831,
+	MLX5_CMD_OP_CREATE_LAG                    = 0x840,
+	MLX5_CMD_OP_MODIFY_LAG                    = 0x841,
+	MLX5_CMD_OP_QUERY_LAG                     = 0x842,
+	MLX5_CMD_OP_DESTROY_LAG                   = 0x843,
+	MLX5_CMD_OP_CREATE_VPORT_LAG              = 0x844,
+	MLX5_CMD_OP_DESTROY_VPORT_LAG             = 0x845,
	MLX5_CMD_OP_CREATE_TIR                    = 0x900,
	MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
	MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
...
@@ -212,6 +218,8 @@ enum {
	MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
	MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
	MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
+	MLX5_CMD_OP_ALLOC_ENCAP_HEADER            = 0x93d,
+	MLX5_CMD_OP_DEALLOC_ENCAP_HEADER          = 0x93e,
	MLX5_CMD_OP_MAX
};
...
@@ -281,7 +289,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
	u8         modify_root[0x1];
	u8         identified_miss_table_mode[0x1];
	u8         flow_table_modify[0x1];
-	u8         reserved_at_7[0x19];
+	u8         encap[0x1];
+	u8         decap[0x1];
+	u8         reserved_at_9[0x17];

	u8         reserved_at_20[0x2];
	u8         log_max_ft_size[0x6];
...
@@ -473,7 +483,9 @@ struct mlx5_ifc_ads_bits {
struct mlx5_ifc_flow_table_nic_cap_bits {
	u8         nic_rx_multi_path_tirs[0x1];
-	u8         reserved_at_1[0x1ff];
+	u8         nic_rx_multi_path_tirs_fts[0x1];
+	u8         allow_sniffer_and_nic_rx_shared_tir[0x1];
+	u8         reserved_at_3[0x1fd];

	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
...
@@ -512,7 +524,15 @@ struct mlx5_ifc_e_switch_cap_bits {
	u8         nic_vport_node_guid_modify[0x1];
	u8         nic_vport_port_guid_modify[0x1];

-	u8         reserved_at_20[0x7e0];
+	u8         vxlan_encap_decap[0x1];
+	u8         nvgre_encap_decap[0x1];
+	u8         reserved_at_22[0x9];
+	u8         log_max_encap_headers[0x5];
+	u8         reserved_2b[0x6];
+	u8         max_encap_header_size[0xa];
+
+	u8         reserved_40[0x7c0];
};

struct mlx5_ifc_qos_cap_bits {
...
@@ -767,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
	u8         out_of_seq_cnt[0x1];
	u8         vport_counters[0x1];
	u8         retransmission_q_counters[0x1];
-	u8         reserved_at_183[0x3];
+	u8         reserved_at_183[0x1];
+	u8         modify_rq_counter_set_id[0x1];
+	u8         reserved_at_185[0x1];
	u8         max_qp_cnt[0xa];
	u8         pkey_table_size[0x10];
...
@@ -870,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
	u8         pad_tx_eth_packet[0x1];
	u8         reserved_at_263[0x8];
	u8         log_bf_reg_size[0x5];
-	u8         reserved_at_270[0x10];
+	u8         reserved_at_270[0xb];
+	u8         lag_master[0x1];
+	u8         num_lag_ports[0x4];

	u8         reserved_at_280[0x10];
	u8         max_wqe_sz_sq[0x10];
...
@@ -1904,7 +1929,7 @@ enum {
struct mlx5_ifc_qpc_bits {
	u8         state[0x4];
-	u8         reserved_at_4[0x4];
+	u8         lag_tx_port_affinity[0x4];
	u8         st[0x8];
	u8         reserved_at_10[0x3];
	u8         pm_state[0x2];
...
@@ -1966,7 +1991,10 @@ struct mlx5_ifc_qpc_bits {
	u8         reserved_at_3e0[0x8];
	u8         cqn_snd[0x18];

-	u8         reserved_at_400[0x40];
+	u8         reserved_at_400[0x8];
+	u8         deth_sqpn[0x18];
+
+	u8         reserved_at_420[0x20];

	u8         reserved_at_440[0x8];
	u8         last_acked_psn[0x18];
...
@@ -2064,6 +2092,8 @@ enum {
	MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
	MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
	MLX5_FLOW_CONTEXT_ACTION_COUNT     = 0x8,
+	MLX5_FLOW_CONTEXT_ACTION_ENCAP     = 0x10,
+	MLX5_FLOW_CONTEXT_ACTION_DECAP     = 0x20,
};

struct mlx5_ifc_flow_context_bits {
...
@@ -2083,7 +2113,9 @@ struct mlx5_ifc_flow_context_bits {
	u8         reserved_at_a0[0x8];
	u8         flow_counter_list_size[0x18];

-	u8         reserved_at_c0[0x140];
+	u8         encap_id[0x20];
+
+	u8         reserved_at_e0[0x120];

	struct mlx5_ifc_fte_match_param_bits match_value;
...
@@ -2146,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits {
};

struct mlx5_ifc_tisc_bits {
-	u8         reserved_at_0[0xc];
+	u8         strict_lag_tx_port_affinity[0x1];
+	u8         reserved_at_1[0x3];
+	u8         lag_tx_port_affinity[0x04];
+
+	u8         reserved_at_8[0x4];
	u8         prio[0x4];

	u8         reserved_at_10[0x10];
...
@@ -2808,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits {
	struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;

-	u8         reserved_at_180[0x180];
+	u8         reserved_at_180[0x200];

	struct mlx5_ifc_wq_bits wq;
};
...
@@ -3489,7 +3525,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
	u8         syndrome[0x20];

-	u8         reserved_at_40[0x20];
+	u8         dump_fill_mkey[0x20];

	u8         resd_lkey[0x20];
};
...
@@ -4213,6 +4249,85 @@ struct mlx5_ifc_query_eq_in_bits {
	u8         reserved_at_60[0x20];
};

+struct mlx5_ifc_encap_header_in_bits {
+	u8         reserved_at_0[0x5];
+	u8         header_type[0x3];
+	u8         reserved_at_8[0xe];
+	u8         encap_header_size[0xa];
+
+	u8         reserved_at_20[0x10];
+	u8         encap_header[2][0x8];
+
+	u8         more_encap_header[0][0x8];
+};
+
+struct mlx5_ifc_query_encap_header_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xa0];
+
+	struct mlx5_ifc_encap_header_in_bits encap_header[0];
+};
+
+struct mlx5_ifc_query_encap_header_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         encap_id[0x20];
+
+	u8         reserved_at_60[0xa0];
+};
+
+struct mlx5_ifc_alloc_encap_header_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         encap_id[0x20];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_encap_header_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xa0];
+
+	struct mlx5_ifc_encap_header_in_bits encap_header;
+};
+
+struct mlx5_ifc_dealloc_encap_header_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_encap_header_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         encap_id[0x20];
+
+	u8         reserved_60[0x20];
+};
+
struct mlx5_ifc_query_dct_out_bits {
	u8         status[0x8];
	u8         reserved_at_8[0x18];
...
@@ -4517,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits {
struct mlx5_ifc_modify_tis_bitmask_bits {
	u8         reserved_at_0[0x20];

-	u8         reserved_at_20[0x1f];
+	u8         reserved_at_20[0x1d];
+	u8         lag_tx_port_affinity[0x1];
+	u8         strict_lag_tx_port_affinity[0x1];
	u8         prio[0x1];
};
...
@@ -4652,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits {
	u8         reserved_at_40[0x40];
};

+enum {
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
struct mlx5_ifc_modify_rq_in_bits {
	u8         opcode[0x10];
	u8         reserved_at_10[0x10];
...
@@ -4721,7 +4843,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits {
	u8         reserved_at_0[0x16];
	u8         node_guid[0x1];
	u8         port_guid[0x1];
-	u8         reserved_at_18[0x1];
+	u8         min_inline[0x1];
	u8         mtu[0x1];
	u8         change_event[0x1];
	u8         promisc[0x1];
...
@@ -6099,7 +6221,9 @@ struct mlx5_ifc_create_flow_table_in_bits {
	u8         reserved_at_a0[0x20];

-	u8         reserved_at_c0[0x4];
+	u8         encap_en[0x1];
+	u8         decap_en[0x1];
+	u8         reserved_at_c2[0x2];
	u8         table_miss_mode[0x4];
	u8         level[0x8];
	u8         reserved_at_d0[0x8];
...
@@ -6108,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits {
	u8         reserved_at_e0[0x8];
	u8         table_miss_id[0x18];

-	u8         reserved_at_100[0x100];
+	u8         reserved_at_100[0x8];
+	u8         lag_master_next_table_id[0x18];
+
+	u8         reserved_at_120[0x80];
};

struct mlx5_ifc_create_flow_group_out_bits {
...
@@ -7562,7 +7689,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
};

enum {
-	MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID     = 0x1,
+	MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID     = (1UL << 0),
+	MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
};

struct mlx5_ifc_modify_flow_table_out_bits {
...
@@ -7601,7 +7729,10 @@ struct mlx5_ifc_modify_flow_table_in_bits {
	u8         reserved_at_e0[0x8];
	u8         table_miss_id[0x18];

-	u8         reserved_at_100[0x100];
+	u8         reserved_at_100[0x8];
+	u8         lag_master_next_table_id[0x18];
+
+	u8         reserved_at_120[0x80];
};

struct mlx5_ifc_ets_tcn_config_reg_bits {
...
@@ -7709,4 +7840,134 @@ struct mlx5_ifc_dcbx_param_bits {
...
@@ -7709,4 +7840,134 @@ struct mlx5_ifc_dcbx_param_bits {
 	u8         error[0x8];
 	u8         reserved_at_a0[0x160];
 };
 
+struct mlx5_ifc_lagc_bits {
+	u8         reserved_at_0[0x1d];
+	u8         lag_state[0x3];
+
+	u8         reserved_at_20[0x14];
+	u8         tx_remap_affinity_2[0x4];
+	u8         reserved_at_38[0x4];
+	u8         tx_remap_affinity_1[0x4];
+};
+
+struct mlx5_ifc_create_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_modify_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+	u8         field_select[0x20];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
 #endif /* MLX5_IFC_H */
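These layouts back the hardware LAG support this merge introduces. A sketch of the CREATE_LAG flow: the lagc context maps each logical transmit affinity to a physical port, and the command goes through the standard mailbox path (the function name here is illustrative):

static int cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			  u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

MODIFY_LAG follows the same shape, with field_select choosing which lagc fields firmware should apply.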
include/linux/mlx5/port.h
...
@@ -61,6 +61,39 @@ enum mlx5_an_status {
 #define MLX5_I2C_ADDR_HIGH		0x51
 
 #define MLX5_EEPROM_PAGE_LENGTH		256
 
+enum mlx5e_link_mode {
+	MLX5E_1000BASE_CX_SGMII	 = 0,
+	MLX5E_1000BASE_KX	 = 1,
+	MLX5E_10GBASE_CX4	 = 2,
+	MLX5E_10GBASE_KX4	 = 3,
+	MLX5E_10GBASE_KR	 = 4,
+	MLX5E_20GBASE_KR2	 = 5,
+	MLX5E_40GBASE_CR4	 = 6,
+	MLX5E_40GBASE_KR4	 = 7,
+	MLX5E_56GBASE_R4	 = 8,
+	MLX5E_10GBASE_CR	 = 12,
+	MLX5E_10GBASE_SR	 = 13,
+	MLX5E_10GBASE_ER	 = 14,
+	MLX5E_40GBASE_SR4	 = 15,
+	MLX5E_40GBASE_LR4	 = 16,
+	MLX5E_50GBASE_SR2	 = 18,
+	MLX5E_100GBASE_CR4	 = 20,
+	MLX5E_100GBASE_SR4	 = 21,
+	MLX5E_100GBASE_KR4	 = 22,
+	MLX5E_100GBASE_LR4	 = 23,
+	MLX5E_100BASE_TX	 = 24,
+	MLX5E_1000BASE_T	 = 25,
+	MLX5E_10GBASE_T		 = 26,
+	MLX5E_25GBASE_CR	 = 27,
+	MLX5E_25GBASE_KR	 = 28,
+	MLX5E_25GBASE_SR	 = 29,
+	MLX5E_50GBASE_CR2	 = 30,
+	MLX5E_50GBASE_KR2	 = 31,
+	MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
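MLX5E_PROT_MASK turns a link-mode index into a PTYS protocol bit; masks are OR-ed together and pushed with the existing port helpers. For example, advertising only the 25G modes, given an initialized struct mlx5_core_dev *mdev (a sketch; MLX5_PTYS_EN is the existing Ethernet proto_mask):

u32 eth_proto = MLX5E_PROT_MASK(MLX5E_25GBASE_CR) |
		MLX5E_PROT_MASK(MLX5E_25GBASE_SR);

int err = mlx5_set_port_ptys(mdev, false, eth_proto, MLX5_PTYS_EN);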
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
 			 int ptys_size, int proto_mask, u8 local_port);
...
@@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
 				u32 *proto_admin, int proto_mask);
 int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
 				    u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
-			       u8 *proto_oper, int proto_mask,
-			       u8 local_port);
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+				  u8 *proto_oper, u8 local_port);
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+				   u32 *proto_oper, u8 local_port);
 int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
 		       u32 proto_admin, int proto_mask);
 void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
...
include/linux/mlx5/qp.h
...
@@ -123,12 +123,13 @@ enum {
 };
 
 enum {
-	MLX5_NON_ZERO_RQ	= 0 << 24,
-	MLX5_SRQ_RQ		= 1 << 24,
-	MLX5_CRQ_RQ		= 2 << 24,
-	MLX5_ZERO_LEN_RQ	= 3 << 24
+	MLX5_NON_ZERO_RQ	= 0x0,
+	MLX5_SRQ_RQ		= 0x1,
+	MLX5_CRQ_RQ		= 0x2,
+	MLX5_ZERO_LEN_RQ	= 0x3
 };
 
+/* TODO REM */
 enum {
 	/* params1 */
 	MLX5_QP_BIT_SRE		= 1 << 15,
...
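With the plain 0x0-0x3 encodings, the RQ type becomes a discrete qpc field written through the mlx5_ifc accessors rather than a value OR-ed into a shifted flags word. A one-line sketch, where qpc is assumed to point into a create_qp_in buffer:

MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);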
@@ -177,12 +178,6 @@ enum {
 	MLX5_FENCE_MODE_SMALL_AND_FENCE	= 4 << 5,
 };
 
-enum {
-	MLX5_QP_LAT_SENSITIVE	= 1 << 28,
-	MLX5_QP_BLOCK_MCAST	= 1 << 30,
-	MLX5_QP_ENABLE_SIG	= 1 << 31,
-};
-
 enum {
 	MLX5_RCV_DBR	= 0,
 	MLX5_SND_DBR	= 1,
...
@@ -484,6 +479,7 @@ struct mlx5_qp_path {
 	u8			rmac[6];
 };
 
+/* FIXME: use mlx5_ifc.h qpc */
 struct mlx5_qp_context {
 	__be32			flags;
 	__be32			flags_pd;
...
@@ -525,99 +521,6 @@ struct mlx5_qp_context {
 	u8			rsvd1[24];
 };
 
-struct mlx5_create_qp_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_qpn;
-	u8			rsvd0[4];
-	__be32			opt_param_mask;
-	u8			rsvd1[4];
-	struct mlx5_qp_context	ctx;
-	u8			rsvd3[16];
-	__be64			pas[0];
-};
-
-struct mlx5_create_qp_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			qpn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			qpn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-};
-
-struct mlx5_modify_qp_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			qpn;
-	u8			rsvd0[4];
-	__be32			optparam;
-	u8			rsvd1[4];
-	struct mlx5_qp_context	ctx;
-	u8			rsvd2[16];
-};
-
-struct mlx5_modify_qp_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-};
-
-struct mlx5_query_qp_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			qpn;
-	u8			rsvd[4];
-};
-
-struct mlx5_query_qp_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd1[8];
-	__be32			optparam;
-	u8			rsvd0[4];
-	struct mlx5_qp_context	ctx;
-	u8			rsvd2[16];
-	__be64			pas[0];
-};
-
-struct mlx5_conf_sqp_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			qpn;
-	u8			rsvd[3];
-	u8			type;
-};
-
-struct mlx5_conf_sqp_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			xrcdn;
-	u8			rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			xrcdn;
-	u8			rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
 static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
 {
 	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
...
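With the hand-rolled mailbox structs gone, callers build CREATE_QP and friends directly in the mlx5_ifc layouts. A minimal sketch under that convention (names illustrative; the remaining qpc fields and the PAS array are elided):

static int create_qp_sketch(struct mlx5_core_dev *mdev,
			    struct mlx5_core_qp *qp, u32 pdn)
{
	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	u32 *in = mlx5_vzalloc(inlen);
	void *qpc;
	int err;

	if (!in)
		return -ENOMEM;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, pd, pdn);
	/* ... remaining qpc fields and PAS pages elided ... */

	err = mlx5_core_create_qp(mdev, qp, in, inlen);
	kvfree(in);
	return err;
}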
@@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
 	return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
 }
 
-struct mlx5_page_fault_resume_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			flags_qpn;
-	u8			reserved[4];
-};
-
-struct mlx5_page_fault_resume_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			struct mlx5_core_qp *qp,
-			struct mlx5_create_qp_mbox_in *in,
+			u32 *in,
 			int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
-			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+			u32 opt_param_mask, void *qpc,
 			struct mlx5_core_qp *qp);
 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
 			 struct mlx5_core_qp *qp);
 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
-		       struct mlx5_query_qp_mbox_out *out, int outlen);
+		       u32 *out, int outlen);
 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
...
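Note the reworked prototypes above: mlx5_core_qp_modify() now takes the raw command opcode, an optional-parameter mask and a bare context buffer, dropping the mailbox wrapper and the unused sqd_event argument. A caller transitioning a QP might look like this (a sketch; optpar, qpc and the qp wrapper are assumed to be set up by the caller):

err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, optpar, qpc,
			  &qp->mqp);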
include/linux/mlx5/vport.h
...
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr);
 void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
 				     u8 *min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				     u16 vport, u8 min_inline);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
 				      u16 vport, u8 *addr);
 int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
...
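On the consumer side, a sketch of pairing the existing query with the new modify helper, given an initialized struct mlx5_core_dev *mdev and a vport number (variable names illustrative):

u8 min_inline;

/* Read the PF's own minimal inline mode, then propagate it */
mlx5_query_nic_vport_min_inline(mdev, &min_inline);
int err = mlx5_modify_nic_vport_min_inline(mdev, vport_num, min_inline);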