MegEngine 天元 / MegEngine

Commit 14a089c4
Authored on Apr 06, 2021 by Megvii Engine Team

fix(dnn): change ci to cudnn804, reopen testcase

GitOrigin-RevId: 90713a801b4d70df0d1da2e00fda5c2b62df6dcd

Parent: c338e876
4 changed files with 7 additions and 25 deletions (+7 / -25)

src/gopt/test/inference.cpp              +0 / -18
src/tensorrt/test/opr_replace.cpp        +1 / -1
src/tensorrt/test/tensorrt.cpp           +2 / -2
src/tensorrt/test/tensorrt_runtime.cpp   +4 / -4
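For context on "reopen testcase": the tests re-enabled in src/gopt/test/inference.cpp had been wrapped in a CUDA-version preprocessor guard so that they were skipped on the cu111 CI. A minimal sketch of the guard pattern this commit removes (test body elided, names taken from the diff below):

    // Pattern removed by this commit: the test only compiles when the CUDA
    // toolkit is older than 11.0, which disabled it on the cu111 CI runners.
    //! close for cu111 ci, reopen it when bug fixed
    #if CUDA_VERSION < 11000
    TEST(TestEnableTensorCore, ConvBiasWithZ) {
        REQUIRE_GPU(1);
        auto cn = CompNode::load("gpu0");
        // ... test body ...
    }
    #endif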
src/gopt/test/inference.cpp
@@ -1959,8 +1959,6 @@ TEST(TestEnableTensorCore, Nchw4Nchw) {
 }
 #endif
-//! close for cu111 ci, reopen it when bug fixed
-#if CUDA_VERSION < 11000
 TEST(TestEnableTensorCore, ConvBiasWithZ) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
@@ -2023,10 +2021,7 @@ TEST(TestEnableTensorCore, ConvBiasWithZ) {
     func->execute();
     MGB_ASSERT_TENSOR_EQ(host_y, host_y_opt);
 }
-#endif
-//! close for cu111 ci, reopen it when bug fixed
-#if CUDA_VERSION < 11000
 TEST(TestEnableTensorCore, Pooling) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
@@ -2094,7 +2089,6 @@ TEST(TestEnableTensorCore, Pooling) {
     func->execute();
     MGB_ASSERT_TENSOR_EQ(host_y, host_y_opt);
 }
-#endif
 TEST(TestGoptInference, EnableTensorCore) {
     REQUIRE_GPU(1);
@@ -2296,8 +2290,6 @@ TEST(FuseConvBiasZPass, BlockFuse) {
 }
 #endif
-//! close for cu111 ci, reopen it when bug fixed
-#if CUDA_VERSION < 11000
 TEST(TestEnableTensorCore, ShuffleMerge) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
@@ -2391,7 +2383,6 @@ TEST(TestEnableTensorCore, ShuffleMerge) {
     func->execute();
     MGB_ASSERT_TENSOR_EQ(host_y, host_y_opt);
 }
-#endif
 #endif
@@ -2575,8 +2566,6 @@ TEST(TestGoptInference, EnableCHWN4) {
 }
 #endif
-//! close for cu111 ci, reopen it when bug fixed
-#if CUDA_VERSION < 11000
 TEST(TestGoptInference, EnableCHWN4WarpPespective) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
@@ -2664,7 +2653,6 @@ TEST(TestGoptInference, EnableCHWN4WarpPespective) {
     func->execute();
     MGB_ASSERT_TENSOR_EQ(host_y, host_y_opt);
 }
-#endif
 TEST(TestGoptInference, EnableCHWN4Pooling) {
     REQUIRE_GPU(1);
@@ -2754,8 +2742,6 @@ TEST(TestGoptInference, EnableCHWN4Pooling) {
     MGB_ASSERT_TENSOR_EQ(host_y, host_y_opt);
 }
-//! close for cu111 ci, reopen it when bug fixed
-#if CUDA_VERSION < 11000
 TEST(TestGoptInference, EnableCHWN4ShuffleRemove) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
@@ -2878,7 +2864,6 @@ TEST(TestGoptInference, EnableCHWN4ShuffleRemove) {
     func->execute();
     MGB_ASSERT_TENSOR_EQ(host_y, host_y_opt);
 }
-#endif
 TEST(TestGoptInference, ConvertFormatNCHW4GPU) {
     REQUIRE_GPU(1);
@@ -3977,8 +3962,6 @@ TEST(TestGoptInference, FoldingConvDimshuffle) {
     func->execute();
 }
-//! close for cu111 ci, reopen it when bug fixed
-#if CUDA_VERSION < 11000
 TEST(TestGoptInference, FoldingConvDimshuffleNCHW4NCHW32) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
@@ -4063,7 +4046,6 @@ TEST(TestGoptInference, FoldingConvDimshuffleNCHW4NCHW32) {
     func->execute();
     MGB_ASSERT_TENSOR_EQ(host_y_fuse, host_y_non_fuse);
 }
-#endif
 #if CUDA_VERSION >= 10020
 TEST(TestGoptInference, FoldingConvDimshuffleNCHW32NCHW4) {
src/tensorrt/test/opr_replace.cpp
@@ -223,7 +223,7 @@ TEST(TestTensorRTReplace, ElemAddFusion) {
     ASSERT_EQ(3u, trt_opr->cast_final_safe<opr::TensorRTOpr>().trt_manager().iobuf_size());
-    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-4);
+    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 5e-4);
 }

 TEST(TestTensorRTReplace, BatchedMatrixMulBasic) {
src/tensorrt/test/tensorrt.cpp
@@ -44,14 +44,14 @@ TEST(TestOprTensorRT, Basic) {
     auto func = net.graph->compile({make_callback_copy(net.y, host_z1),
                                     make_callback_copy(y2, host_z2)});
     func->execute();
-    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-4);
+    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 2e-4);

     auto&& host_x = net.host_x;
     auto&& gen = net.gen;
     *host_x = *gen({1, 23, 43, 43});
     func->execute();
-    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-4);
+    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 2e-4);
     *host_x = *gen({10, 23, 12, 12});
     func->execute();
     MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-3);
src/tensorrt/test/tensorrt_runtime.cpp
@@ -58,7 +58,7 @@ TEST(TestOprTensorRT, RuntimeBasic) {
     auto func = net.graph->compile({make_callback_copy(net.y, host_z1),
                                     make_callback_copy(y2, host_z2)});
     func->execute();
-    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-4);
+    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 5e-4);
 }
@@ -128,13 +128,13 @@ TEST(TestOprTensorRT, RuntimeChangeBatchSize) {
     auto func = net.graph->compile({make_callback_copy(net.y, host_z1),
                                     make_callback_copy(y2, host_z2)});
     func->execute();
-    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-4);
+    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 5e-4);
     *net.host_x = *net.gen({1, 23, 28, 28});
     func->execute();
-    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-4);
+    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 5e-4);
     *net.host_x = *net.gen({10, 23, 28, 28});
     func->execute();
-    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 1e-4);
+    MGB_ASSERT_TENSOR_NEAR(host_z1, host_z2, 5e-4);
 }
 #endif  // MGB_ENABLE_TENSOR_RT
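The three TensorRT test files above only relax the tolerances passed to MGB_ASSERT_TENSOR_NEAR (from 1e-4 to 2e-4 or 5e-4), presumably to absorb small numeric differences under the cuDNN 8.0.4 builds targeted by the new CI. A minimal sketch of what such a max-error check amounts to (an illustrative stand-in under that assumption, not the actual MegBrain macro):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Illustrative helper (assumption): succeeds when the largest element-wise
    // absolute difference between two equally sized buffers stays within
    // max_err, e.g. 5e-4 after this commit instead of 1e-4 before it.
    bool tensor_near(const std::vector<float>& a, const std::vector<float>& b,
                     float max_err) {
        if (a.size() != b.size())
            return false;
        float worst = 0.f;
        for (std::size_t i = 0; i < a.size(); ++i)
            worst = std::max(worst, std::fabs(a[i] - b[i]));
        return worst <= max_err;
    }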