Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle)

Commit b0b75169 (unverified)
Reduce trt convert unit test problem size (#41701)

Authored by zlsh80826 on Apr 13, 2022; committed via GitHub on Apr 13, 2022.
Parent commit: 404c4a6b
Showing 13 changed files with 122 additions and 177 deletions (+122 -177).
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py     +16 -18
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py     +10 -10
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py           +16 -18
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py         +17 -47
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py  +16 -31
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py    +6 -8
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py           +15 -17
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py   +7 -9
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py     +4 -4
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py          +4 -4
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py          +3 -3
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py          +1 -1
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py       +7 -7
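Across these files the commit makes the same two kinds of reduction: the batch sweep shrinks from [1, 2, 4] to [1, 4], and the test input and dynamic-shape sizes are roughly halved (64 to 32, 48 to 24, and so on). As a rough sketch of why this shrinks the problem size, the snippet below counts the program configurations produced by the activation test's nested loops before and after the change; the loop bounds are copied from the diff, while the counting itself is only illustrative and not part of the commit.

# Rough, illustrative count of sampled program configs for
# test_trt_convert_activation.py (loop bounds copied from the diff).
dims_options = [1, 2, 3, 4]
op_types = ["relu", "sigmoid", "tanh", "relu6"]

batches_before = [1, 2, 4]
batches_after = [1, 4]

before = len(dims_options) * len(batches_before) * len(op_types)
after = len(dims_options) * len(batches_after) * len(op_types)

print(f"configs before: {before}")  # 48
print(f"configs after:  {after}")   # 32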
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py

@@ -28,16 +28,16 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 1:
-                return np.ones([64]).astype(np.float32)
+                return np.ones([32]).astype(np.float32)
             elif dims == 2:
-                return np.ones([3, 64]).astype(np.float32)
+                return np.ones([3, 32]).astype(np.float32)
             elif dims == 3:
-                return np.ones([3, 64, 64]).astype(np.float32)
+                return np.ones([3, 32, 32]).astype(np.float32)
             else:
-                return np.ones([batch, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 3, 32, 32]).astype(np.float32)

         for dims in [1, 2, 3, 4]:
-            for batch in [1, 2, 4]:
+            for batch in [1, 4]:
                 for op_type in ["relu", "sigmoid", "tanh", "relu6"]:
                     self.dims = dims
                     dics = [{}]

@@ -70,27 +70,25 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
-                self.dynamic_shape.max_input_shape = {"input_data": [128]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [64]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
             elif self.dims == 2:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
-                self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32]}
             elif self.dims == 3:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 32]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [10, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32, 32]}
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 3, 16, 16]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 3, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 3, 32, 32]
                 }

         def clear_dynamic_shape():
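The reduced dynamic-shape values still have to form a valid TensorRT optimization profile, i.e. min <= opt <= max in every dimension. A quick, illustrative check of the new 4-D values from the activation test above (not part of the commit):

import numpy as np

# Sanity check (illustrative only): TensorRT dynamic-shape profiles
# require min <= opt <= max per dimension. Values below are the new
# 4-D shapes from generate_dynamic_shape in the activation test.
min_shape = np.array([1, 3, 16, 16])
opt_shape = np.array([1, 3, 32, 32])
max_shape = np.array([4, 3, 32, 32])

assert np.all(min_shape <= opt_shape) and np.all(opt_shape <= max_shape)
print("min <= opt <= max holds for the reduced shapes")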
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py

@@ -54,7 +54,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest):

         for dims in [2, 3, 4]:
             for num_input in [0, 1]:
-                for batch in [1, 2, 4]:
+                for batch in [1, 4]:
                     for epsilon in [1e-6, 1e-5, 1e-4]:
                         for data_layout in ["NCHW"]:
                             for momentum in [0.9, 0.8]:

@@ -134,33 +134,33 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest):
             if self.dims == 4:
                 if attrs[0]['data_layout'] == "NCHW":
                     self.dynamic_shape.min_input_shape = {
-                        "batch_norm_input": [1, 3, 24, 24]
+                        "batch_norm_input": [1, 3, 12, 12]
                     }
                     self.dynamic_shape.max_input_shape = {
-                        "batch_norm_input": [4, 3, 48, 48]
+                        "batch_norm_input": [4, 3, 24, 24]
                     }
                     self.dynamic_shape.opt_input_shape = {
-                        "batch_norm_input": [1, 3, 24, 48]
+                        "batch_norm_input": [1, 3, 24, 24]
                     }
                 elif attrs[0]['data_layout'] == "NHWC":
                     self.dynamic_shape.min_input_shape = {
-                        "batch_norm_input": [1, 24, 24, 3]
+                        "batch_norm_input": [1, 12, 12, 3]
                     }
                     self.dynamic_shape.max_input_shape = {
-                        "batch_norm_input": [4, 48, 48, 3]
+                        "batch_norm_input": [4, 24, 24, 3]
                     }
                     self.dynamic_shape.opt_input_shape = {
-                        "batch_norm_input": [1, 24, 48, 3]
+                        "batch_norm_input": [1, 24, 24, 3]
                     }
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {
-                    "batch_norm_input": [1, 3, 24]
+                    "batch_norm_input": [1, 3, 12]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "batch_norm_input": [4, 3, 48]
+                    "batch_norm_input": [4, 3, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "batch_norm_input": [1, 3, 48]
+                    "batch_norm_input": [1, 3, 24]
                 }
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py

@@ -28,13 +28,13 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 1:
-                return np.ones([64]).astype(np.float32)
+                return np.ones([32]).astype(np.float32)
             elif dims == 2:
-                return np.ones([3, 64]).astype(np.float32)
+                return np.ones([3, 32]).astype(np.float32)
             elif dims == 3:
-                return np.ones([3, 64, 64]).astype(np.float32)
+                return np.ones([3, 32, 32]).astype(np.float32)
             else:
-                return np.ones([batch, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 3, 32, 32]).astype(np.float32)

         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.array([np.random.uniform(1, 10)]).astype("float32")

@@ -43,7 +43,7 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
             return np.array([np.random.uniform(10, 20)]).astype("float32")

         for dims in [1, 2, 3, 4]:
-            for batch in [1, 2, 4]:
+            for batch in [1, 4]:
                 for op_inputs in [{
                     "X": ["input_data"]
                 }, {

@@ -89,27 +89,25 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
-                self.dynamic_shape.max_input_shape = {"input_data": [128]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [64]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
             elif self.dims == 2:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
-                self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32]}
             elif self.dims == 3:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 32]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [10, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32, 32]}
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 3, 16, 16]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 3, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 3, 32, 32]
                 }

         def clear_dynamic_shape():
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py

@@ -46,20 +46,16 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
         self.trt_param.workspace_size = 1073741824

         def generate_input1(batch, attrs: List[Dict[str, Any]]):
-            if attrs[0]['groups'] == 1:
-                return np.ones([batch, 3, 64, 64]).astype(np.float32)
-            elif attrs[0]['groups'] == 2:
-                return np.ones([batch, 6, 64, 64]).astype(np.float32)
-            else:
-                return np.ones([batch, 9, 64, 64]).astype(np.float32)
+            return np.ones(
+                [batch, attrs[0]['groups'] * 3, 64, 64]).astype(np.float32)

         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 3, 3, 3]).astype(np.float32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for strides in [[1, 1], [2, 2], [1, 2]]:
                 for paddings in [[0, 3], [1, 2, 3, 4]]:
-                    for groups in [1, 2, 3]:
+                    for groups in [1, 3]:
                         for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']:
                             for dilations in [[1, 1], [2, 2], [1, 2]]:
                                 for data_format in ['NCHW']:

@@ -116,45 +112,19 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
     def sample_predictor_configs(
             self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if attrs[0]['groups'] == 1:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
-            elif attrs[0]['groups'] == 2:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 6, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 6, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 6, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
-            else:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 9, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 9, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 9, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
+            input_groups = attrs[0]['groups'] * 3
+            self.dynamic_shape.min_input_shape = {
+                "input_data": [1, input_groups, 32, 32],
+                "output_data": [1, 24, 32, 32]
+            }
+            self.dynamic_shape.max_input_shape = {
+                "input_data": [4, input_groups, 64, 64],
+                "output_data": [4, 24, 64, 64]
+            }
+            self.dynamic_shape.opt_input_shape = {
+                "input_data": [1, input_groups, 64, 64],
+                "output_data": [1, 24, 64, 64]
+            }

         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
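Besides the smaller sweeps, this conv2d test and the conv2d_fusion test below replace per-`groups` branching with a single expression, since the input channel count is always three times `groups`. A standalone sketch comparing the old and new input generators follows; it is illustrative only, and the helper names `generate_input_old` and `generate_input_new` are ad hoc rather than taken from the test suite.

import numpy as np

def generate_input_old(batch, groups):
    # Old branching from the diff: channel count hard-coded per groups value.
    if groups == 1:
        return np.ones([batch, 3, 64, 64]).astype(np.float32)
    elif groups == 2:
        return np.ones([batch, 6, 64, 64]).astype(np.float32)
    else:
        return np.ones([batch, 9, 64, 64]).astype(np.float32)

def generate_input_new(batch, groups):
    # New form from the diff: channel count derived directly from groups.
    return np.ones([batch, groups * 3, 64, 64]).astype(np.float32)

# Both generators produce the same shapes for the groups values the test uses.
for groups in [1, 2, 3]:
    assert generate_input_old(2, groups).shape == generate_input_new(2, groups).shape
print("old and new generators produce identical shapes")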
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py

@@ -49,10 +49,8 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
         self.trt_param.workspace_size = 1073741824

         def generate_input1(batch, attrs: List[Dict[str, Any]]):
-            if attrs[0]['groups'] == 2:
-                return np.ones([batch, 6, 64, 64]).astype(np.float32)
-            else:
-                return np.ones([batch, 9, 64, 64]).astype(np.float32)
+            return np.ones(
+                [batch, attrs[0]['groups'] * 3, 64, 64]).astype(np.float32)

         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 3, 3, 3]).astype(np.float32)

@@ -60,7 +58,7 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
         def generate_weight2(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 1, 1]).astype(np.float32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for strides in [[1, 1], [2, 2], [1, 2]]:
                 for paddings in [[0, 3], [1, 2, 3, 4]]:
                     for groups in [2, 3]:

@@ -126,32 +124,19 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
     def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if attrs[0]['groups'] == 2:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 6, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 6, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 6, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
-            else:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 9, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 9, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 9, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
+            input_groups = attrs[0]['groups'] * 3
+            self.dynamic_shape.min_input_shape = {
+                "input_data": [1, input_groups, 32, 32],
+                "output_data": [1, 24, 32, 32]
+            }
+            self.dynamic_shape.max_input_shape = {
+                "input_data": [4, input_groups, 64, 64],
+                "output_data": [4, 24, 64, 64]
+            }
+            self.dynamic_shape.opt_input_shape = {
+                "input_data": [1, input_groups, 64, 64],
+                "output_data": [1, 24, 64, 64]
+            }

         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py

@@ -32,7 +32,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
         def generate_weight():
             return np.random.randn(32).astype(np.float32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for shape in [[32], [batch, 32], [batch, 32, 32],
                           [batch, 32, 16, 32]]:
                 for op_type in ["elementwise_add", "elementwise_mul"]:

@@ -72,7 +72,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
             # The input.dims[1] must be equal to the weight's length.
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [4]}
-                self.dynamic_shape.max_input_shape = {"input_data": [256]}
+                self.dynamic_shape.max_input_shape = {"input_data": [32]}
                 self.dynamic_shape.opt_input_shape = {"input_data": [16]}
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}

@@ -80,19 +80,17 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
                 self.dynamic_shape.opt_input_shape = {"input_data": [2, 32]}
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 4]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 32, 256]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 32]}
             elif self.dims == 4:
                 self.dynamic_shape.min_input_shape = {
                     "input_data": [1, 32, 4, 4]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 32, 128, 256]
+                    "input_data": [4, 32, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [2, 32, 32, 16]
+                    "input_data": [4, 32, 16, 32]
                 }

         def clear_dynamic_shape():
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py

@@ -28,13 +28,13 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(dims, attrs: List[Dict[str, Any]]):
             if dims == 1:
-                return np.ones([64]).astype(np.float32)
+                return np.ones([32]).astype(np.float32)
             elif dims == 2:
-                return np.ones([3, 64]).astype(np.float32)
+                return np.ones([3, 32]).astype(np.float32)
             elif dims == 3:
-                return np.ones([3, 64, 64]).astype(np.float32)
+                return np.ones([3, 32, 32]).astype(np.float32)
             else:
-                return np.ones([1, 3, 64, 64]).astype(np.float32)
+                return np.ones([1, 3, 32, 32]).astype(np.float32)

         for dims in [1, 2, 3, 4]:
             for approximate in [True, False]:

@@ -69,27 +69,25 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
-                self.dynamic_shape.max_input_shape = {"input_data": [128]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [64]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
             elif self.dims == 2:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
-                self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32]}
             elif self.dims == 3:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 32]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [10, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32, 32]}
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 3, 16, 16]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 3, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 3, 32, 32]
                 }

         def clear_dynamic_shape():
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py

@@ -29,8 +29,8 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

-        for batch in [1, 2, 4]:
-            for shape in [[batch, 64], [batch, 32, 64], [batch, 64, 32, 128]]:
+        for batch in [1, 4]:
+            for shape in [[batch, 32], [batch, 16, 32], [batch, 32, 16, 128]]:
                 self.input_dim = len(shape)
                 for slope in [0.1, 0.5]:
                     for offset in [0.2, 0.7]:

@@ -63,23 +63,21 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.input_dim == 2:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 8]}
-                self.dynamic_shape.max_input_shape = {"input_data": [64, 128]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
                 self.dynamic_shape.opt_input_shape = {"input_data": [2, 16]}
             elif self.input_dim == 3:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 8, 8]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [64, 128, 256]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 16, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [4, 16, 32]}
             elif self.input_dim == 4:
                 self.dynamic_shape.min_input_shape = {
                     "input_data": [1, 8, 8, 4]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [64, 128, 256, 512]
+                    "input_data": [4, 32, 16, 128]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [2, 16, 64, 128]
+                    "input_data": [4, 32, 16, 128]
                 }

         def clear_dynamic_shape():
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py

@@ -37,7 +37,7 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(attrs: List[Dict[str, Any]]):
-            return np.ones([1, 3, 64, 64]).astype(np.float32)
+            return np.ones([1, 3, 32, 32]).astype(np.float32)

         for threshold in [6.0, 7.0, 100.0, 0.0, -1.0]:
             for scale in [5.0, 6.0, 7.0, -1.0, 0.0, 100.0]:

@@ -74,9 +74,9 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
     def sample_predictor_configs(
             self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
-            self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
-            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}
+            self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 16, 16]}
+            self.dynamic_shape.max_input_shape = {"input_data": [2, 3, 32, 32]}
+            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 32, 32]}

         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py

@@ -136,7 +136,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
                     "input_data": [1, 1],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 64],
+                    "input_data": [4, 32],
                 }
                 self.dynamic_shape.opt_input_shape = {
                     "input_data": [2, 3],

@@ -146,7 +146,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
                     "input_data": [1, 1, 1, 1],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 64, 128, 128],
+                    "input_data": [4, 3, 16, 32],
                 }
                 self.dynamic_shape.opt_input_shape = {
                     "input_data": [2, 3, 16, 32],

@@ -156,10 +156,10 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
                     "input_data": [1, 1, 1],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 64, 256],
+                    "input_data": [4, 3, 32],
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [2, 3, 128],
+                    "input_data": [2, 3, 16],
                 }

         def clear_dynamic_shape():
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py

@@ -94,14 +94,14 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest):
                     "scale_input": [1, 3, 24, 24]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "scale_input": [9, 3, 48, 48]
+                    "scale_input": [4, 3, 24, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "scale_input": [1, 3, 48, 24]
+                    "scale_input": [1, 3, 24, 24]
                 }
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {"scale_input": [1, 3, 24]}
-                self.dynamic_shape.max_input_shape = {"scale_input": [9, 6, 48]}
+                self.dynamic_shape.max_input_shape = {"scale_input": [4, 3, 24]}
                 self.dynamic_shape.opt_input_shape = {"scale_input": [1, 3, 24]}
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {"scale_input": [1, 24]}
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py

@@ -69,7 +69,7 @@ class TrtConvertStackTest(TrtLayerAutoScanTest):
             return np.ones([24]).astype(np.float32)

         for dims in [1, 2, 3, 4]:
-            for batch in [1, 2, 4]:
+            for batch in [1, 4]:
                 for axis in [-2, -1, 0, 1, 2, 3]:
                     self.dims = dims
                     dics = [{"axis": axis}, {}]
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py

@@ -37,7 +37,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest):
         def generate_input2(attrs: List[Dict[str, Any]], batch):
             return np.random.random([batch, 2]).astype(np.int32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for class_num in [80, 30]:
                 for anchors in [[10, 13, 16, 30, 33, 23]]:
                     for downsample_ratio in [32, 16]:

@@ -97,24 +97,24 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest):
             if attrs[0]['iou_aware'] == True:
                 channel = 3 * (attrs[0]['class_num'] + 6)
                 self.dynamic_shape.min_input_shape = {
-                    "scale_input": [1, channel, 24, 24]
+                    "scale_input": [1, channel, 12, 12]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "scale_input": [4, channel, 48, 48]
+                    "scale_input": [4, channel, 24, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "scale_input": [1, channel, 24, 48]
+                    "scale_input": [1, channel, 24, 24]
                 }
             else:
                 channel = 3 * (attrs[0]['class_num'] + 5)
                 self.dynamic_shape.min_input_shape = {
-                    "scale_input": [1, channel, 24, 24]
+                    "scale_input": [1, channel, 12, 12]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "scale_input": [4, channel, 48, 48]
+                    "scale_input": [4, channel, 24, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "scale_input": [1, channel, 24, 48]
+                    "scale_input": [1, channel, 24, 24]
                 }

         def clear_dynamic_shape():