Commit 1ac8ca4d (unverified)
Authored on Aug 31, 2022 by Leo Chen; committed via GitHub on Aug 31, 2022
Repository: 机器未来/Paddle (fork of PaddlePaddle/Paddle)
Parent: 213f8038

Fix UT failures (#45099)

Showing 12 changed files with 56 additions and 22 deletions (+56 -22)
Changed files:
paddle/fluid/framework/selected_rows_utils.cc (+5 -1)
python/paddle/fluid/contrib/slim/tests/CMakeLists.txt (+1 -1)
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py (+6 -1)
python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py (+1 -1)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py (+5 -5)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_instance_norm.py (+2 -2)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_sum.py (+3 -3)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py (+1 -1)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py (+8 -1)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py (+8 -1)
python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py (+8 -2)
python/paddle/fluid/tests/unittests/test_einsum_v2.py (+8 -3)
paddle/fluid/framework/selected_rows_utils.cc

@@ -72,8 +72,12 @@ void DeserializeFromStream(std::istream& is,
   }
   {
     // the 2st field, rows information
-    uint64_t size;
+    uint64_t size = 0;
     is.read(reinterpret_cast<char*>(&size), sizeof(size));
+    PADDLE_ENFORCE_EQ(
+        is.good(),
+        true,
+        platform::errors::Unavailable("Cannot read the number of rows."));
     auto& rows = *selected_rows->mutable_rows();
     rows.resize(size);
     for (uint64_t i = 0; i < size; ++i) {
python/paddle/fluid/contrib/slim/tests/CMakeLists.txt

@@ -524,7 +524,7 @@ if(NOT WIN32)
   set_tests_properties(test_post_training_quantization_program_resnet50
                        PROPERTIES TIMEOUT 240)
   set_tests_properties(test_post_training_quantization_mobilenetv1
-                       PROPERTIES TIMEOUT 600 LABELS "RUN_TYPE=NIGHTLY")
+                       PROPERTIES TIMEOUT 900 LABELS "RUN_TYPE=NIGHTLY")
   set_tests_properties(test_post_training_quantization_resnet50
                        PROPERTIES TIMEOUT 600 LABELS "RUN_TYPE=NIGHTLY")
   set_tests_properties(test_post_training_quantization_mnist PROPERTIES TIMEOUT
python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py

@@ -241,6 +241,8 @@ class TestPostTrainingQuantization(unittest.TestCase):
     def generate_quantized_model(self,
                                  model_path,
                                  quantizable_op_type,
+                                 batch_size,
+                                 batch_nums,
                                  algo="KL",
                                  round_type="round",
                                  is_full_quantize=False,

@@ -263,6 +265,8 @@ class TestPostTrainingQuantization(unittest.TestCase):
         ptq = PostTrainingQuantization(executor=exe,
                                        sample_generator=val_reader,
                                        model_dir=model_path,
+                                       batch_size=batch_size,
+                                       batch_nums=batch_nums,
                                        algo=algo,
-                                       batch_nums=batch_nums,
                                        quantizable_op_type=quantizable_op_type,

@@ -302,7 +306,8 @@ class TestPostTrainingQuantization(unittest.TestCase):
         print("Start INT8 post training quantization for {0} on {1} images ..."
               .format(model, sample_iterations * batch_size))
         self.generate_quantized_model(os.path.join(model_cache_folder, "model"),
-                                      quantizable_op_type, algo, round_type,
+                                      quantizable_op_type, batch_size,
+                                      sample_iterations, algo, round_type,
                                       is_full_quantize, is_use_cache_file,
                                       is_optimize_model, batch_nums, onnx_format)
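For context, a minimal sketch of driving PostTrainingQuantization with the batch_size / batch_nums arguments this change threads through; the reader, paths and numbers below are placeholders for illustration, not taken from the test.

import numpy as np
import paddle
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization

paddle.enable_static()

def val_reader():
    # Placeholder sample generator: yields (image, label) samples.
    for _ in range(32):
        yield np.random.random([3, 224, 224]).astype("float32"), np.int64(0)

exe = paddle.static.Executor(paddle.CPUPlace())
ptq = PostTrainingQuantization(executor=exe,
                               sample_generator=val_reader,
                               model_dir="/path/to/fp32_model",  # placeholder
                               batch_size=4,
                               batch_nums=2,  # calibrate on 2 batches only
                               algo="KL",
                               quantizable_op_type=["conv2d", "mul"])
ptq.quantize()
ptq.save_quantized_model("/path/to/int8_model")  # placeholder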
python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py

@@ -225,8 +225,8 @@ class InferencePassTest(unittest.TestCase):
             tensorrt_output = tensorrt_output.flatten()
             np.testing.assert_allclose(
-                paddle_out,
                 tensorrt_output,
+                paddle_out,
                 rtol=rtol,
                 atol=atol,
                 err_msg='Output has diff between GPU and TensorRT. ')
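Swapping the two positional arguments is not cosmetic: np.testing.assert_allclose(actual, desired, ...) scales rtol by the desired array, so after this change the Paddle output serves as the reference. A small sketch with made-up numbers:

import numpy as np

paddle_out = np.array([1.00])       # reference value
tensorrt_output = np.array([1.05])  # value under test

# assert_allclose checks |actual - desired| <= atol + rtol * |desired|.
np.testing.assert_allclose(paddle_out, tensorrt_output, rtol=0.048, atol=0)      # bound 0.0504: passes
try:
    np.testing.assert_allclose(tensorrt_output, paddle_out, rtol=0.048, atol=0)  # bound 0.048: fails
except AssertionError:
    print("the bound is tighter when paddle_out is the desired value")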
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py

@@ -47,11 +47,11 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
         self.trt_param.workspace_size = 1073741824

         def generate_input1(batch, attrs: List[Dict[str, Any]]):
-            return np.ones([batch, attrs[0]['groups'] * 3, 64,
-                            64]).astype(np.float32)
+            return np.ones([batch, attrs[0]['groups'] * 3, 64,
+                            64]).astype(np.float32) / 4

         def generate_weight1(attrs: List[Dict[str, Any]]):
-            return np.random.random([24, 3, 3, 3]).astype(np.float32)
+            return np.random.random([9, 3, 3, 3]).astype(np.float32) - 0.5

         batch_options = [1, 2]
         strides_options = [[2, 2], [1, 2]]

@@ -162,7 +162,7 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
                 attrs, False), (1e-3, 1e-3)
             self.trt_param.precision = paddle_infer.PrecisionType.Int8
             yield self.create_inference_config(), generate_trt_nodes_num(
-                attrs, False), (1e-3, 1e-3)
+                attrs, False), (1e-2, 1e-2)

         # for dynamic_shape
         generate_dynamic_shape(attrs)

@@ -174,7 +174,7 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
                 attrs, True), (1e-3, 1e-3)
             self.trt_param.precision = paddle_infer.PrecisionType.Int8
             yield self.create_inference_config(), generate_trt_nodes_num(
-                attrs, True), (1e-3, 1e-3)
+                attrs, True), (1e-2, 1e-2)

     def test(self):
         self.run_test()
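A rough reading of the data changes above (illustrative only): the generated inputs shrink from all-ones to 0.25 and the weights move from [0, 1) to a roughly zero-mean range, which presumably keeps the convolution outputs small while the Int8 tolerances are relaxed from 1e-3 to 1e-2.

import numpy as np

old_input = np.ones([1, 3, 64, 64], dtype=np.float32)        # every element 1.0
new_input = np.ones([1, 3, 64, 64], dtype=np.float32) / 4    # every element 0.25

old_weight = np.random.random([24, 3, 3, 3]).astype(np.float32)       # uniform in [0, 1)
new_weight = np.random.random([9, 3, 3, 3]).astype(np.float32) - 0.5  # uniform in [-0.5, 0.5)

print(new_input.max(), old_input.max())           # 0.25 vs 1.0
print(abs(new_weight.mean()), old_weight.mean())  # ~0.0 vs ~0.5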
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_instance_norm.py

@@ -128,7 +128,7 @@ class TrtConvertInstanceNormTest(TrtLayerAutoScanTest):
                 attrs, False), 1e-5
             self.trt_param.precision = paddle_infer.PrecisionType.Half
             yield self.create_inference_config(), generate_trt_nodes_num(
-                attrs, False), 1e-5
+                attrs, False), (1e-3, 1e-3)

         # for dynamic_shape
         generate_dynamic_shape(attrs)

@@ -137,7 +137,7 @@ class TrtConvertInstanceNormTest(TrtLayerAutoScanTest):
                 attrs, True), 1e-5
             self.trt_param.precision = paddle_infer.PrecisionType.Half
             yield self.create_inference_config(), generate_trt_nodes_num(
-                attrs, True), 1e-5
+                attrs, True), (1e-3, 1e-3)

     def test(self):
         self.run_test()
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_sum.py

@@ -44,9 +44,9 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest):
         def generate_input1(dtype, attrs: List[Dict[str, Any]]):
             if dtype == -1 or dtype == 5:
-                return np.random.random([1, 3, 64, 64]).astype(np.float32)
+                return np.random.random([1, 3, 32, 32]).astype(np.float32)
             elif dtype == 2:
-                return np.random.random([1, 3, 64, 64]).astype(np.int32)
+                return np.random.random([1, 3, 32, 32]).astype(np.int32)

         for keep_dim in [True, False]:
             for dim in [[], [1], [0], [0, 1], [1, 2, 3], [-2, 0, 3], [-3],

@@ -93,7 +93,7 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
             self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
-            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}
+            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 32, 32]}

         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
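The second hunk keeps the TensorRT optimization profile consistent with the new 32x32 inputs: the opt shape must lie between min_input_shape and max_input_shape, and after the change it coincides with the minimum. A sketch restating the test's three shape settings:

# Shapes as set in generate_dynamic_shape above; "input_data" is the test's
# input tensor name.
min_shape = [1, 3, 32, 32]
max_shape = [4, 3, 64, 64]
opt_shape = [1, 3, 32, 32]  # now matches both the generated inputs and the minimum

assert all(lo <= o <= hi for lo, o, hi in zip(min_shape, opt_shape, max_shape))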
python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py

@@ -154,7 +154,7 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest):
     def test(self):
         self.run_and_statis(quant=False,
-                            max_examples=50,
+                            max_examples=25,
                             passes=["trt_flatten2_matmul_fuse_pass"])
python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py

@@ -79,7 +79,14 @@ class TensorRTPool3dTest(InferencePassTest):
             shutil.rmtree(self.path + "_opt_cache")
         if core.is_compiled_with_cuda():
             use_gpu = True
-            self.check_output_with_option(use_gpu)
+            if self.precision == AnalysisConfig.Precision.Float32:
+                atol, rtol = (1e-5, 1e-5)
+            elif self.precision == AnalysisConfig.Precision.Half:
+                atol, rtol = (1e-3, 1e-3)
+            else:
+                raise ValueError("Unsupported precision {}".format(
+                    self.precision))
+            self.check_output_with_option(use_gpu, atol=atol, rtol=rtol)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
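The same precision-to-tolerance branching recurs in the 2-D pooling test below; a hypothetical way to express it once as a lookup (not part of this patch) would be:

from paddle.fluid.core import AnalysisConfig

# Hypothetical helper, not in the patch: map an AnalysisConfig precision mode
# to the (atol, rtol) pair passed to check_output_with_option.
_TOLERANCES = {
    AnalysisConfig.Precision.Float32: (1e-5, 1e-5),
    AnalysisConfig.Precision.Half: (1e-3, 1e-3),
}

def tolerances_for(precision):
    try:
        return _TOLERANCES[precision]
    except KeyError:
        raise ValueError("Unsupported precision {}".format(precision))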
python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py

@@ -75,7 +75,14 @@ class TensorRTPoolTest(InferencePassTest):
            shutil.rmtree(self.path + "_opt_cache")
         if core.is_compiled_with_cuda():
             use_gpu = True
-            self.check_output_with_option(use_gpu)
+            if self.precision == AnalysisConfig.Precision.Float32:
+                atol, rtol = (1e-5, 1e-5)
+            elif self.precision == AnalysisConfig.Precision.Half:
+                atol, rtol = (1e-3, 1e-3)
+            else:
+                raise ValueError("Unsupported precision {}".format(
+                    self.precision))
+            self.check_output_with_option(use_gpu, atol=atol, rtol=rtol)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py

@@ -218,7 +218,10 @@ class TRTReduceMeanStaticFP16(InferencePassTest):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
             use_gpu = True
-            self.check_output_with_option(use_gpu, flatten=True)
+            self.check_output_with_option(use_gpu,
+                                          flatten=True,
+                                          atol=1e-3,
+                                          rtol=1e-3)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))

@@ -244,7 +247,10 @@ class TRTReduceMeanFP16Static(InferencePassTest):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
             use_gpu = True
-            self.check_output_with_option(use_gpu, flatten=True)
+            self.check_output_with_option(use_gpu,
+                                          flatten=True,
+                                          atol=1e-3,
+                                          rtol=1e-3)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
python/paddle/fluid/tests/unittests/test_einsum_v2.py

@@ -530,21 +530,26 @@ class TestStaticGraphShape(unittest.TestCase):
         self.assertEqual(C.shape, (-1, 384))


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support the bfloat16")
 class TestBF16(unittest.TestCase):
     """
     EinsumOp support bfloat16 type, add unittest here for the correctness.
     """

     def test_shape(self):
-        if paddle.is_compiled_with_cuda() and _is_gpu_bfloat16_supported():
-            """ MatmulKernel support bfloat16 only if cuda_major >= 11.0 and Compute Capability >= 8.0
-            """
+        cuda_major = paddle.version.cuda().split('.')[0].strip()
+        if int(cuda_major) >= 11:
+            """ MatmulKernel support bfloat16 only if cuda_major > 11.0.
+            """
             A = paddle.to_tensor(np.array([1.0, 2.0])).astype(paddle.bfloat16)
             A = A.cuda()
             B = paddle.to_tensor(np.array([2.0, 3.0])).astype(paddle.bfloat16)
             B = B.cuda()
             C = paddle.einsum('i,i->', A, B)
             self.assertEqual(C.astype(paddle.float32).item(), 8.0)
             D = paddle.to_tensor(8.0).astype(paddle.bfloat16)
             self.assertEqual(C.item(), D.item())


 class TestComplex(unittest.TestCase):
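A minimal sketch of the two capability checks the rewritten test now relies on, assuming a CUDA build of Paddle:

import paddle
from paddle.fluid import core

if core.is_compiled_with_cuda():
    # CUDA toolkit major version, e.g. '11' for a CUDA 11.x build.
    cuda_major = paddle.version.cuda().split('.')[0].strip()
    # True only when the current GPU supports bfloat16 (compute capability >= 8.0).
    bf16_ok = core.is_bfloat16_supported(core.CUDAPlace(0))
    print(cuda_major, bf16_ok)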