From 8fb8b95286cda2bf7e6ba8abd3bf94c09f50a1b6 Mon Sep 17 00:00:00 2001
From: liu zhengxi <380185688@qq.com>
Date: Mon, 16 Mar 2020 09:44:37 +0800
Subject: [PATCH] Add transpose_flatten_concat_fuse_pass tests for gpu and trt
 (#22976)

* add transpose_flatten_concat_fuse_pass tests for gpu and trt, test=develop

* update test_inference_api.py, test=develop
---
 .../ir/inference/inference_pass_test.py       |  3 +
 ...test_transpose_flatten_concat_fuse_pass.py | 51 +++++++++++++++++
 ..._trt_transpose_flatten_concat_fuse_pass.py | 55 +++++++++++++++++++
 .../tests/unittests/test_inference_api.py     | 28 ++--------
 4 files changed, 115 insertions(+), 22 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
 create mode 100644 python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py

diff --git a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
index 95b812bdf7c..b2a27f95886 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
@@ -53,6 +53,9 @@ class InferencePassTest(unittest.TestCase):
             feed=self.feeds,
             fetch_list=self.fetch_list,
             return_numpy=False)
+        # Save the models in combined form so that there
+        # won't be too many useless files left behind after
+        # finishing a couple of tests.
         fluid.io.save_inference_model(
             dirname=self.path,
             feeded_var_names=list(self.feeds.keys()),
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
new file mode 100644
index 00000000000..5c91ceeb44a
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+from inference_pass_test import InferencePassTest
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+
+
+class TransposeFlattenConcatFusePassTest(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data1 = fluid.data(
+                name="data1", shape=[8, 32, 128], dtype="float32")
+            data2 = fluid.data(
+                name="data2", shape=[8, 32, 128], dtype="float32")
+            trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
+            trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
+            flatt1 = fluid.layers.flatten(trans1)
+            flatt2 = fluid.layers.flatten(trans2)
+            concat_out = fluid.layers.concat([flatt1, flatt2])
+            # There are no parameters in the above structure.
+            # Hence, append a batch_norm to avoid the failure caused by load_combined.
+            out = fluid.layers.batch_norm(concat_out, is_test=True)
+
+        self.feeds = {
+            "data1": np.random.random([8, 32, 128]).astype("float32"),
+            "data2": np.random.random([8, 32, 128]).astype("float32")
+        }
+        self.fetch_list = [out]
+
+    def test_check_output(self):
+        # There is no CPU pass for transpose_flatten_concat_fuse.
+        if core.is_compiled_with_cuda():
+            self.check_output_with_option([True])
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
new file mode 100644
index 00000000000..c85c54c7418
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+from inference_pass_test import InferencePassTest
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid.core import AnalysisConfig
+
+
+class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data1 = fluid.data(
+                name="data1", shape=[8, 32, 128], dtype="float32")
+            data2 = fluid.data(
+                name="data2", shape=[8, 32, 128], dtype="float32")
+            trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
+            trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
+            flatt1 = fluid.layers.flatten(trans1)
+            flatt2 = fluid.layers.flatten(trans2)
+            concat_out = fluid.layers.concat([flatt1, flatt2])
+            # There are no parameters in the above structure.
+            # Hence, append a batch_norm to avoid the failure caused by load_combined.
+            out = fluid.layers.batch_norm(concat_out, is_test=True)
+
+        self.feeds = {
+            "data1": np.random.random([8, 32, 128]).astype("float32"),
+            "data2": np.random.random([8, 32, 128]).astype("float32")
+        }
+        self.enable_trt = True
+        self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam(
+            1 << 20, 1, 3, AnalysisConfig.Precision.Float32, False, False)
+        self.fetch_list = [out]
+
+    def test_check_output(self):
+        # There is no CPU pass for transpose_flatten_concat_fuse.
+        if core.is_compiled_with_cuda():
+            self.check_output_with_option([True])
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_inference_api.py b/python/paddle/fluid/tests/unittests/test_inference_api.py
index a74771211f6..98ec0b3db04 100644
--- a/python/paddle/fluid/tests/unittests/test_inference_api.py
+++ b/python/paddle/fluid/tests/unittests/test_inference_api.py
@@ -24,50 +24,34 @@ class TestInferenceApi(unittest.TestCase):
     def test_inference_api(self):
         tensor32 = np.random.randint(10, 20, size=[20, 2]).astype('int32')
         paddletensor32 = PaddleTensor(tensor32)
-        value32 = np.array(paddletensor32.data.int32_data()).reshape(* [20, 2])
         dtype32 = paddletensor32.dtype
-        self.assertEqual(value32.all(), tensor32.all())
         self.assertEqual(dtype32, PaddleDType.INT32)
-        self.assertEqual(
-            type(paddletensor32.data.tolist('int32')), type(tensor32.tolist()))
         self.assertEqual(
             paddletensor32.data.tolist('int32'), tensor32.ravel().tolist())
-        self.assertEqual(type(paddletensor32.as_ndarray()), type(tensor32))
         paddletensor32.data.reset(tensor32)
-        self.assertEqual(paddletensor32.as_ndarray().all(), tensor32.all())
+        self.assertEqual(paddletensor32.as_ndarray().ravel().tolist(),
+                         tensor32.ravel().tolist())
 
         tensor64 = np.random.randint(10, 20, size=[20, 2]).astype('int64')
         paddletensor64 = PaddleTensor(tensor64)
-        value64 = np.array(paddletensor64.data.int64_data()).reshape(* [20, 2])
         dtype64 = paddletensor64.dtype
-        self.assertEqual(value64.all(), tensor64.all())
         self.assertEqual(dtype64, PaddleDType.INT64)
-        self.assertEqual(
-            type(paddletensor64.data.tolist('int64')), type(tensor64.tolist()))
         self.assertEqual(
             paddletensor64.data.tolist('int64'), tensor64.ravel().tolist())
-        self.assertEqual(type(paddletensor64.as_ndarray()), type(tensor64))
         paddletensor64.data.reset(tensor64)
-        self.assertEqual(paddletensor64.as_ndarray().all(), tensor64.all())
+        self.assertEqual(paddletensor64.as_ndarray().ravel().tolist(),
+                         tensor64.ravel().tolist())
 
         tensor_float = np.random.randn(20, 2).astype('float32')
         paddletensor_float = PaddleTensor(tensor_float)
-        value_float = np.array(paddletensor_float.data.float_data()).reshape(
-            * [20, 2])
         dtype_float = paddletensor_float.dtype
-        self.assertEqual(value_float.all(), tensor_float.all())
         self.assertEqual(dtype_float, PaddleDType.FLOAT32)
-        self.assertEqual(
-            type(paddletensor_float.data.tolist('float32')),
-            type(tensor_float.tolist()))
         self.assertEqual(
             paddletensor_float.data.tolist('float32'),
             tensor_float.ravel().tolist())
-        self.assertEqual(
-            type(paddletensor_float.as_ndarray()), type(tensor_float))
         paddletensor_float.data.reset(tensor_float)
-        self.assertEqual(paddletensor_float.as_ndarray().all(),
-                         tensor_float.all())
+        self.assertEqual(paddletensor_float.as_ndarray().ravel().tolist(),
+                         tensor_float.ravel().tolist())
 
 
 if __name__ == '__main__':
-- 
GitLab
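For reference, a rough NumPy sketch of the tensor shapes flowing through the transpose -> flatten -> concat pattern that both new tests build. It assumes the fluid defaults used above (flatten with axis=1, concat with axis=0) and is only an illustration, not code from the patch:

import numpy as np

# Same input shapes as the tests' feeds.
data1 = np.random.random([8, 32, 128]).astype("float32")
data2 = np.random.random([8, 32, 128]).astype("float32")

# transpose(perm=[2, 1, 0]): [8, 32, 128] -> [128, 32, 8]
trans1 = np.transpose(data1, (2, 1, 0))
trans2 = np.transpose(data2, (2, 1, 0))

# flatten with the fluid default axis=1: [128, 32, 8] -> [128, 256]
flatt1 = trans1.reshape(trans1.shape[0], -1)
flatt2 = trans2.reshape(trans2.shape[0], -1)

# concat with the fluid default axis=0: two [128, 256] tensors -> [256, 256]
concat_out = np.concatenate([flatt1, flatt2], axis=0)
print(concat_out.shape)  # (256, 256); the tests then append a batch_norm to this output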