Unverified commit 8fb8b952, authored by liu zhengxi, committed by GitHub

Add transpose_flatten_concat_fuse_pass tests for gpu and trt (#22976)

* add transpose_flatten_concat_fuse_pass tests for gpu and trt, test=develop

* update test_inference_api.py, test=develop
Parent ab473357
@@ -53,6 +53,9 @@ class InferencePassTest(unittest.TestCase):
                            feed=self.feeds,
                            fetch_list=self.fetch_list,
                            return_numpy=False)
        # Save the models in the combined format so that
        # a couple of test runs do not leave too many
        # useless files behind.
        fluid.io.save_inference_model(
            dirname=self.path,
            feeded_var_names=list(self.feeds.keys()),
......
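For context, the comment in the hunk above refers to fluid.io.save_inference_model's combined format: when model_filename and params_filename are given, all parameters are written into a single file instead of one file per parameter. Below is a minimal, self-contained sketch of the two modes (the directory names, file names, and the tiny fc network are hypothetical, not from this commit):

import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    data = fluid.data(name="data", shape=[8, 32], dtype="float32")
    out = fluid.layers.fc(data, size=4)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)

# Separate format (default): one file per parameter in the directory.
fluid.io.save_inference_model(
    dirname="./model_separate",
    feeded_var_names=["data"],
    target_vars=[out],
    executor=exe,
    main_program=main_prog)

# Combined format: one model file plus one params file, which is what
# the comment above relies on to keep the test directory uncluttered.
fluid.io.save_inference_model(
    dirname="./model_combined",
    feeded_var_names=["data"],
    target_vars=[out],
    executor=exe,
    main_program=main_prog,
    model_filename="model",
    params_filename="params")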
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core


class TransposeFlattenConcatFusePassTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data1 = fluid.data(
                name="data1", shape=[8, 32, 128], dtype="float32")
            data2 = fluid.data(
                name="data2", shape=[8, 32, 128], dtype="float32")
            trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
            trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
            flatt1 = fluid.layers.flatten(trans1)
            flatt2 = fluid.layers.flatten(trans2)
            concat_out = fluid.layers.concat([flatt1, flatt2])
            # The structure above has no parameters. Hence, append a
            # batch_norm op to avoid a failure caused by load_combined.
            out = fluid.layers.batch_norm(concat_out, is_test=True)

        self.feeds = {
            "data1": np.random.random([8, 32, 128]).astype("float32"),
            "data2": np.random.random([8, 32, 128]).astype("float32")
        }
        self.fetch_list = [out]

    def test_check_output(self):
        # There is no CPU pass for transpose_flatten_concat_fuse.
        if core.is_compiled_with_cuda():
            self.check_output_with_option([True])


if __name__ == "__main__":
    unittest.main()
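To make the pattern under test concrete, the following NumPy sketch (an illustration, not part of the commit) computes the same result as the transpose -> flatten -> concat subgraph that the pass fuses into a single op on GPU:

import numpy as np

d1 = np.random.random([8, 32, 128]).astype("float32")
d2 = np.random.random([8, 32, 128]).astype("float32")

t1 = d1.transpose(2, 1, 0)  # perm=[2, 1, 0] -> shape (128, 32, 8)
t2 = d2.transpose(2, 1, 0)

# fluid.layers.flatten defaults to axis=1: keep dim 0, flatten the rest.
f1 = t1.reshape(t1.shape[0], -1)  # shape (128, 256)
f2 = t2.reshape(t2.shape[0], -1)

ref = np.concatenate([f1, f2], axis=0)  # concat defaults to axis=0
print(ref.shape)  # (256, 256)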
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig


class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data1 = fluid.data(
                name="data1", shape=[8, 32, 128], dtype="float32")
            data2 = fluid.data(
                name="data2", shape=[8, 32, 128], dtype="float32")
            trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
            trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
            flatt1 = fluid.layers.flatten(trans1)
            flatt2 = fluid.layers.flatten(trans2)
            concat_out = fluid.layers.concat([flatt1, flatt2])
            # The structure above has no parameters. Hence, append a
            # batch_norm op to avoid a failure caused by load_combined.
            out = fluid.layers.batch_norm(concat_out, is_test=True)

        self.feeds = {
            "data1": np.random.random([8, 32, 128]).astype("float32"),
            "data2": np.random.random([8, 32, 128]).astype("float32")
        }
        self.enable_trt = True
        self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam(
            1 << 20, 1, 3, AnalysisConfig.Precision.Float32, False, False)
        self.fetch_list = [out]

    def test_check_output(self):
        # There is no CPU pass for transpose_flatten_concat_fuse.
        if core.is_compiled_with_cuda():
            self.check_output_with_option([True])


if __name__ == "__main__":
    unittest.main()
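For readability, the positional TensorRTParam arguments above can be read as keywords. The field names below are an assumption taken from InferencePassTest.TensorRTParam, which mirrors AnalysisConfig.enable_tensorrt_engine; verify them against inference_pass_test.py before relying on them:

from paddle.fluid.core import AnalysisConfig

# Assumed field names; the values are the ones passed in the test above.
trt_params = dict(
    workspace_size=1 << 20,  # TensorRT scratch memory, 1 MiB
    max_batch_size=1,
    min_subgraph_size=3,  # only offload subgraphs with at least 3 ops
    precision=AnalysisConfig.Precision.Float32,
    use_static=False,  # do not serialize the built engine to disk
    use_calib_mode=False)  # INT8 calibration is irrelevant for FP32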
@@ -24,50 +24,34 @@ class TestInferenceApi(unittest.TestCase):
    def test_inference_api(self):
        tensor32 = np.random.randint(10, 20, size=[20, 2]).astype('int32')
        paddletensor32 = PaddleTensor(tensor32)
        value32 = np.array(paddletensor32.data.int32_data()).reshape(*[20, 2])
        dtype32 = paddletensor32.dtype
        self.assertEqual(value32.all(), tensor32.all())
        self.assertEqual(dtype32, PaddleDType.INT32)
        self.assertEqual(
            type(paddletensor32.data.tolist('int32')), type(tensor32.tolist()))
        self.assertEqual(
            paddletensor32.data.tolist('int32'), tensor32.ravel().tolist())
        self.assertEqual(type(paddletensor32.as_ndarray()), type(tensor32))
        paddletensor32.data.reset(tensor32)
        self.assertEqual(paddletensor32.as_ndarray().all(), tensor32.all())
        self.assertEqual(paddletensor32.as_ndarray().ravel().tolist(),
                         tensor32.ravel().tolist())

        tensor64 = np.random.randint(10, 20, size=[20, 2]).astype('int64')
        paddletensor64 = PaddleTensor(tensor64)
        value64 = np.array(paddletensor64.data.int64_data()).reshape(*[20, 2])
        dtype64 = paddletensor64.dtype
        self.assertEqual(value64.all(), tensor64.all())
        self.assertEqual(dtype64, PaddleDType.INT64)
        self.assertEqual(
            type(paddletensor64.data.tolist('int64')), type(tensor64.tolist()))
        self.assertEqual(
            paddletensor64.data.tolist('int64'), tensor64.ravel().tolist())
        self.assertEqual(type(paddletensor64.as_ndarray()), type(tensor64))
        paddletensor64.data.reset(tensor64)
        self.assertEqual(paddletensor64.as_ndarray().all(), tensor64.all())
        self.assertEqual(paddletensor64.as_ndarray().ravel().tolist(),
                         tensor64.ravel().tolist())

        tensor_float = np.random.randn(20, 2).astype('float32')
        paddletensor_float = PaddleTensor(tensor_float)
        value_float = np.array(paddletensor_float.data.float_data()).reshape(
            *[20, 2])
        dtype_float = paddletensor_float.dtype
        self.assertEqual(value_float.all(), tensor_float.all())
        self.assertEqual(dtype_float, PaddleDType.FLOAT32)
        self.assertEqual(
            type(paddletensor_float.data.tolist('float32')),
            type(tensor_float.tolist()))
        self.assertEqual(
            paddletensor_float.data.tolist('float32'),
            tensor_float.ravel().tolist())
        self.assertEqual(
            type(paddletensor_float.as_ndarray()), type(tensor_float))
        paddletensor_float.data.reset(tensor_float)
        self.assertEqual(paddletensor_float.as_ndarray().all(),
                         tensor_float.all())
        self.assertEqual(paddletensor_float.as_ndarray().ravel().tolist(),
                         tensor_float.ravel().tolist())


if __name__ == '__main__':
......
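The hunk above exercises the PaddleTensor round trip; a condensed sketch of the same pattern, using only calls that appear in the test, is:

import numpy as np
from paddle.fluid.core import PaddleDType, PaddleTensor

arr = np.random.randint(10, 20, size=[20, 2]).astype('int32')
pt = PaddleTensor(arr)  # wrap an ndarray; the dtype is inferred

assert pt.dtype == PaddleDType.INT32
# data.tolist() returns the flattened buffer, hence the ravel().
assert pt.data.tolist('int32') == arr.ravel().tolist()

pt.data.reset(arr)  # refill the underlying buffer in place
assert pt.as_ndarray().ravel().tolist() == arr.ravel().tolist()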