From 1db61c3e9dc74e9216409e5f6396563bff7d6faf Mon Sep 17 00:00:00 2001
From: baoachun <962571062@qq.com>
Date: Tue, 28 Dec 2021 12:56:02 +0800
Subject: [PATCH] add mul_lstm_fuse_pass ut (#37795)

* add mul_lstm_fuse_pass ut

* update mul_lstm_fuse_pass ut

* update ut

* update ut

* update ut

* add CPU ut cmake setting

* update ut
---
 .../fluid/framework/ir/fc_lstm_fuse_pass.cc   |   6 +-
 .../unittests/ir/inference/CMakeLists.txt     |  15 +++
 .../ir/inference/test_mul_lstm_fuse_pass.py   | 126 ++++++++++++++++++
 3 files changed, 144 insertions(+), 3 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py

diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc
index d72b626fc1e..b99e607f92b 100644
--- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc
@@ -68,13 +68,13 @@ MulLstmFusePass::MulLstmFusePass() {
       .IsType<bool>()
       .End()
       .AddAttr("gate_activation")
-      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .IsStringIn({"sigmoid"})
       .End()
       .AddAttr("cell_activation")
-      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .IsStringIn({"tanh", "relu", "identity"})
       .End()
       .AddAttr("candidate_activation")
-      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .IsStringIn({"tanh", "relu", "identity"})
       .End();
   AddOpCompat(OpCompat("mul"))
       .AddInput("X")
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
index 9f4a6b52ec0..1c640aad303 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
@@ -7,6 +7,13 @@ string(REPLACE ".py" "" TEST_TRT_IR_PASSES "${TEST_TRT_IR_PASSES}")
 file(GLOB TEST_TRT_CONVERTER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_trt_convert_*.py")
 string(REPLACE ".py" "" TEST_TRT_CONVERTER "${TEST_TRT_CONVERTER}")
 
+# Only for cpu(mkl + openblas)
+set(TEST_INFERENCE_CPU_UT "test_mul_lstm_fuse_pass")
+
+foreach(CPU_UT ${TEST_INFERENCE_CPU_UT})
+  list(REMOVE_ITEM TEST_INFERENCE_IR_PASSES ${CPU_UT})
+endforeach()
+
 foreach(TEST_INFERENCE_IR_PASS ${TEST_TRT_IR_PASSES})
   list(REMOVE_ITEM TEST_INFERENCE_IR_PASSES ${TEST_INFERENCE_IR_PASS})
 endforeach()
@@ -53,6 +60,14 @@ if (WITH_MKLDNN AND TENSORRT_FOUND AND WITH_GPU)
   endforeach()
 endif()
 
+if (NOT WITH_MKLDNN AND NOT TENSORRT_FOUND AND NOT WITH_GPU)
+  foreach(target ${TEST_INFERENCE_CPU_UT})
+    py_test_modules(${target} MODULES ${target})
+  endforeach()
+
+set_tests_properties(test_mul_lstm_fuse_pass PROPERTIES TIMEOUT 300)
+endif()
+
 if(WITH_GPU AND TENSORRT_FOUND)
   set_tests_properties(test_trt_subgraph_pass PROPERTIES TIMEOUT 120)
   set_tests_properties(test_trt_activation_pass PROPERTIES TIMEOUT 120)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py
new file mode 100644
index 00000000000..c944abb60c8
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import PassAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+from functools import reduce
+
+
+class TestMulLstmFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        return True
+
+    def sample_program_config(self, draw):
+        x_col = draw(st.sampled_from([1]))
+        y_col = draw(st.sampled_from([1]))
+        use_peepholes = draw(st.booleans())
+        is_reverse = draw(st.booleans())
+        gate_activation = draw(st.sampled_from(["sigmoid"]))
+        cell_activation = draw(st.sampled_from(["tanh", "relu", "identity"]))
+        candidate_activation = draw(
+            st.sampled_from(["tanh", "relu", "identity"]))
+        batch_size = draw(st.integers(min_value=1, max_value=40))
+
+        def generate_input():
+            shape = [batch_size, 128, 6, 120]
+            return np.full(shape, 0.01).astype(np.float32)
+
+        def generate_weight(shape):
+            return np.full(shape, 0.0001).astype(np.float32)
+
+        im2sequence_op = OpConfig(
+            type="im2sequence",
+            inputs={"X": ["input_data"]},
+            outputs={"Out": ["seq_out"]},
+            attrs={
+                "kernels": [6, 1],
+                "out_stride": [1, 1],
+                "paddings": [0, 0, 0, 0],
+                "strides": [1, 1]
+            })
+
+        mul_op = OpConfig(
+            type="mul",
+            inputs={"X": ["seq_out"],
+                    "Y": ["mul_weight"]},
+            outputs={"Out": ["mul_out"]},
+            attrs={"x_num_col_dims": x_col,
+                   "y_num_col_dims": y_col})
+
+        lstm_op = OpConfig(
+            type="lstm",
+            inputs={
+                "Input": ["mul_out"],
+                "Weight": ["lstm_weight"],
+                "Bias": ["lstm_bias"]
+            },
+            outputs={
+                "Hidden": ["lstm_hidden"],
+                "Cell": ["lstm_cell"],
+                "BatchGate": ["lstm_gate"],
+                "BatchCellPreAct": ["lstm_batch_cell"]
+            },
+            attrs={
+                'use_peepholes': use_peepholes,
+                'is_reverse': is_reverse,
+                'gate_activation': gate_activation,
+                'cell_activation': cell_activation,
+                'candidate_activation': candidate_activation,
+                'is_test': True
+            })
+
+        model_net = [im2sequence_op, mul_op, lstm_op]
+
+        if use_peepholes:
+            lstm_bias_shape = [1, 1050]
+        else:
+            lstm_bias_shape = [1, 600]
+
+        program_config = ProgramConfig(
+            ops=model_net,
+            weights={
+                "mul_weight":
+                TensorConfig(data_gen=partial(generate_weight, [768, 600])),
+                "lstm_weight":
+                TensorConfig(data_gen=partial(generate_weight, [150, 600])),
+                "lstm_bias":
+                TensorConfig(data_gen=partial(generate_weight, lstm_bias_shape))
+            },
+            inputs={
+                "input_data": TensorConfig(data_gen=partial(generate_input)),
+            },
+            outputs=["lstm_hidden"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config()
+        yield config, ["im2sequence", "fusion_lstm"], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(
+            quant=False, max_duration=300, passes=["mul_lstm_fuse_pass"])
+
+
+if __name__ == "__main__":
+    unittest.main()
--
GitLab
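
Note (not part of the changeset): the OpCompat change in fc_lstm_fuse_pass.cc narrows the lstm
activation attributes that mul_lstm_fuse_pass will accept, and the new unit test's
st.sampled_from(...) lists mirror exactly those sets. For quick reference, a minimal,
hypothetical plain-Python restatement of the accepted combinations follows; the function
name lstm_attrs_fusable is illustrative only and is not a Paddle API.

    # Illustrative sketch: restates the IsStringIn(...) constraints from
    # fc_lstm_fuse_pass.cc after this patch; not part of the changeset.
    def lstm_attrs_fusable(gate_activation, cell_activation, candidate_activation):
        return (gate_activation in {"sigmoid"}
                and cell_activation in {"tanh", "relu", "identity"}
                and candidate_activation in {"tanh", "relu", "identity"})

    # Example: (tanh, tanh, tanh) passed the old compat check but is no longer
    # considered fusable; (sigmoid, relu, identity) still is.
    print(lstm_attrs_fusable("tanh", "tanh", "tanh"))         # False
    print(lstm_attrs_fusable("sigmoid", "relu", "identity"))  # True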