diff --git a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc
index 52e88c6408b0e8007d84bd16c21bb5beec8e76b5..53cd2335fe23f5ef1299d9cc95b2008a03a003af 100644
--- a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc
@@ -31,6 +31,22 @@ namespace ir {
 class Node;
 
 MulGRUFusePass::MulGRUFusePass() {
+  AddOpCompat(OpCompat("mul"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddInput("Y")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("x_num_col_dims")
+      .IsNumEQ(1)
+      .End()
+      .AddAttr("y_num_col_dims")
+      .IsNumEQ(1)
+      .End();
   AddOpCompat(OpCompat("gru"))
       .AddInput("Input")
       .IsTensor()
       .End()
@@ -58,10 +74,10 @@ MulGRUFusePass::MulGRUFusePass() {
       .IsTensor()
       .End()
       .AddAttr("activation")
-      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .IsStringIn({"sigmoid", "tanh"})
       .End()
       .AddAttr("gate_activation")
-      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .IsStringIn({"sigmoid", "tanh"})
       .End()
       .AddAttr("is_reverse")
       .IsType<bool>()
       .End()
@@ -70,22 +86,6 @@ MulGRUFusePass::MulGRUFusePass() {
       .IsType<bool>()
       .IsOptional()
       .End();
-  AddOpCompat(OpCompat("mul"))
-      .AddInput("X")
-      .IsTensor()
-      .End()
-      .AddInput("Y")
-      .IsTensor()
-      .End()
-      .AddOutput("Out")
-      .IsTensor()
-      .End()
-      .AddAttr("x_num_col_dims")
-      .IsNumEQ(1)
-      .End()
-      .AddAttr("y_num_col_dims")
-      .IsNumEQ(1)
-      .End();
 }
 
 FCGRUFusePass::FCGRUFusePass() {
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
index bd2a8449446d2ef0486645e150f5fe11a1a50bcd..11abb2623bb224bc5bda1f1194558f4d1ca09b17 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
@@ -8,7 +8,7 @@ file(GLOB TEST_TRT_CONVERTER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_trt_co
 string(REPLACE ".py" "" TEST_TRT_CONVERTER "${TEST_TRT_CONVERTER}")
 
 # Only for cpu(mkl + openblas)
-set(TEST_INFERENCE_CPU_UT "test_mul_lstm_fuse_pass")
+set(TEST_INFERENCE_CPU_UT "test_mul_lstm_fuse_pass" "test_mul_gru_fuse_pass")
 
 foreach(CPU_UT ${TEST_INFERENCE_CPU_UT})
   list(REMOVE_ITEM TEST_INFERENCE_IR_PASSES ${CPU_UT})
@@ -66,6 +66,7 @@ if (NOT WITH_MKLDNN AND NOT TENSORRT_FOUND AND NOT WITH_GPU)
   endforeach()
 
   set_tests_properties(test_mul_lstm_fuse_pass PROPERTIES TIMEOUT 300)
+  set_tests_properties(test_mul_gru_fuse_pass PROPERTIES TIMEOUT 300)
 endif()
 
 if(WITH_GPU AND TENSORRT_FOUND)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mul_gru_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_gru_fuse_pass.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b1400e45bbc07c16ec062c28bd739884a8095fa
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_gru_fuse_pass.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import PassAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+from functools import reduce
+
+
+class TestMulGruFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        return True
+
+    def sample_program_config(self, draw):
+        x_col = draw(st.sampled_from([1]))
+        y_col = draw(st.sampled_from([1]))
+        activation = draw(st.sampled_from(['sigmoid', 'tanh']))
+        is_reverse = draw(st.booleans())
+        has_origin_mode = draw(st.booleans())
+        origin_mode = False
+        gate_activation = draw(st.sampled_from(['sigmoid', 'tanh']))
+        batch_size = draw(st.integers(min_value=1, max_value=40))
+
+        def generate_input():
+            shape = [batch_size, 128, 6, 120]
+            return np.full(shape, 0.001).astype(np.float32)
+
+        def generate_weight(shape):
+            return np.full(shape, 0.0001).astype(np.float32)
+
+        im2sequence_op = OpConfig(
+            type="im2sequence",
+            inputs={"X": ["input_data"]},
+            outputs={"Out": ["seq_out"]},
+            attrs={
+                "kernels": [6, 1],
+                "out_stride": [1, 1],
+                "paddings": [0, 0, 0, 0],
+                "strides": [1, 1]
+            })
+
+        mul_op = OpConfig(
+            type="mul",
+            inputs={"X": ["seq_out"],
+                    "Y": ["mul_weight"]},
+            outputs={"Out": ["mul_out"]},
+            attrs={"x_num_col_dims": x_col,
+                   "y_num_col_dims": y_col})
+
+        if has_origin_mode:
+            gru_op = OpConfig(
+                type="gru",
+                inputs={
+                    "Input": ["mul_out"],
+                    "Weight": ["gru_weight"],
+                    "Bias": ["gru_bias"]
+                },
+                outputs={
+                    "BatchGate": ["batch_gate"],
+                    "BatchHidden": ["batch_hidden"],
+                    "BatchResetHiddenPrev": ["batch_reset"],
+                    "Hidden": ["hidden"]
+                },
+                attrs={
+                    'activation': activation,
+                    'is_reverse': is_reverse,
+                    'gate_activation': gate_activation,
+                    'is_test': True,
+                    'origin_mode': origin_mode
+                })
+        else:
+            gru_op = OpConfig(
+                type="gru",
+                inputs={
+                    "Input": ["mul_out"],
+                    "Weight": ["gru_weight"],
+                    "Bias": ["gru_bias"]
+                },
+                outputs={
+                    "BatchGate": ["batch_gate"],
+                    "BatchHidden": ["batch_hidden"],
+                    "BatchResetHiddenPrev": ["batch_reset"],
+                    "Hidden": ["hidden"]
+                },
+                attrs={
+                    'activation': activation,
+                    'is_reverse': is_reverse,
+                    'gate_activation': gate_activation,
+                    'is_test': True
+                })
+
+        model_net = [im2sequence_op, mul_op, gru_op]
+
+        program_config = ProgramConfig(
+            ops=model_net,
+            weights={
+                "mul_weight":
+                TensorConfig(data_gen=partial(generate_weight, [768, 600])),
+                "gru_weight":
+                TensorConfig(data_gen=partial(generate_weight, [200, 600])),
+                "gru_bias":
+                TensorConfig(data_gen=partial(generate_weight, [1, 600]))
+            },
+            inputs={
+                "input_data": TensorConfig(data_gen=partial(generate_input))
+            },
+            outputs=["hidden"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config()
+        yield config, ["im2sequence", "fusion_gru"], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(quant=False, passes=["mul_gru_fuse_pass"])
+
+
+if __name__ == "__main__":
+    unittest.main()