diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc
index 2b5cb6dd050a6ee0a0410b44d3a5bbd3c40cd469..1f1f86e70c9f535136938d0f26ca5dfd4891014c 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.cc
+++ b/paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -110,7 +110,6 @@ const std::vector<std::string> kTRTSubgraphPasses({
       "conv_bn_fuse_pass",                    //
       "unsqueeze2_eltwise_fuse_pass",         //
       "trt_squeeze2_matmul_fuse_pass",        //
-      "trt_reshape2_matmul_fuse_pass",        //
       "trt_flatten2_matmul_fuse_pass",        //
       "trt_map_matmul_v2_to_mul_pass",        //
       "trt_map_matmul_v2_to_matmul_pass",     //
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
index 085ed55a979be73c2aa399ff4760ccec9368ffd9..5f3bfa62ebc1a64683a7b99efb65a6db89929d38 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
@@ -171,8 +171,6 @@ if(WITH_GPU AND TENSORRT_FOUND)
                                                                      240)
   set_tests_properties(test_trt_squeeze2_matmul_fuse_pass PROPERTIES TIMEOUT
                                                                      240)
-  set_tests_properties(test_trt_reshape2_matmul_fuse_pass PROPERTIES TIMEOUT
-                                                                     240)
   set_tests_properties(test_shuffle_channel_detect_pass PROPERTIES TIMEOUT 120)
   if(WIN32)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape2_matmul_fuse_pass.py
deleted file mode 100644
index d2dca92345ad33647c07f47641346d99f8a6d493..0000000000000000000000000000000000000000
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape2_matmul_fuse_pass.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from auto_scan_test import PassAutoScanTest, IgnoreReasons
-from program_config import TensorConfig, ProgramConfig, OpConfig
-import numpy as np
-import paddle.inference as paddle_infer
-from functools import partial
-from typing import Optional, List, Callable, Dict, Any, Set
-import unittest
-
-import hypothesis
-from hypothesis import given, settings, seed, example, assume, reproduce_failure
-import hypothesis.strategies as st
-
-
-class TestReshape2MatmulFusePass(PassAutoScanTest):
-    """
-    x_var
-      |
-    reshape2
-         \
-    reshape2_out_var    y_var
-           \             /
-               matmul      bias_var
-                  \           /
-                elementwise_add
-    """
-
-    def sample_predictor_configs(self, program_config):
-        # TRT
-        config = self.create_trt_inference_config()
-        config.enable_tensorrt_engine(
-            max_batch_size=10,
-            workspace_size=102400,
-            min_subgraph_size=0,
-            precision_mode=paddle_infer.PrecisionType.Float32,
-            use_static=False,
-            use_calib_mode=False)
-        yield config, ['mul', 'elementwise_add'], (1e-4, 1e-1)
-
-    def add_ignore_pass_case(self):
-        # Here we put some skip rules to avoid known bugs
-        def teller1(program_config, predictor_config):
-            y_shape = list(program_config.weights["matmul_y"].shape)
-            bias_shape = program_config.weights["bias"].shape
-            axis = program_config.ops[2].attrs["axis"]
-            # bias should be [mul_y_shape[-1]]
-            if axis == 0 or bias_shape[0] != y_shape[1] or len(bias_shape) != 1:
-                return True
-            return False
-
-        self.add_ignore_check_case(
-            teller1,
-            IgnoreReasons.PASS_ACCURACY_ERROR,
-            "The pass error on TRT while shape of bias is not [out_size].",
-        )
-
-    def sample_program_config(self, draw):
-        # 1. Generate shape and attr of reshape2
-        reshape = draw(
-            st.lists(st.integers(min_value=1, max_value=10),
-                     min_size=2,
-                     max_size=2))
-        x_shape = reshape + [1, 1]
-
-        # 2. Generate attr:transpose_X/transpose_Y/alpha of matmul
-        alpha = 1.0
-        transpose_X = False
-        transpose_Y = False
-
-        # 3. Generate legal shape of input:Y of matmul
-        y_shape = draw(
-            st.lists(st.integers(min_value=1, max_value=8),
-                     min_size=2,
-                     max_size=2))
-        y_shape[0] = x_shape[1]
-
-        # 4. Generate legal attr:axis of elementwise_add
-        axis = draw(st.integers(min_value=-1, max_value=1))
-        if axis == 0:
-            axis = -1
-        bias_shape = [
-            y_shape[1],
-        ]
-        # if axis == -1:
-        #     if draw(st.booleans()):
-        #         bias_shape = [y_shape[1], ]
-        #     else:
-        #         bias_shape = [x_shape[0], y_shape[1]]
-
-        reshape2_op = OpConfig(
-            "reshape2",
-            inputs={
-                "X": ["reshape2_x"],
-            },
-            shape=reshape,
-            outputs={
-                "Out": ["reshape2_out"],
-                "XShape": ["xshape"]
-            },
-        )
-        matmul_op = OpConfig(
-            "matmul",
-            inputs={
-                "X": ["reshape2_out"],
-                "Y": ["matmul_y"]
-            },
-            outputs={"Out": ["matmul_out"]},
-            alpha=alpha,
-            transpose_X=transpose_X,
-            transpose_Y=transpose_Y,
-            fused_reshape_X=[],
-            fused_reshape_Y=[],
-            fused_transpose_X=[],
-            fused_transpose_Y=[],
-            fused_reshape_Out=[],
-            fused_transpose_Out=[],
-        )
-
-        add_op = OpConfig(
-            "elementwise_add",
-            inputs={
-                "X": ["matmul_out"],
-                "Y": ["bias"]
-            },
-            outputs={"Out": ["add_out"]},
-            axis=axis,
-        )
-
-        ops = [reshape2_op, matmul_op, add_op]
-
-        program_config = ProgramConfig(
-            ops=ops,
-            weights={
-                "matmul_y": TensorConfig(shape=y_shape),
-                "bias": TensorConfig(shape=bias_shape),
-            },
-            inputs={
-                "reshape2_x": TensorConfig(shape=x_shape),
-            },
-            outputs=ops[-1].outputs["Out"],
-        )
-
-        return program_config
-
-    def test(self):
-        self.run_and_statis(quant=False,
-                            max_examples=50,
-                            passes=["trt_reshape2_matmul_fuse_pass"])
-
-
-if __name__ == "__main__":
-    unittest.main()