# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from functools import partial
from typing import List
import numpy as np
from program_config import ProgramConfig, TensorConfig
from trt_layer_auto_scan_test import TrtLayerAutoScanTest

import paddle.inference as paddle_infer

class TrtConvertEqualOneInputCornerCase(TrtLayerAutoScanTest):
    """Auto-scan test for TensorRT conversion of `equal` / `not_equal`.

    Each sampled program compares two same-shaped float32 inputs
    element-wise and casts the boolean result back to float32, covering
    2-D, 3-D and 4-D shapes in both static- and dynamic-shape modes.
    """

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        """Reject program configs the TensorRT converter cannot handle."""
        attrs = [op.attrs for op in program_config.ops]
        # Comparison with broadcast along axis 0 is not convertible.
        if attrs[0]['axis'] == 0:
            return False
        # Requires TensorRT >= 8.4.1.5; the compile version is encoded as
        # major*1000 + minor*100 + patch*10 for the comparison below.
        ver = paddle_infer.get_trt_compile_version()
        return ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 >= 8415

    def sample_program_configs(self):
        """Yield compare+cast programs over op types, batch sizes and ranks."""

        def make_input(shape):
            # Random float32 data; both operands share the same shape.
            return np.random.random(shape).astype(np.float32)

        for op_type in ["equal", "not_equal"]:
            for batch in [1, 2, 4]:
                for shape in [[batch, 1], [batch, 1, 32], [batch, 1, 16, 32]]:
                    # The original looped over a one-element axis list; a
                    # plain assignment is equivalent (shapes here are never
                    # rank 1, so axis is always 1).
                    axis = -1 if len(shape) == 1 else 1
                    # Remember the rank for sample_predictor_configs().
                    self.dims = len(shape)
                    compare_attrs = {"axis": axis}
                    # NOTE(review): in_dtype=0 / out_dtype=5 are framework
                    # dtype enums — presumably bool -> float32, which
                    # matches the declared outputs_dtype values below.
                    cast_attrs = {"in_dtype": 0, "out_dtype": 5}
                    op_defs = [
                        {
                            "op_type": op_type,
                            "op_inputs": {
                                "X": ["input_data1"],
                                "Y": ["input_data2"],
                            },
                            "op_outputs": {
                                "Out": ["compare_output_data"]
                            },
                            "op_attrs": compare_attrs,
                            "outputs_dtype": {
                                "compare_output_data": np.bool_
                            },
                        },
                        {
                            "op_type": "cast",
                            "op_inputs": {"X": ["compare_output_data"]},
                            "op_outputs": {"Out": ["output_data"]},
                            "op_attrs": cast_attrs,
                            "outputs_dtype": {"output_data": np.float32},
                        },
                    ]
                    yield ProgramConfig(
                        ops=self.generate_op_config(op_defs),
                        weights={},
                        inputs={
                            "input_data1": TensorConfig(
                                data_gen=partial(make_input, shape)
                            ),
                            "input_data2": TensorConfig(
                                data_gen=partial(make_input, shape)
                            ),
                        },
                        outputs=["output_data"],
                    )

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        """Yield (config, expected op counts, tolerance) per precision/shape mode."""

        def set_dynamic_shape_profile(attrs):
            # (min, max, opt) input-shape profiles keyed by input rank.
            profiles = {
                2: ([1, 1], [4, 1], [2, 1]),
                3: ([1, 1, 4], [4, 1, 32], [2, 1, 16]),
                4: ([1, 1, 4, 4], [4, 1, 64, 32], [2, 1, 32, 16]),
            }
            if self.dims in profiles:
                low, high, opt = profiles[self.dims]
                # Copy the lists so the three profile dicts never alias.
                self.dynamic_shape.min_input_shape = {
                    "input_data1": list(low),
                    "input_data2": list(low),
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data1": list(high),
                    "input_data2": list(high),
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data1": list(opt),
                    "input_data2": list(opt),
                }

        def clear_dynamic_shape():
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        def expected_op_counts(attrs, dynamic_shape):
            # Static shape: no TRT subgraph is produced (0 TRT ops,
            # 5 Paddle ops remain).
            if not dynamic_shape:
                return 0, 5
            if self.dims == 1:
                return 0, 3
            return 1, 3

        attrs = [op.attrs for op in program_config.ops]

        precision_cases = [
            (paddle_infer.PrecisionType.Float32, 1e-5),
            (paddle_infer.PrecisionType.Half, 1e-3),
        ]

        # for static_shape
        clear_dynamic_shape()
        for precision, tolerance in precision_cases:
            self.trt_param.precision = precision
            yield (
                self.create_inference_config(),
                expected_op_counts(attrs, False),
                tolerance,
            )

        # for dynamic_shape
        set_dynamic_shape_profile(attrs)
        for precision, tolerance in precision_cases:
            self.trt_param.precision = precision
            yield (
                self.create_inference_config(),
                expected_op_counts(attrs, True),
                tolerance,
            )

    def test(self):
        """Run the auto-scan over every sampled program/config pair."""
        # 1 MiB of TRT workspace is plenty for these tiny graphs.
        self.trt_param.workspace_size = 1 << 20
        self.run_test()


if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()