# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Any, Dict, List
import unittest


class TrtConvertPreluTest(TrtLayerAutoScanTest):
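    # Auto-scan test for the Paddle-TRT prelu converter: it sweeps input
    # ranks, prelu modes and data formats, then compares TRT output with the
    # GPU baseline under both static and dynamic shape.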
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input(batch, dim1, dim2, dim3):
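            # Build a float32 input whose rank follows the non-zero dims: a
            # dim equal to 0 is dropped, so (batch, 3, 0, 0) gives [batch, 3].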
            shape = [batch]
            if dim1 != 0:
                shape.append(dim1)
            if dim2 != 0:
                shape.append(dim2)
            if dim3 != 0:
                shape.append(dim3)
            return np.random.random(shape).astype(np.float32)

        def generate_alpha(attrs: List[Dict[str, Any]], dim1, dim2, dim3):
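            # Alpha's shape depends on the prelu mode: "all" uses a single
            # scalar, "channel" keeps one value per channel (dim1 for NCHW,
            # dim3 for NHWC, broadcast elsewhere), and "element" keeps one
            # value per element with the batch dim broadcast as 1.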
            if attrs[0]["mode"] == "all":
                return np.random.random(size=(1, )).astype(np.float32)
            elif attrs[0]["mode"] == "channel" and attrs[0][
                    "data_format"] == "NCHW":
                shape = [1]
                if dim1 != 0:
                    shape.append(dim1)
                if dim2 != 0:
                    shape.append(1)
                if dim3 != 0:
                    shape.append(1)
                return np.random.random(size=shape).astype(np.float32)
            elif attrs[0]["mode"] == "channel" and attrs[0][
                    "data_format"] == "NHWC":
                shape = [1]
                if dim1 != 0:
                    shape.append(1)
                if dim2 != 0:
                    shape.append(1)
                if dim3 != 0:
                    shape.append(dim3)
                return np.random.random(size=shape).astype(np.float32)
            elif attrs[0]["mode"] == "element":
                shape = [1]
                if dim1 != 0:
                    shape.append(dim1)
                if dim2 != 0:
                    shape.append(dim2)
                if dim3 != 0:
                    shape.append(dim3)
                return np.random.random(size=shape).astype(np.float32)

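        # Sweep batch sizes and dim combinations; 0 marks an absent dim.
        # Shapes where dim1 is absent but a later dim is present are skipped,
        # since generate_input only appends trailing dims.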
        for batch in [1, 4]:
            for dim1 in [0, 3]:
                for dim2 in [0, 16]:
                    for dim3 in [0, 32]:
                        self.dim1 = dim1
                        self.dim2 = dim2
                        self.dim3 = dim3

                        if dim1 == 0 and dim2 != 0:
                            continue
                        if dim1 == 0 and dim2 == 0 and dim3 != 0:
                            continue

                        for mode in ["all", "channel", "element"]:
                            for data_format in ['NCHW', 'NHWC']:
                                if mode == "channel" and dim1 == 0 and data_format == "NCHW":
                                    continue
                                if mode == "channel" and dim3 == 0 and data_format == "NHWC":
                                    continue
                                dics = [{
                                    "mode": mode,
                                    "data_format": data_format
                                }]
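                                # A single prelu op: input X and the Alpha
                                # weight produce Out.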
                                ops_config = [{
                                    "op_type": "prelu",
                                    "op_inputs": {
                                        "X": ["input_data"],
                                        "Alpha": ["alpha_weight"]
                                    },
                                    "op_outputs": {
                                        "Out": ["output_data"]
                                    },
                                    "op_attrs": dics[0]
                                }]
                                ops = self.generate_op_config(ops_config)

                                program_config = ProgramConfig(
                                    ops=ops,
                                    weights={
                                        "alpha_weight":
                                        TensorConfig(data_gen=partial(
                                            generate_alpha, dics, dim1, dim2,
                                            dim3))
                                    },
                                    inputs={
                                        "input_data":
                                        TensorConfig(data_gen=partial(
                                            generate_input, batch, dim1, dim2,
                                            dim3)),
                                    },
                                    outputs=["output_data"])

                                yield program_config

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
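            # Pick TRT min/opt/max shape ranges according to which dims are
            # present in the sampled input.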
            if self.dim1 == 0:
                self.dynamic_shape.min_input_shape = {
                    "input_data": [1],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data": [4],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data": [2],
                }
            else:
                if self.dim2 == 0 and self.dim3 == 0:
                    self.dynamic_shape.min_input_shape = {
                        "input_data": [1, 1],
                    }
                    self.dynamic_shape.max_input_shape = {
                        "input_data": [4, 32],
                    }
                    self.dynamic_shape.opt_input_shape = {
                        "input_data": [2, 3],
                    }
                elif self.dim2 != 0 and self.dim3 != 0:
                    self.dynamic_shape.min_input_shape = {
                        "input_data": [1, 1, 1, 1],
                    }
                    self.dynamic_shape.max_input_shape = {
                        "input_data": [4, 3, 16, 32],
                    }
                    self.dynamic_shape.opt_input_shape = {
                        "input_data": [2, 3, 16, 32],
                    }
                elif self.dim3 == 0:
                    self.dynamic_shape.min_input_shape = {
                        "input_data": [1, 1, 1],
                    }
                    self.dynamic_shape.max_input_shape = {
                        "input_data": [4, 3, 32],
                    }
                    self.dynamic_shape.opt_input_shape = {
                        "input_data": [2, 3, 16],
                    }

        def clear_dynamic_shape():
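            # Drop all shape ranges so the configs below run TRT in
            # static-shape mode.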
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

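        # Collect every op's attributes so the expectations below can key
        # off them.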
        attrs = [
            program_config.ops[i].attrs for i in range(len(program_config.ops))
        ]

        def generate_trt_nodes_num(attrs, dynamic_shape):
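            # Expected (trt_engine_num, paddle_op_num) after partitioning:
            # a 1-D input under static shape is not converted, so the prelu
            # op stays on Paddle.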
            if not dynamic_shape and self.dim1 == 0 and self.dim2 == 0 and self.dim3 == 0:
                return 0, 3
            return 1, 2

        # for static_shape
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True), (1e-3, 1e-3)

    def add_skip_trt_case(self):
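        # get_trt_compile_version() returns (major, minor, patch); TRT older
        # than 7.0 ships a buggy static-shape prelu plugin, so skip all
        # static-shape runs there.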
        ver = paddle_infer.get_trt_compile_version()
        if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7000:

            def teller(program_config, predictor_config):
                return not predictor_config.tensorrt_dynamic_shape_enabled()

            self.add_skip_case(
                teller, SkipReasons.TRT_NOT_IMPLEMENTED,
                "Need to repair this case: GPU and TensorRT outputs differ "
                "on TRT 6 because the static-shape prelu plugin is buggy.")

    def test(self):
        self.add_skip_trt_case()
        self.run_test()


if __name__ == "__main__":
    unittest.main()