# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
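
# Auto-scan test for Paddle-TRT activation-op converters: it samples
# activation ops, input ranks, and attribute values, builds programs, and
# checks the expected TensorRT partition and numerical accuracy under
# static and dynamic shapes in both FP32 and FP16.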

import unittest
from functools import partial
from typing import Any, Dict, List, Tuple

import numpy as np
from program_config import ProgramConfig, TensorConfig
from trt_layer_auto_scan_test import TrtLayerAutoScanTest

import paddle.inference as paddle_infer


class TrtConvertActivationTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
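        # The TRT compile version is packed as major*1000 + minor*100 +
        # patch*10; the "round" op is only convertible on TensorRT >= 8.2.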
        ver = paddle_infer.get_trt_compile_version()
        if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8200:
            if program_config.ops[0].type == "round":
                return False
        return True

    def sample_program_configs(self):
        def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
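            # Random input per sampled rank: a 0-D scalar, a 1-D vector,
            # or a 4-D NCHW batch.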
            if dims == 0:
                return np.random.random([]).astype(np.float32)
            elif dims == 1:
                return np.random.random([32]).astype(np.float32)
            else:
                return np.random.random([batch, 3, 32, 32]).astype(np.float32)

        for dims in [0, 1, 4]:
            for batch in [1, 4]:
                for op_type in [
                    "relu",
                    "sigmoid",
                    "relu6",
                    "elu",
                    "selu",
                    "silu",
                    "softsign",
                    "stanh",
                    "thresholded_relu",
                    "celu",
                    "logsigmoid",
                    "tanh_shrink",
                    "softplus",
                    "hard_swish",
                    "hard_sigmoid",
                    "leaky_relu",
                ]:
                    # few samples to reduce time
                    # for beta in [-0.2, 0.5, 0.67, 3]:
                    #    for alpha in [-0.2, 0.5, 0.67, 3]:
                    for beta in [0.67]:
                        for alpha in [0.67]:
                            self.dims = dims
                            dics = [{}]
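                            # Per-op attribute overrides; ops not listed
                            # below run with empty attrs.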
                            if op_type == "celu":
                                dics = [{"alpha": 1.0}]
                            if op_type == "elu":
                                dics = [{"alpha": alpha}]
                            if op_type == "selu":
                                dics = [{"alpha": beta, "scale": alpha}]
                            if op_type == "stanh":
                                dics = [{"scale_a": beta, "scale_b": alpha}]
                            if op_type == "thresholded_relu":
                                dics = [{"threshold": alpha}]
                            if op_type == "softplus":
                                dics = [{"beta": beta}]
                            if op_type == "hard_swish":
                                dics = [
                                    {
                                        "threshold": 6.0,
                                        "scale": 6.0,
                                        "offset": 3.0,
                                    }
                                ]
                            if op_type == "hard_sigmoid":
                                dics = [{"slope": beta, "offset": alpha}]
                            if op_type == "leaky_relu":
                                dics = [{"alpha": alpha}]

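                            # A single activation op wired from input_data
                            # to output_data with the attrs selected above.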
                            ops_config = [
                                {
                                    "op_type": op_type,
                                    "op_inputs": {"X": ["input_data"]},
                                    "op_outputs": {"Out": ["output_data"]},
                                    "op_attrs": dics[0],
                                }
                            ]
                            ops = self.generate_op_config(ops_config)

                            program_config = ProgramConfig(
                                ops=ops,
                                weights={},
                                inputs={
                                    "input_data": TensorConfig(
                                        data_gen=partial(
                                            generate_input1, dims, batch, dics
                                        )
                                    )
                                },
                                outputs=["output_data"],
                            )

                            yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> Tuple[paddle_infer.Config, List[int], float]:
        def generate_dynamic_shape(attrs):
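            # min/opt/max profiles must bracket the shapes produced by
            # generate_input1; the rank-2/3 branches are kept even though
            # only ranks 0, 1, and 4 are sampled.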
            if self.dims == 0:
                self.dynamic_shape.min_input_shape = {"input_data": []}
                self.dynamic_shape.max_input_shape = {"input_data": []}
                self.dynamic_shape.opt_input_shape = {"input_data": []}
            elif self.dims == 1:
                self.dynamic_shape.min_input_shape = {"input_data": [1]}
                self.dynamic_shape.max_input_shape = {"input_data": [64]}
                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
            elif self.dims == 2:
                self.dynamic_shape.min_input_shape = {"input_data": [1, 16]}
                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32]}
            elif self.dims == 3:
                self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16]}
                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32, 32]}
            else:
                self.dynamic_shape.min_input_shape = {
                    "input_data": [1, 3, 16, 16]
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data": [4, 3, 32, 32]
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data": [1, 3, 32, 32]
                }

        def clear_dynamic_shape():
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        def generate_trt_nodes_num(attrs, dynamic_shape):
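            # Expected (trt_engine_num, paddle_op_num) after partition:
            # 0-D/1-D inputs under static shape, and celu/logsigmoid/
            # tanh_shrink on 0-D inputs with a TRT runtime below 8.6,
            # fall back to Paddle entirely.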
            if not dynamic_shape and (self.dims == 1 or self.dims == 0):
                return 0, 3
            runtime_version = paddle_infer.get_trt_runtime_version()
            if (
                runtime_version[0] * 1000
                + runtime_version[1] * 100
                + runtime_version[2] * 10
                < 8600
                and self.dims == 0
            ) and program_config.ops[0].type in [
                "celu",
                "logsigmoid",
                "tanh_shrink",
            ]:
                return 0, 3
            return 1, 2

        attrs = [
            program_config.ops[i].attrs for i in range(len(program_config.ops))
        ]
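
        # Each yield returns (config, expected op counts, tolerance);
        # FP32 runs use 1e-5 and FP16 runs relax to 1e-3.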

        # for static_shape
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-3

    def test(self):
        self.run_test()


if __name__ == "__main__":
    unittest.main()