test_trt_convert_conv2d.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import unittest
from functools import partial
from typing import Any, Dict, List

import numpy as np
from program_config import ProgramConfig, TensorConfig
from trt_layer_auto_scan_test import TrtLayerAutoScanTest

import paddle.inference as paddle_infer


class TrtConvertConv2dTest(TrtLayerAutoScanTest):
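    """Auto-scan test that converts a conv2d + relu program to TensorRT.

    Programs are generated over combinations of conv2d attributes and checked
    under static and dynamic shapes at FP32, FP16, and INT8 precision.
    """
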
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        inputs = program_config.inputs
        weights = program_config.weights
        attrs = [
            program_config.ops[i].attrs for i in range(len(program_config.ops))
        ]

        # The input's channel count must equal the filter's input channels
        # multiplied by the number of groups.
        if (
            inputs['input_data'].shape[1]
            != weights['conv2d_weight'].shape[1] * attrs[0]['groups']
        ):
            return False

        # TensorRT builds older than 7.0 cannot handle 'SAME' padding with a
        # stride larger than 1 here.
        ver = paddle_infer.get_trt_compile_version()
        if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7000:
            if attrs[0]['padding_algorithm'] == 'SAME' and (
                attrs[0]['strides'][0] > 1 or attrs[0]['strides'][1] > 1
            ):
                return False

        return True

    def sample_program_configs(self):
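        # Emit one conv2d + relu program for every combination of the
        # attribute options defined below; the TensorRT workspace is 1 GiB.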
        self.trt_param.workspace_size = 1073741824

        def generate_input1(batch, attrs: List[Dict[str, Any]]):
            return (
                np.ones([batch, attrs[0]['groups'] * 3, 64, 64]).astype(
                    np.float32
                )
                / 4
            )

        def generate_weight1(attrs: List[Dict[str, Any]]):
            return np.random.random([9, 3, 3, 3]).astype(np.float32) - 0.5

        batch_options = [1, 2]
        strides_options = [[2, 2], [1, 2]]
        paddings_options = [[0, 3], [1, 2, 3, 4]]
        groups_options = [1, 3]
        padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID']
        dilations_options = [[1, 2]]
        data_format_options = ['NCHW']

        configurations = [
            batch_options,
            strides_options,
            paddings_options,
            groups_options,
            padding_algorithm_options,
            dilations_options,
            data_format_options,
        ]

        for (
            batch,
            strides,
            paddings,
            groups,
            padding_algorithm,
            dilations,
            data_format,
        ) in itertools.product(*configurations):
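            # conv2d attributes for this combination; the trailing empty dict
            # holds the relu op's (absent) attributes.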
            attrs = [
                {
                    "data_format": data_format,
                    "dilations": dilations,
                    "padding_algorithm": padding_algorithm,
                    "groups": groups,
                    "paddings": paddings,
                    "strides": strides,
                },
                {},
            ]

            ops_config = [
                {
                    "op_type": "conv2d",
                    "op_inputs": {
                        "Input": ["input_data"],
                        "Filter": ["conv2d_weight"],
                    },
                    "op_outputs": {"Output": ["conv_output_data"]},
                    "op_attrs": attrs[0],
                },
                {
                    "op_type": "relu",
                    "op_inputs": {"X": ["conv_output_data"]},
                    "op_outputs": {"Out": ["output_data"]},
                    "op_attrs": attrs[1],
                },
            ]

            ops = self.generate_op_config(ops_config)

            program_config = ProgramConfig(
                ops=ops,
                weights={
                    "conv2d_weight": TensorConfig(
                        data_gen=partial(generate_weight1, attrs)
                    )
                },
                inputs={
                    "input_data": TensorConfig(
                        data_gen=partial(generate_input1, batch, attrs)
                    )
                },
                outputs=["output_data"],
            )

            yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            # Min/opt/max ranges for the TensorRT dynamic-shape profile.
            input_groups = attrs[0]['groups'] * 3
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, input_groups, 32, 32],
                "output_data": [1, 24, 32, 32],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [4, input_groups, 64, 64],
                "output_data": [4, 24, 64, 64],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [1, input_groups, 64, 64],
                "output_data": [1, 24, 64, 64],
            }

        def clear_dynamic_shape():
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        # Expected op counts after conversion; identical for static and
        # dynamic shape.
        def generate_trt_nodes_num(attrs, dynamic_shape):
            return 1, 2

        attrs = [
            program_config.ops[i].attrs for i in range(len(program_config.ops))
        ]

        # for static_shape
        clear_dynamic_shape()
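        # Sweep FP32 / FP16 / INT8; the comparison tolerance loosens as the
        # precision drops.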
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), (1e-3, 1e-3)
        self.trt_param.precision = paddle_infer.PrecisionType.Int8
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), (1e-2, 1e-2)

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), (1e-3, 1e-3)
        self.trt_param.precision = paddle_infer.PrecisionType.Int8
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), (1e-2, 1e-2)

    def test(self):
        self.run_test()

    def test_quant(self):
        self.run_test(quant=True)


if __name__ == "__main__":
    unittest.main()