From 25d25b00ba1fb9376fc4f1959c96a0a7f13d8a31 Mon Sep 17 00:00:00 2001 From: zhoutianzi666 <39978853+zhoutianzi666@users.noreply.github.com> Date: Mon, 22 Aug 2022 20:22:43 +0800 Subject: [PATCH] [Paddle-TRT] support output_padding in conv2d_transpose and conv3d_transpose (#45004) --- .../inference/tensorrt/convert/conv2d_op.cc | 33 +++-- .../inference/tensorrt/convert/conv3d_op.cc | 29 +++- paddle/fluid/inference/tensorrt/op_teller.cc | 10 +- .../test_trt_convert_conv2d_transpose.py | 10 +- .../test_trt_convert_conv3d_transpose.py | 138 ++++++++++++++++++ 5 files changed, 197 insertions(+), 23 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py diff --git a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc index c483202bfa..91c1ab33be 100644 --- a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc @@ -77,6 +77,12 @@ void ConvertConv2d(TensorRTEngine* engine, PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); std::vector paddings = PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + // for conv2d_transpose + std::vector output_padding; + if (op_desc.HasAttr("output_padding")) { + output_padding = + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("output_padding")); + } std::string padding_algorithm = "EXPLICIT"; if (op_desc.HasAttr("padding_algorithm")) padding_algorithm = @@ -90,15 +96,14 @@ void ConvertConv2d(TensorRTEngine* engine, nvinfer1::DimsHW nv_ksize(filter_h, filter_w); nvinfer1::DimsHW nv_dilations(dilations[0], dilations[1]); nvinfer1::DimsHW nv_strides(strides[0], strides[1]); - nvinfer1::DimsHW nv_paddings; - nvinfer1::Dims nv_pre_paddings; - nvinfer1::Dims nv_post_paddings; + nvinfer1::DimsHW nv_pre_paddings; + nvinfer1::DimsHW nv_post_paddings; if (paddings.size() == 2) { - nv_paddings.d[0] = paddings[0]; - nv_paddings.d[1] = paddings[1]; + nv_pre_paddings.d[0] = paddings[0]; + nv_pre_paddings.d[1] = paddings[1]; + nv_post_paddings.d[0] = paddings[0]; + nv_post_paddings.d[1] = paddings[1]; } else { - nv_pre_paddings.nbDims = 2; - nv_post_paddings.nbDims = 2; nv_pre_paddings.d[0] = paddings[0]; nv_pre_paddings.d[1] = paddings[2]; nv_post_paddings.d[0] = paddings[1]; @@ -138,12 +143,16 @@ void ConvertConv2d(TensorRTEngine* engine, platform::errors::Fatal("TensorRT create conv2d/conv2d_transpose" " layer failed.")); layer->setStride(nv_strides); - if (paddings.size() == 2) { - layer->setPadding(nv_paddings); - } else { - layer->setPrePadding(nv_pre_paddings); - layer->setPostPadding(nv_post_paddings); + layer->setPrePadding(nv_pre_paddings); + if (output_padding.size() > 0) { + nv_post_paddings.d[0] -= output_padding[0]; + nv_post_paddings.d[1] -= output_padding[1]; + } + if (nv_post_paddings.d[0] < 0 || nv_post_paddings.d[1] < 0) { + PADDLE_THROW(platform::errors::Fatal( + "The value in conv2d_transpose's PostPadding should be >= 0.")); } + layer->setPostPadding(nv_post_paddings); layer->setNbGroups(groups); if (padding_algorithm == "SAME") { diff --git a/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc index de6b24eabc..78f2a02ee6 100644 --- a/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc @@ -78,10 +78,17 @@ void ConvertConv3d(TensorRTEngine* engine, padding_algorithm = PADDLE_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); + // for 
conv3d_transpose + std::vector output_padding; + if (op_desc.HasAttr("output_padding")) { + output_padding = + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("output_padding")); + } + nvinfer1::Dims3 nv_ksize(filter_d, filter_h, filter_w); nvinfer1::Dims3 nv_dilations(dilations[0], dilations[1], dilations[2]); nvinfer1::Dims3 nv_strides(strides[0], strides[1], strides[2]); - nvinfer1::Dims3 nv_paddings(paddings[0], paddings[1], paddings[2]); + nvinfer1::Dims3 nv_pre_paddings(paddings[0], paddings[1], paddings[2]); auto weight = engine->GetTrtWeight(op_desc.Input("Filter").front(), *Y_t); float* bias_data = nullptr; @@ -99,7 +106,25 @@ void ConvertConv3d(TensorRTEngine* engine, platform::errors::Fatal("TensorRT create conv3d/conv3d_transpose" " layer failed.")); layer->setStrideNd(nv_strides); - layer->setPaddingNd(nv_paddings); + layer->setPrePadding(nv_pre_paddings); + nvinfer1::Dims3 nv_post_paddings = nv_pre_paddings; + if (output_padding.size() > 0) { +// Here is consistent with op_teller.cc +#if IS_TRT_VERSION_GE(8400) + nv_post_paddings.d[0] -= output_padding[0]; + nv_post_paddings.d[1] -= output_padding[1]; + nv_post_paddings.d[2] -= output_padding[2]; + + if (nv_post_paddings.d[0] < 0 || nv_post_paddings.d[1] < 0 || + nv_post_paddings.d[2] < 0) { + PADDLE_THROW(platform::errors::Fatal( + "The value in conv3d_transpose's PostPadding should be >= 0.")); + } + +#endif + } + layer->setPostPadding(nv_post_paddings); + layer->setNbGroups(groups); if (padding_algorithm == "SAME") { layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER); diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index 36e4f2cbc9..eaa2188623 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -2061,10 +2061,11 @@ bool OpTeller::Tell(const framework::ir::Node* node, } #endif - // conv2d_transpose, conv3d_transpose, depthwise_conv2d_transpose - if (op_type.find("d_transpose") > 0) { - // trt doen't support output_padding, - // output_padding is set when stride > 1 + // conv3d_transpose + if (op_type == "conv3d_transpose") { + // trt doen't support output_padding when < 8406 + // output_padding is usually set when stride > 1 +#if !IS_TRT_VERSION_GE(8400) if (desc.HasAttr("output_padding")) { const std::vector output_padding = PADDLE_GET_CONST(std::vector, desc.GetAttr("output_padding")); @@ -2074,6 +2075,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (max_padding > 0) return false; } } +#endif } if (op_type == "conv3d" || op_type == "conv3d_transpose") { diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py index 3cecb9bb74..ce69d9d739 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py @@ -301,7 +301,7 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - return 0, 3 + return 1, 2 attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -311,19 +311,19 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), 1e-4 
self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-3) + attrs, False), (1e0, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), 1e-4 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-3) + attrs, True), (1e0, 1e-3) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py new file mode 100644 index 0000000000..3852254a53 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py @@ -0,0 +1,138 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons +from program_config import TensorConfig, ProgramConfig +import numpy as np +import unittest +import paddle.inference as paddle_infer +from functools import partial +from typing import Optional, List, Callable, Dict, Any, Set + + +# Special case +class TrtConvertConv3dTransposeTest(TrtLayerAutoScanTest): + + def is_program_valid(self, program_config: ProgramConfig) -> bool: + ver = paddle_infer.get_trt_compile_version() + if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8400: + return False + return True + + def sample_program_configs(self): + self.trt_param.workspace_size = 1073741824 + + def generate_input1(batch, num_channels, attrs: List[Dict[str, Any]]): + return np.ones([batch, num_channels, 4, 20, 30]).astype(np.float32) + + def generate_weight1(num_channels, attrs: List[Dict[str, Any]]): + return np.random.random([num_channels, 64, 3, 3, + 3]).astype(np.float32) + + num_channels = 128 + batch = 1 + # in_channels + self.num_channels = num_channels + dics = [{ + "data_fromat": 'NCHW', + "dilations": [1, 1, 1], + "padding_algorithm": 'EXPLICIT', + "groups": 1, + "paddings": [1, 1, 1], + "strides": [2, 2, 2], + "output_padding": [1, 1, 1], + "output_size": [], + }] + + ops_config = [{ + "op_type": "conv3d_transpose", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv3d_weight"] + }, + "op_outputs": { + "Output": ["output_data"] + }, + "op_attrs": dics[0] + }] + ops = self.generate_op_config(ops_config) + + program_config = ProgramConfig( + ops=ops, + weights={ + "conv3d_weight": + TensorConfig( + data_gen=partial(generate_weight1, num_channels, dics)) + }, + inputs={ + "input_data": + TensorConfig(data_gen=partial(generate_input1, batch, + num_channels, dics)) + }, + outputs=["output_data"]) + + yield program_config + + def sample_predictor_configs( + self, program_config) -> (paddle_infer.Config, List[int], float): + + def 
generate_dynamic_shape(attrs): + self.dynamic_shape.min_input_shape = { + "input_data": [1, 128, 4, 20, 30], + } + self.dynamic_shape.max_input_shape = { + "input_data": [1, 128, 4, 20, 30], + } + self.dynamic_shape.opt_input_shape = { + "input_data": [1, 128, 4, 20, 30], + } + + def clear_dynamic_shape(): + self.dynamic_shape.min_input_shape = {} + self.dynamic_shape.max_input_shape = {} + self.dynamic_shape.opt_input_shape = {} + + def generate_trt_nodes_num(attrs, dynamic_shape): + return 1, 2 + + attrs = [ + program_config.ops[i].attrs for i in range(len(program_config.ops)) + ] + + # for static_shape + clear_dynamic_shape() + self.trt_param.precision = paddle_infer.PrecisionType.Float32 + yield self.create_inference_config(), generate_trt_nodes_num( + attrs, False), 1e-3 + + # for dynamic_shape + generate_dynamic_shape(attrs) + self.trt_param.precision = paddle_infer.PrecisionType.Float32 + yield self.create_inference_config(), generate_trt_nodes_num( + attrs, True), 1e-3 + + def add_skip_trt_case(self): + pass + + def test(self): + self.add_skip_trt_case() + self.run_test() + + def test_quant(self): + self.add_skip_trt_case() + self.run_test(quant=True) + + +if __name__ == "__main__": + unittest.main() -- GitLab
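
Note on the padding arithmetic in the converters above: TensorRT's deconvolution layer exposes pre- and post-padding (setPrePadding / setPostPadding) rather than an output_padding attribute, so the patch folds Paddle's output_padding into the post-padding as post_padding = padding - output_padding and throws if that value would go negative. The following standalone Python sketch is not part of the patch; it is an illustration (assuming dilation = 1 and a single spatial dimension, with made-up helper names) that checks this mapping reproduces Paddle's transposed-convolution output size:

import itertools

def paddle_deconv_out(h_in, k, stride, pad, output_padding):
    # Paddle conv2d_transpose / conv3d_transpose output size per spatial dim
    # (dilation assumed to be 1, output_size not set).
    return (h_in - 1) * stride - 2 * pad + k + output_padding

def trt_deconv_out(h_in, k, stride, pre_pad, post_pad):
    # TensorRT IDeconvolutionLayer output size per spatial dim
    # (dilation assumed to be 1).
    return (h_in - 1) * stride + k - pre_pad - post_pad

# Shapes mirror the values used by the new unit tests (e.g. 4 x 20 x 30 inputs,
# kernel 3, stride 2, padding 1, output_padding 0 or 1).
for h_in, k, stride, pad, opad in itertools.product([4, 20, 30], [3], [2], [1], [0, 1]):
    assert paddle_deconv_out(h_in, k, stride, pad, opad) == \
        trt_deconv_out(h_in, k, stride, pre_pad=pad, post_pad=pad - opad)

Because a negative post-padding cannot express the requested shape this way, the conv2d/conv3d converters above raise a fatal error when padding - output_padding is less than zero instead of passing that value to TensorRT.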