diff --git a/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc b/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc
index 3940cc5dce1b0003eb947e63706b9ebd0463ef6a..fa21442e2db85727c76a9d97e1fd6116cbbbe3f6 100644
--- a/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc
@@ -59,22 +59,24 @@ class NearestInterpolateOpConverter : public OpConverter {
     float scale_h = 1.f;
     float scale_w = 1.f;
 
     std::vector<float> scales;
-
-    if (scale > 0.f && (out_h <= 0 && out_w <= 0)) {
+    if (scale > 0.f) {
       scale_h = scale;
       scale_w = scale;
     } else {
       // axis are different in static/dynamic mode
       bool with_dynamic = engine_->with_dynamic_shape();
-      int h_axis = (data_layout == framework::DataLayout::kNCHW) + with_dynamic;
-      int w_axis =
-          (data_layout == framework::DataLayout::kNCHW) + 1 + with_dynamic;
-
-      scale_h =
-          static_cast<float>(out_h) / static_cast<float>(in_dim.d[h_axis]);
-      scale_w =
-          static_cast<float>(out_w) / static_cast<float>(in_dim.d[w_axis]);
+      if (!with_dynamic) {
+        int h_axis =
+            (data_layout == framework::DataLayout::kNCHW) + with_dynamic;
+        int w_axis =
+            (data_layout == framework::DataLayout::kNCHW) + 1 + with_dynamic;
+
+        scale_h =
+            static_cast<float>(out_h) / static_cast<float>(in_dim.d[h_axis]);
+        scale_w =
+            static_cast<float>(out_w) / static_cast<float>(in_dim.d[w_axis]);
+      }
     }
 
     if (engine_->with_dynamic_shape()) {
diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc
index c92a85269e2abb0168004367c3c0e06d49a978f9..19ecc4eaf47452ac909e45ca5d7d517a262e0733 100644
--- a/paddle/fluid/inference/tensorrt/op_teller.cc
+++ b/paddle/fluid/inference/tensorrt/op_teller.cc
@@ -476,6 +476,10 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
           return false;
         }
       }
+      if ((scale <= 0.f) && with_dynamic_shape) {
+        VLOG(3) << "nearest_interp does not support dynamic shape when scale is not set.";
+        return false;
+      }
     }
   }
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py
new file mode 100644
index 0000000000000000000000000000000000000000..134446ffa57e094f587e1634ed24680ed5208cb6
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
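+
+# Auto-scan test for the TensorRT nearest_interp converter: it sweeps
+# data_layout / align_corners / scale / out_h / out_w combinations and
+# checks the built engine in both static- and dynamic-shape mode at FP32
+# and FP16 precision.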
+
+from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+
+class TrtConvertNearestInterpTest(TrtLayerAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        inputs = program_config.inputs
+        weights = program_config.weights
+        attrs = [
+            program_config.ops[i].attrs
+            for i in range(len(program_config.ops))
+        ]
+
+        # The op needs either a positive scale or an explicit output size,
+        # and out_h/out_w must be set (or unset) together.
+        if attrs[0]['scale'] <= 0 and (attrs[0]['out_h'] <= 0 or
+                                       attrs[0]['out_w'] <= 0):
+            return False
+        if (attrs[0]['out_h'] <= 0) ^ (attrs[0]['out_w'] <= 0):
+            return False
+
+        return True
+
+    def sample_program_configs(self):
+        def generate_input1(attrs: List[Dict[str, Any]]):
+            return np.ones([1, 3, 64, 64]).astype(np.float32)
+
+        for data_layout in ["NCHW", "NHWC"]:
+            for interp_method in ["nearest"]:
+                for align_corners in [True, False]:
+                    for scale in [2.0, -1.0, 0.0]:
+                        for out_h in [32, 64, 128, -32]:
+                            for out_w in [32, -32]:
+                                dics = [{
+                                    "data_layout": data_layout,
+                                    "interp_method": interp_method,
+                                    "align_corners": align_corners,
+                                    "scale": scale,
+                                    "out_h": out_h,
+                                    "out_w": out_w
+                                }]
+
+                                ops_config = [{
+                                    "op_type": "nearest_interp",
+                                    "op_inputs": {
+                                        "X": ["input_data"]
+                                    },
+                                    "op_outputs": {
+                                        "Out": ["nearest_interp_output_data"]
+                                    },
+                                    "op_attrs": dics[0]
+                                }]
+                                ops = self.generate_op_config(ops_config)
+
+                                program_config = ProgramConfig(
+                                    ops=ops,
+                                    weights={},
+                                    inputs={
+                                        "input_data": TensorConfig(
+                                            data_gen=partial(generate_input1,
+                                                             dics))
+                                    },
+                                    outputs=["nearest_interp_output_data"])
+
+                                yield program_config
+
+    def sample_predictor_configs(
+            self, program_config) -> (paddle_infer.Config, List[int], float):
+        def generate_dynamic_shape(attrs):
+            self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
+            self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
+            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}
+
+        def clear_dynamic_shape():
+            self.dynamic_shape.min_input_shape = {}
+            self.dynamic_shape.max_input_shape = {}
+            self.dynamic_shape.opt_input_shape = {}
+
+        def generate_trt_nodes_num(attrs, dynamic_shape):
+            return 1, 2
+
+        attrs = [
+            program_config.ops[i].attrs
+            for i in range(len(program_config.ops))
+        ]
+
+        # for static_shape
+        clear_dynamic_shape()
+        self.trt_param.precision = paddle_infer.PrecisionType.Float32
+        yield self.create_inference_config(), generate_trt_nodes_num(
+            attrs, False), 1e-5
+        self.trt_param.precision = paddle_infer.PrecisionType.Half
+        yield self.create_inference_config(), generate_trt_nodes_num(
+            attrs, False), 1e-2
+
+        # for dynamic_shape
+        generate_dynamic_shape(attrs)
+        self.trt_param.precision = paddle_infer.PrecisionType.Float32
+        yield self.create_inference_config(), generate_trt_nodes_num(
+            attrs, True), 1e-5
+        self.trt_param.precision = paddle_infer.PrecisionType.Half
+        yield self.create_inference_config(), generate_trt_nodes_num(
+            attrs, True), 1e-2
+
+    def add_skip_trt_case(self):
+        def teller1(program_config, predictor_config):
+            # Mirror the op_teller rule added above: skip when scale is not
+            # set and the predictor runs with dynamic shape.
+            if program_config.ops[0].attrs[
+                    'scale'] <= 0 and self.dynamic_shape.min_input_shape:
+                return True
+            return False
+
+        self.add_skip_case(
+            teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
+            "NOT Implemented: support for scale <= 0 with dynamic shape needs to be added in the future"
+        )
+
+    def test(self):
+        self.add_skip_trt_case()
+        self.run_test()
+
+
== "__main__": + unittest.main()