diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index 27187b229f5a7db191db075bfc9ae0c025a16e8e..5e9f7e28a20e779de571df1bdd6ea9eca147b7e1 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -204,12 +204,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, << " op does not support input's dim is 1 in tensorrt."; return false; } - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "activation op does not support input's dim is 2 in " - "tensorrt static shape, the output shape has diff."; - return false; - } } if (op_type == "pool2d") { @@ -458,12 +452,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "softmax op does not support input's dim is 2 in tensorrt " - "static shape, the output shape has diff."; - return false; - } } if (op_type == "group_norm") { if (!with_dynamic_shape) return false; @@ -489,22 +477,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, return false; } } - auto* block = desc.Block(); - if (block == nullptr) { - VLOG(3) << "The block desc is nullptr, we can't continue to analyze. 
" - "Developers need to check whether block_desc is passed in " - "the pass."; - return false; - } - auto x_var_name = desc.Input("X")[0]; - auto* x_var_desc = block->FindVar(x_var_name); - const auto x_shape = x_var_desc->GetShape(); - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "concat op does not support input's dim is 2 in tensorrt " - "static shape, the output shape has diff."; - return false; - } } if (op_type == "transpose2" || op_type == "transpose") { if (!desc.HasAttr("axis")) { @@ -831,12 +803,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "batch_norm op does not support input's dim is 2 in " - "tensorrt static shape, the output shape has diff."; - return false; - } } if (op_type == "split") { @@ -924,13 +890,8 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, VLOG(3) << "The output_length should be equal to the output size."; return false; } - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "split op does not support input's dim is 2 in tensorrt " - "static shape. 
The output shape has diff."; - return false; - } } + if (op_type == "scale") { auto scale_inputs = desc.Inputs(); if (scale_inputs.find("ScaleTensor") != scale_inputs.end()) { @@ -948,11 +909,27 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); - if (!with_dynamic_shape && x_shape.size() == 1) return false; + if (!with_dynamic_shape && x_shape.size() == 1) { + VLOG(3) << "Scale op does not support 1-dimensional input in tensorrt"; + return false; + } } + if (op_type == "slice") { + if (desc.HasAttr("decrease_axis")) { + std::vector<int> decrease_axis = + BOOST_GET_CONST(std::vector<int>, desc.GetAttr("decrease_axis")); + if (decrease_axis.size() > 0) { + VLOG(3) << "Invalid slice decrease_axis. decrease_axis.size() > 0 " "is not supported in TensorRT"; + return false; + } + } + if (!desc.HasAttr("axes") || !desc.HasAttr("starts") || - !desc.HasAttr("ends") || !desc.HasAttr("decrease_axis")) { + !desc.HasAttr("ends")) { + VLOG(3) << "The necessary attributes of the slice operator axes " "or starts or ends are missing."; return false; } else { std::vector<int> axes = @@ -961,14 +938,10 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, BOOST_GET_CONST(std::vector<int>, desc.GetAttr("starts")); std::vector<int> ends = BOOST_GET_CONST(std::vector<int>, desc.GetAttr("ends")); - std::vector<int> decrease_axis = - BOOST_GET_CONST(std::vector<int>, desc.GetAttr("decrease_axis")); + if (axes.size() != starts.size() || axes.size() != ends.size()) { - return false; - } - if (decrease_axis.size() > 0) { - VLOG(3) << "Invalid slice decrease_axis. 
decrease_axis.size() > 0" - "is not supported in TensorRT"; + VLOG(3) << "The shape of attributes of the slice operator axes " + "or starts or ends are not equal."; return false; } if (!with_dynamic_shape) { @@ -1082,12 +1055,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, VLOG(3) << "gelu op does not support input's dim is 1 in tensorrt."; return false; } - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "gelu op does not support input's dim is 2 in tensorrt " - "static shape, the output shape has diff."; - return false; - } } if (op_type == "layer_norm") { @@ -1207,29 +1174,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } } - if (op_type == "scale") { - auto* block = desc.Block(); - if (block == nullptr) { - VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " - "Developers need to check whether block_desc is passed in " - "the pass."; - return false; - } - auto x_var_name = desc.Input("X")[0]; - auto* x_var_desc = block->FindVar(x_var_name); - const auto x_shape = x_var_desc->GetShape(); - if (x_shape.size() == 1) { - VLOG(3) << "scale op does not support input's dim is 1 in tensorrt."; - return false; - } - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "scale op does not support input's dim is 2 in tensorrt " - "static shape, the output shape has diff."; - return false; - } - } - if (op_type == "swish") { auto* block = desc.Block(); if (block == nullptr) { @@ -1245,12 +1189,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, VLOG(3) << "swish op does not support input's dim is 1 in tensorrt."; return false; } - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "swish op does not support input's dim is 2 in tensorrt " - "static shape, the output shape has diff."; - return false; - } } if (op_type == "prelu") { @@ -1288,13 
+1226,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, return false; } - if (!with_dynamic_shape) { - if (x_shape.size() == 2) { - VLOG(3) << "prelu op does not support input's dim is 2 in tensorrt."; - return false; - } - } - #if IS_TRT_VERSION_LT(7000) if (!with_dynamic_shape) { // TODO(inference): fix trt6 static plugin error. @@ -1546,12 +1477,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, VLOG(3) << "clip op does not support input's dim is 1 in tensorrt."; return false; } - // TODO(inference): fix - if (x_shape.size() == 2 && !with_dynamic_shape) { - VLOG(3) << "clip op does not support input's dim is 2 in tensorrt " - "static shape, the output shape has diff."; - return false; - } } if (op_type == "reduce_sum" || op_type == "reduce_mean") { @@ -1667,15 +1592,17 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (!with_dynamic_shape) { auto* block = desc.Block(); if (block == nullptr) { - VLOG(3) << "The block is null."; + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. 
" + "Developers need to check whether block_desc is passed in " + "the pass."; return false; } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); - if (x_shape.size() <= 2) { - VLOG(3) << "hard_sigmoid op does not support input's dim less than 3 " - "in tensorrt."; + if (x_shape.size() == 1) { + VLOG(3) << "Hard sigmoid does not support 1-dimensional input in " + "tensorrt"; return false; } } diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py index a87cab3430cd3043fe3af6c8090e72a9c45a7645..bc40d3b4c27d90fbd545fa9a32485e2b416afc13 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py @@ -126,18 +126,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): yield self.create_inference_config(), generate_trt_nodes_num(attrs, True), 1e-5 - def add_skip_trt_case(self): - def teller1(program_config, predictor_config): - if self.dims == 2: - return True - return False - - self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "When input dims is 2, pulgin will product a 4 dims output.") - def test(self): - self.add_skip_trt_case() self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py index fc96f297918dda149c67f37be22e83d96d4a65bc..410cef798aa6323f2f84fc510a88a00c95bf2a63 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py @@ -212,18 +212,6 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest): self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, "INPUT MomentumTensor NOT 
SUPPORT") - def teller2(program_config, predictor_config): - if len( - program_config.inputs['batch_norm_input'].shape - ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled(): - return True - return False - - self.add_skip_case( - teller2, SkipReasons.TRT_NOT_IMPLEMENTED, - "The output shape has diff, but we can add shuffle layer to resolve it." - ) - def test(self): self.add_skip_trt_case() self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py index 081df87d1033086ecef26e6fd6aed20ef2eff65a..5150622cf801d35c94b48d8dd9ddd01f48cd62c9 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py @@ -146,21 +146,7 @@ class TrtConvertClipTest(TrtLayerAutoScanTest): yield self.create_inference_config(), generate_trt_nodes_num(attrs, True), 1e-5 - def add_skip_trt_case(self): - def teller1(program_config, predictor_config): - if len( - program_config.inputs['input_data'].shape - ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled(): - return True - return False - - self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "The output shape has diff, but we can add shuffle layer to resolve it." 
- ) - def test(self): - self.add_skip_trt_case() self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py index 78ac06a323b1ddba7a6f57b290030e682419e476..e8a7649fd95fb104aa0bae4648f6a50dc0d7d611 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py @@ -318,18 +318,6 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, "INPUT AxisTensor NOT SUPPORT") - def teller2(program_config, predictor_config): - if len( - program_config.inputs['concat_input1'].shape - ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled(): - return True - return False - - self.add_skip_case( - teller2, SkipReasons.TRT_NOT_IMPLEMENTED, - "The output shape has diff, but we can add shuffle layer to resolve it." - ) - def test(self): self.add_skip_trt_case() self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py index 2f75e4e723e281b31f2f75bcc7cee89389036142..838678b1c8449b6136dda00dcb3a70c03b3e9c16 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py @@ -126,18 +126,7 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest): yield self.create_inference_config(), generate_trt_nodes_num(attrs, True), 1e-5 - def add_skip_trt_case(self): - def teller1(program_config, predictor_config): - if self.dims == 2: - return True - return False - - self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "When input dims is 2, pulgin will product a 4 dims output.") - def test(self): - self.add_skip_trt_case() self.run_test() diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py index c09c7f0bc9c2f098991f38181f010b8d1badef56..969f0e8b148a2983e2a414bc0959b75fbcc2087e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py @@ -106,20 +106,7 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest): self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), (1, 2), 1e-5 - def add_skip_trt_case(self): - def teller(program_config, predictor_config): - if len(self.dynamic_shape. - min_input_shape) == 0 and self.input_dim == 2: - return True - return False - - self.add_skip_case( - teller, SkipReasons.TRT_NOT_SUPPORT, - "Need to repair the case: the output of trt and GPU has diff when inputs' dims is 2 in static shape mode." - ) - def test(self): - self.add_skip_trt_case() self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py index fbb78fceb3e84a11fa35d05ce3a86150c24cdb78..0bcbffb36789d833708040a5194aa25aa3183424 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py @@ -176,17 +176,6 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, "Trt does not support 1-dimensional input.") - def teller2(program_config, predictor_config): - if (len(self.dynamic_shape.min_input_shape) == 0): - if self.dim1 != 0 and self.dim2 == 0 and self.dim3 == 0: - return True - return False - - self.add_skip_case( - teller2, SkipReasons.TRT_NOT_SUPPORT, - "Need to repair the case: the output of GPU and tensorrt has diff when the input dimension is 
2 in static shape mode." - ) - ver = paddle_infer.get_trt_compile_version() if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 < 7000: diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py index 51bcee080376ea55a11d911978d5c36013ca4693..62e7a1032774728a772e376a12b73ec4fb9aeff2 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py @@ -145,7 +145,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): def add_skip_trt_case(self): def teller1(program_config, predictor_config): - if len(program_config.weights) == 1: + if self.num_input == 0: return True return False @@ -153,7 +153,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): "INPUT ScaleTensor and Shape NOT SUPPORT") def teller2(program_config, predictor_config): - if self.dims == 1 and self.dynamic_shape.min_input_shape == 0: + if self.dims == 1 and len(self.dynamic_shape.min_input_shape) == 0: return True return False diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py index 4a15a09b0f77eebdf2eedf1c29b20842020da231..7efaebf00cf728ddcec0a7d615f9f6871c04b4c1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py @@ -135,21 +135,7 @@ class TrtConvertSoftmaxTest(TrtLayerAutoScanTest): yield self.create_inference_config(), generate_trt_nodes_num(attrs, True), 1e-5 - def add_skip_trt_case(self): - def teller1(program_config, predictor_config): - if len( - program_config.inputs['softmax_input'].shape - ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled(): - return True - return False - - self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "The 
output shape has diff, but we can add shuffle layer to resolve it." - ) - def test(self): - self.add_skip_trt_case() self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py index f03ed0a335eebaaf526e5c11ae8b72195de7f0c5..cef84dfbb4e0a426b2702d38ed7cdd38f48fa2c5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py @@ -227,18 +227,6 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): teller1, SkipReasons.TRT_NOT_SUPPORT, "INPUT AxisTensor AND SectionsTensorList NOT SUPPORT.") - def teller2(program_config, predictor_config): - if len( - program_config.inputs['split_input'].shape - ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled(): - return True - return False - - self.add_skip_case( - teller2, SkipReasons.TRT_NOT_IMPLEMENTED, - "The output shape has diff, but we can add shuffle layer to resolve it." 
- ) - def test(self): self.add_skip_trt_case() self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py index 5eb4e8505ff228801de5b81f0141d3bfc7d5465b..df97e7542b88297a484686933e777d84ccf3eedd 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py @@ -126,18 +126,7 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): yield self.create_inference_config(), generate_trt_nodes_num(attrs, True), 1e-5 - def add_skip_trt_case(self): - def teller1(program_config, predictor_config): - if self.dims == 2: - return True - return False - - self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "When input dims is 2, pulgin will product a 4 dims output.") - def test(self): - self.add_skip_trt_case() self.run_test()