diff --git a/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc b/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc index f8c338a2fe7f7ba295711d9eb2c83edc65918dd3..e851be755855bc9ae5dc605c7e6b06602d64b959 100644 --- a/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc +++ b/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc @@ -14,8 +14,8 @@ limitations under the License. */ #include #include - #include "gflags/gflags.h" + #include "paddle/fluid/inference/tensorrt/helper.h" #include "paddle/fluid/inference/tests/api/trt_test_helper.h" @@ -423,7 +423,7 @@ TEST(AnalysisPredictor, ernie_varlen) { run(predictor.get(), &out_data); std::vector<float> ref_data{ 0.59814, 0.219882, 0.181978, 0.359796, 0.577414, 0.0627908}; - float near_tolerance = 1e-3; + float near_tolerance = 4e-3; for (size_t i = 0; i < out_data.size(); i++) { EXPECT_NEAR(ref_data[i], out_data[i], near_tolerance); } diff --git a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py index 50d32a9ed77ae74f189192c99cd235a2017ee081..92f1d2cc8405706f9a0fbeada1ab0ad363ef8119 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py @@ -538,8 +538,9 @@ class TrtLayerAutoScanTest(AutoScanTest): self.dynamic_shape = self.DynamicShapeParam({}, {}, {}, False) self.num_percent_cases = float( os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) - # Choose different tests by week - np.random.seed(int(time.strftime("%W"))) + + # Use a separate random generator for skipping tests + self.skip_rng = np.random.default_rng(int(time.strftime("%W"))) def create_inference_config(self, use_trt=True) -> paddle_infer.Config: config = paddle_infer.Config() @@ -555,11 +556,10 @@ class TrtLayerAutoScanTest(AutoScanTest): precision_mode=self.trt_param.precision, use_static=self.trt_param.use_static, use_calib_mode=self.trt_param.use_calib_mode) - if len(self.dynamic_shape.min_input_shape - ) != 0 and self.dynamic_shape.min_input_shape.keys( - ) == self.dynamic_shape.max_input_shape.keys( - ) and self.dynamic_shape.min_input_shape.keys( - ) == self.dynamic_shape.opt_input_shape.keys(): + if self.dynamic_shape.min_input_shape and ( + self.dynamic_shape.min_input_shape.keys() == + self.dynamic_shape.max_input_shape.keys() == + self.dynamic_shape.opt_input_shape.keys()): config.set_trt_dynamic_shape_info( self.dynamic_shape.min_input_shape, self.dynamic_shape.max_input_shape, @@ -567,6 +567,16 @@ class TrtLayerAutoScanTest(AutoScanTest): self.dynamic_shape.disable_trt_plugin_fp16) return config + def assert_tensors_near(self, atol: float, rtol: float, + tensor: Dict[str, np.array], + baseline: Dict[str, np.array]): + for key, arr in tensor.items(): + self.assertEqual( + baseline[key].shape, arr.shape, + 'The output shapes are not equal, the baseline shape is ' + + str(baseline[key].shape) + ', but got ' + str(arr.shape)) + np.testing.assert_allclose(baseline[key], arr, rtol=rtol, atol=atol) + def assert_op_size(self, trt_engine_num, paddle_op_num): last_passed_program = os.path.join( self.cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel') @@ -579,14 +589,14 @@ class TrtLayerAutoScanTest(AutoScanTest): ] trt_engine_size = sum(op_types) paddle_op_size = op_size - trt_engine_size - self.assertTrue( - trt_engine_size == trt_engine_num, - 'trt_engine_num is {}, but got {}!'.format(trt_engine_size, - trt_engine_num)) - self.assertTrue( -
paddle_op_size == paddle_op_num, - 'paddle_op_num is {}, but got {}!'.format(paddle_op_size, - paddle_op_num)) + self.assertEqual( + trt_engine_num, trt_engine_size, + 'Expected trt_engine_num is {}, but got {}!'.format( + trt_engine_num, trt_engine_size)) + self.assertEqual( + paddle_op_num, paddle_op_size, + 'Expected paddle_op_num is {}, but got {}!'.format( + paddle_op_num, paddle_op_size)) def inference_config_str(self, config: paddle_infer.Config) -> str: dic = {} @@ -602,18 +612,16 @@ class TrtLayerAutoScanTest(AutoScanTest): return str(dic) def run_test(self, quant=False, skip_baseline=False, *args, **kwargs): - status = True - run_flags = [] + all_passes = True + + def random_to_skip(): + if self.skip_rng.random() < self.num_percent_cases: + return False + return True + for prog_config in self.sample_program_configs(*args, **kwargs): - # In CI, only run 10% cases - if np.random.rand() < self.num_percent_cases: - run_flags.append(True) - else: - run_flags.append(False) - - for prog_config, run_flags in zip( - self.sample_program_configs(*args, **kwargs), run_flags): - if not run_flags: + + if random_to_skip(): continue # if program is invalid, we should skip that cases. @@ -657,30 +665,32 @@ class TrtLayerAutoScanTest(AutoScanTest): else: raise NotImplementedError - if quant and pred_config.tensorrt_precision_mode( - ) != paddle_infer.PrecisionType.Int8: + if pred_config.tensorrt_precision_mode( + ) != paddle_infer.PrecisionType.Int8 and quant: continue if pred_config.tensorrt_precision_mode( ) == paddle_infer.PrecisionType.Int8 and not quant: continue ignore_flag = False - for ignore_info in self.ignore_cases: - if ignore_info[0](prog_config, pred_config): + for teller, reason, note in self.ignore_cases: + if teller(prog_config, pred_config): ignore_flag = True - if ignore_info[1] == IgnoreReasons.TRT_NOT_IMPLEMENTED: + if reason == IgnoreReasons.TRT_NOT_IMPLEMENTED: self.ignore_log( - "[TRT_NOT_IMPLEMENTED] " + ignore_info[2] + - ' ' + ' vs ' + - self.inference_config_str(pred_config)) - elif ignore_info[1] == IgnoreReasons.TRT_NOT_SUPPORT: - self.ignore_log( - "[TRT_NOT_SUPPORT] " + ignore_info[2] + ' ' + - ' vs ' + self.inference_config_str(pred_config)) + '[TRT_NOT_IMPLEMENTED] {} vs {}'.format( + note, + self.inference_config_str(pred_config))) + elif reason == IgnoreReasons.TRT_NOT_SUPPORT: + self.ignore_log('[TRT_NOT_SUPPORT] {} vs {}'.format( + note, self.inference_config_str(pred_config))) else: raise NotImplementedError break + if ignore_flag: + continue + try: pred_config_deserialize = paddle_infer.Config(pred_config) results.append( @@ -688,24 +698,23 @@ class TrtLayerAutoScanTest(AutoScanTest): pred_config, feed_data)) self.assert_tensors_near(atol, rtol, results[-1], results[0]) - if not ignore_flag: - self.assert_op_size(nodes_num[0], nodes_num[1]) + trt_engine_num, paddle_op_num = nodes_num + self.assert_op_size(trt_engine_num, paddle_op_num) + # deserialize test - if nodes_num[0] > 0: + if trt_engine_num > 0: self.run_test_config(model, params, prog_config, pred_config_deserialize, feed_data) + + self.success_log('RUN predictor_config {} done'.format( + self.inference_config_str(pred_config))) except Exception as e: self.fail_log( self.inference_config_str(pred_config) + '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e))) - if not ignore_flag: - status = False - continue - self.success_log('RUN predictor_config ' + - self.inference_config_str(pred_config) + - ' done') + all_passes = False - self.assertTrue(status) + self.assertTrue(all_passes) # TODO(wilber): 
just for backward compatible def add_skip_case(self, teller: [ diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py index c5958f93ef837fc30e95f7cace0c2c4b79aa1593..deeffe99674f140e5d04febbec72f63322fc0f5a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py @@ -135,7 +135,7 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -144,7 +144,7 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py index fa73ab7c62eacc4624ce33a26d8aec58f98b5826..6682993c83c9cbeab193926f1226fae3ec1a0592 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py @@ -205,7 +205,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -214,7 +214,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py index 13b9fb6f5c90b9e7014d51a4ffe72187dff1b750..e4514dd0ee948ac988a46ecd2784e3f87de13a9f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons -from program_config import TensorConfig, ProgramConfig -import numpy as np import unittest -import paddle.inference as paddle_infer +import itertools from functools import partial from typing import Optional, List, Callable, Dict, Any, Set +import numpy as np +import paddle.inference as paddle_infer +from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons +from program_config import TensorConfig, ProgramConfig class TrtConvertConv2dTest(TrtLayerAutoScanTest): @@ -52,62 +53,74 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): def generate_weight1(attrs: List[Dict[str, Any]]): return np.random.random([24, 3, 3, 3]).astype(np.float32) - for batch in [1, 4]: - for strides in [[1, 1], [2, 2], [1, 2]]: - for paddings in [[0, 3], [1, 2, 3, 4]]: - for groups in [1, 3]: - for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']: - for dilations in [[1, 1], [2, 2], [1, 2]]: - for data_format in ['NCHW']: - - dics = [{ - "data_fromat": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides, - "data_format": data_format - }, {}] - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["conv_output_data"] - }, - "op_attrs": dics[0] - }, { - "op_type": "relu", - "op_inputs": { - "X": ["conv_output_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[1] - }] - - ops = self.generate_op_config(ops_config) - - program_config = ProgramConfig( - ops=ops, - weights={ - "conv2d_weight": - TensorConfig(data_gen=partial( - generate_weight1, dics)) - }, - inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, dics)) - }, - outputs=["output_data"]) - - yield program_config + batch_options = [1, 2] + strides_options = [[2, 2], [1, 2]] + paddings_options = [[0, 3], [1, 2, 3, 4]] + groups_options = [1, 3] + padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID'] + dilations_options = [[1, 2]] + data_format_options = ['NCHW'] + + configurations = [ + batch_options, + strides_options, + paddings_options, + groups_options, + padding_algorithm_options, + dilations_options, + data_format_options, + ] + + for batch, strides, paddings, groups, padding_algorithm, dilations, data_format in itertools.product( + *configurations): + + attrs = [{ + "data_fromat": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + "data_format": data_format, + }, {}] + + ops_config = [{ + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"] + }, + "op_outputs": { + "Output": ["conv_output_data"] + }, + "op_attrs": attrs[0] + }, { + "op_type": "relu", + "op_inputs": { + "X": ["conv_output_data"] + }, + "op_outputs": { + "Out": ["output_data"] + }, + "op_attrs": attrs[1] + }] + + ops = self.generate_op_config(ops_config) + + program_config = ProgramConfig( + ops=ops, + weights={ + "conv2d_weight": + TensorConfig(data_gen=partial(generate_weight1, attrs)) + }, + inputs={ + "input_data": + TensorConfig( + data_gen=partial(generate_input1, batch, attrs)) + }, + outputs=["output_data"]) + + yield program_config def sample_predictor_configs( self, program_config) -> (paddle_infer.Config, List[int], float): @@ -146,10 +159,10 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): attrs, False), 
1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -158,10 +171,10 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py index 1a36ea12e86c8a21acacc241978c66be9c625277..9ac0bbd470b127b7bff69705c6325ce8ed63e207 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons -from program_config import TensorConfig, ProgramConfig -import numpy as np -import paddle.inference as paddle_infer +import unittest +from itertools import product from functools import partial from typing import Optional, List, Callable, Dict, Any, Set -import unittest +import numpy as np +import paddle.inference as paddle_infer +from program_config import TensorConfig, ProgramConfig +from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): @@ -58,68 +59,78 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): def generate_weight2(attrs: List[Dict[str, Any]]): return np.random.random([24, 1, 1]).astype(np.float32) - for batch in [1, 4]: - for strides in [[1, 1], [2, 2], [1, 2]]: - for paddings in [[0, 3], [1, 2, 3, 4]]: - for groups in [2, 3]: - for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']: - for dilations in [[1, 1], [2, 2], [1, 2]]: - for data_format in ['NCHW']: - - dics = [{ - "data_fromat": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides, - "data_format": data_format - }, { - "axis": 1 - }] - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["conv_output_data"] - }, - "op_attrs": dics[0] - }, { - "op_type": "elementwise_add", - "op_inputs": { - "X": ["conv_output_data"], - "Y": ["elementwise_weight"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[1] - }] - - ops = self.generate_op_config(ops_config) - - program_config = ProgramConfig( - ops=ops, - weights={ - "conv2d_weight": - TensorConfig(data_gen=partial( - generate_weight1, dics)), - "elementwise_weight": - TensorConfig(data_gen=partial( - generate_weight2, dics)) - }, - inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, dics)) - }, - outputs=["output_data"]) - - yield 
program_config + batch_options = [1, 2] + strides_options = [[1, 2], [2, 2]] + paddings_options = [[0, 3], [1, 2, 3, 4]] + groups_options = [2, 3] + padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID'] + dilations_options = [[1, 2]] + data_format_options = ['NCHW'] + + configurations = [ + batch_options, + strides_options, + paddings_options, + groups_options, + padding_algorithm_options, + dilations_options, + data_format_options, + ] + + for (batch, strides, paddings, groups, padding_algorithm, dilations, + data_format) in product(*configurations): + + attrs = [{ + "strides": strides, + "paddings": paddings, + "groups": groups, + "padding_algorithm": padding_algorithm, + "dilations": dilations, + "data_format": data_format, + }, { + "axis": 1 + }] + + ops_config = [{ + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"] + }, + "op_outputs": { + "Output": ["conv_output_data"] + }, + "op_attrs": attrs[0] + }, { + "op_type": "elementwise_add", + "op_inputs": { + "X": ["conv_output_data"], + "Y": ["elementwise_weight"] + }, + "op_outputs": { + "Out": ["output_data"] + }, + "op_attrs": attrs[1] + }] + + ops = self.generate_op_config(ops_config) + + program_config = ProgramConfig( + ops=ops, + weights={ + "conv2d_weight": + TensorConfig(data_gen=partial(generate_weight1, attrs)), + "elementwise_weight": + TensorConfig(data_gen=partial(generate_weight2, attrs)) + }, + inputs={ + "input_data": + TensorConfig( + data_gen=partial(generate_input1, batch, attrs)) + }, + outputs=["output_data"]) + + yield program_config def sample_predictor_configs( self, program_config) -> (paddle_infer.Config, List[int], float): @@ -131,8 +142,8 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): "output_data": [1, 24, 32, 32] } self.dynamic_shape.max_input_shape = { - "input_data": [4, input_groups, 64, 64], - "output_data": [4, 24, 64, 64] + "input_data": [2, input_groups, 64, 64], + "output_data": [2, 24, 64, 64] } self.dynamic_shape.opt_input_shape = { "input_data": [1, input_groups, 64, 64], @@ -158,10 +169,10 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -170,10 +181,10 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py index cab61143b77370be66941133b849875f9cbe99e9..3cecb9bb74a6ccc0e93f7b5cca377663bb26317a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py @@ -62,14 +62,14 @@ class 
TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): 3]).astype(np.float32) for num_channels in [2, 4, 6]: - for batch in [1, 2, 4]: - for strides in [[1, 1], [2, 2], [1, 2]]: + for batch in [1, 4]: + for strides in [[2, 2], [1, 2]]: for paddings in [[0, 3], [1, 2, 3, 4]]: for groups in [2]: for padding_algorithm in [ 'EXPLICIT', 'SAME', 'VALID' ]: - for dilations in [[1, 1], [2, 2], [1, 2]]: + for dilations in [[2, 2], [1, 2]]: for data_format in ['NCHW']: self.num_channels = num_channels diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py index f4d6a5f1efa5e8035f642eec85b3b4143e3fcf19..90afa7fc1f05aec23061c6d69255b812c9542a8a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons -from program_config import TensorConfig, ProgramConfig +import unittest +import itertools +from functools import partial +from typing import List, Dict, Any import numpy as np +from program_config import TensorConfig, ProgramConfig +from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons import paddle.inference as paddle_infer -from functools import partial -from typing import Optional, List, Callable, Dict, Any, Set -import unittest class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): @@ -40,113 +41,93 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): self.trt_param.workspace_size = 1073741824 def generate_input1(batch, attrs: List[Dict[str, Any]]): - if attrs[0]['groups'] == 1: - return np.ones([batch, 1, 64, 64]).astype(np.float32) - elif attrs[0]['groups'] == 2: - return np.ones([batch, 2, 64, 64]).astype(np.float32) - else: - return np.ones([batch, 3, 64, 64]).astype(np.float32) + groups = attrs[0]['groups'] + return np.ones([batch, groups, 64, 64]).astype(np.float32) def generate_weight1(attrs: List[Dict[str, Any]]): return np.random.random([24, 1, 3, 3]).astype(np.float32) - for batch in [1, 2, 4]: - for strides in [[1, 1], [2, 2], [1, 2]]: - for paddings in [[0, 3], [1, 2, 3, 4]]: - for groups in [1, 2, 3]: - for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']: - for dilations in [[1, 1], [2, 2], [1, 2]]: - for data_format in ['NCHW']: - - dics = [{ - "data_fromat": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides, - "data_format": data_format - }] - - ops_config = [{ - "op_type": "depthwise_conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": dics[0] - }] - ops = self.generate_op_config(ops_config) - - program_config = ProgramConfig( - ops=ops, - weights={ - "conv2d_weight": - TensorConfig(data_gen=partial( - generate_weight1, dics)) - }, - inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, dics)) - }, - outputs=["output_data"]) - - yield program_config + batch_options = [1, 4] + strides_options = [[1, 2]] + paddings_options = [[0, 3], [1, 2, 3, 4]] + groups_options = [1, 3] + padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID'] + dilations_options = [[1, 1], [1, 2]] + 
data_format_options = ['NCHW'] + + configurations = [ + batch_options, + strides_options, + paddings_options, + groups_options, + padding_algorithm_options, + dilations_options, + data_format_options, + ] + + for (batch, strides, paddings, groups, padding_algorithm, dilations, + data_format) in itertools.product(*configurations): + attrs = [{ + "strides": strides, + "paddings": paddings, + "groups": groups, + "padding_algorithm": padding_algorithm, + "dilations": dilations, + "data_format": data_format, + }] + + ops_config = [{ + "op_type": "depthwise_conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"] + }, + "op_outputs": { + "Output": ["output_data"] + }, + "op_attrs": attrs[0] + }] + ops = self.generate_op_config(ops_config) + + program_config = ProgramConfig( + ops=ops, + weights={ + "conv2d_weight": + TensorConfig(data_gen=partial(generate_weight1, attrs)) + }, + inputs={ + "input_data": + TensorConfig( + data_gen=partial(generate_input1, batch, attrs)) + }, + outputs=["output_data"]) + + yield program_config def sample_predictor_configs( self, program_config) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): - if attrs[0]['groups'] == 1: - self.dynamic_shape.min_input_shape = { - "input_data": [1, 1, 32, 32], - "output_data": [1, 24, 32, 32] - } - self.dynamic_shape.max_input_shape = { - "input_data": [4, 1, 64, 64], - "output_data": [4, 24, 64, 64] - } - self.dynamic_shape.opt_input_shape = { - "input_data": [1, 1, 64, 64], - "output_data": [1, 24, 64, 64] - } - elif attrs[0]['groups'] == 2: - self.dynamic_shape.min_input_shape = { - "input_data": [1, 2, 32, 32], - "output_data": [1, 24, 32, 32] - } - self.dynamic_shape.max_input_shape = { - "input_data": [4, 2, 64, 64], - "output_data": [4, 24, 64, 64] - } - self.dynamic_shape.opt_input_shape = { - "input_data": [1, 2, 64, 64], - "output_data": [1, 24, 64, 64] - } - else: - self.dynamic_shape.min_input_shape = { - "input_data": [1, 3, 32, 32], - "output_data": [1, 24, 32, 32] - } - self.dynamic_shape.max_input_shape = { - "input_data": [4, 3, 64, 64], - "output_data": [4, 24, 64, 64] - } - self.dynamic_shape.opt_input_shape = { - "input_data": [1, 3, 64, 64], - "output_data": [1, 24, 64, 64] - } + groups = attrs[0]['groups'] + self.dynamic_shape.min_input_shape = { + "input_data": [1, groups, 32, 32], + "output_data": [1, 24, 32, 32] + } + self.dynamic_shape.max_input_shape = { + "input_data": [4, groups, 64, 64], + "output_data": [4, 24, 64, 64] + } + self.dynamic_shape.opt_input_shape = { + "input_data": [1, groups, 64, 64], + "output_data": [1, 24, 64, 64] + } def clear_dynamic_shape(): self.dynamic_shape.min_input_shape = {} self.dynamic_shape.max_input_shape = {} self.dynamic_shape.opt_input_shape = {} - def generate_trt_nodes_num(attrs, dynamic_shape): + def generate_trt_nodes_num(): return 1, 2 attrs = [ @@ -156,26 +137,24 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): # for static_shape clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 - yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + yield self.create_inference_config(), generate_trt_nodes_num(), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, + 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 - yield self.create_inference_config(), 
generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, + 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 - yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + yield self.create_inference_config(), generate_trt_nodes_num(), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, + 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 - yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, + 1e-3) def add_skip_trt_case(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py index f32dfdb47c9541739e4f9fc793ae62b9b37f70c9..431b3c27e422adb3f48e698a9e23fb1d555d1df5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py @@ -145,7 +145,7 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-3) + attrs, False), (1e-3, 1e-3) # self.trt_param.precision = paddle_infer.PrecisionType.Int8 # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, False), (1e-5, 1e-5) @@ -157,7 +157,7 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True), (1e-3, 1e-3) # self.trt_param.precision = paddle_infer.PrecisionType.Int8 # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, True), (1e-5, 1e-5) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py index c692b3f9d677f77c4769fb3fe593dc93e1b1902d..5a1cc19c618b8d42301c5bb77292e7d28a08b4a3 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py @@ -105,7 +105,7 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -114,7 +114,7 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -200,7 +200,7 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = 
paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -209,7 +209,7 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -316,7 +316,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -325,7 +325,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -460,14 +460,14 @@ class TrtConvertElementwiseTest_two_input_without_broadcast( attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), (1, 3), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), (1, 3), 1e-5 + yield self.create_inference_config(), (1, 3), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -590,14 +590,14 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), (1, 3), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), (1, 3), 1e-5 + yield self.create_inference_config(), (1, 3), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), (1, 3), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), (1, 3), 1e-5 + yield self.create_inference_config(), (1, 3), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -706,14 +706,14 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), (0, 3), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), (0, 3), 1e-5 + yield self.create_inference_config(), (0, 3), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), (1, 2), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), (1, 2), 1e-5 + yield self.create_inference_config(), (1, 2), (1e-3, 1e-3) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py index 
da947dc35dfdea6bbc98fe3a0c9655b4474d9f83..e9f9b70b91671533da033ed31f614555f99c2804 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py @@ -103,7 +103,7 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -112,7 +112,7 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() @@ -196,11 +196,12 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest): # for static_shape clear_dynamic_shape() + self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -209,7 +210,7 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() @@ -297,7 +298,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -306,7 +307,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() @@ -394,7 +395,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -403,7 +404,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py index 406f5e1a13ca8989afb959399e06b91fa8948a13..42b43982828180bead819905312b48618133c129 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py @@ -95,11 +95,12 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest): # for static_shape clear_dynamic_shape() + self.trt_param.precision 
= paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -108,7 +109,7 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py index 5f5664d2aa433b501c807a8a3f3ac3104bb659a6..220611517e0634d6074b08cf8540e5abdc2f7830 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py @@ -41,8 +41,8 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest): return np.ones([1, 3, 32, 32]).astype(np.float32) for threshold in [6.0, 7.0, 100.0, 0.0, -1.0]: - for scale in [5.0, 6.0, 7.0, -1.0, 0.0, 100.0]: - for offset in [3.0, 4.0, 5.0, -1.0, 0.0, 100.0]: + for scale in [5.0, 7.0, -1.0, 0.0, 100.0]: + for offset in [3.0, 5.0, -1.0, 0.0, 100.0]: dics = [{ "threshold": threshold, "scale": scale, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py index 063fbba1a07c1b97020e75db123873a21498ad86..0dd003487c600cad41381a4f9b39418aa018bba4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py @@ -143,7 +143,7 @@ class TrtConvertMishTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -152,7 +152,7 @@ class TrtConvertMishTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py index 24e80e01e9707d4dd4c817dbd63473571a9a5a27..b543484d89251f766c078866865943154a6ee3f7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py @@ -19,6 +19,7 @@ import paddle.inference as paddle_infer from functools import partial from typing import Optional, List, Callable, Dict, Any, Set import unittest +import itertools class TrtConvertPool2dTest(TrtLayerAutoScanTest): @@ -52,69 +53,71 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): def generate_weight1(attrs: List[Dict[str, Any]]): return np.random.random([24, 3, 3, 3]).astype(np.float32) - for strides in [[1, 1], [1, 2], [2, 2]]: - for paddings in [[0, 2], [0, 3]]: - for pooling_type in ['max', 'avg']: - for padding_algotithm in ['EXPLICIT', 'SAME', 'VAILD']: - for ksize in [[2, 
3], [3, 3]]: for data_format in ['NCHW']: for global_pooling in [True, False]: for exclusive in [False, True]: for adaptive in [True, False]: for ceil_mode in [False, True]: - - dics = [{ - "pooling_type": - pooling_type, - "ksize": ksize, - "data_fromat": data_format, - "padding_algorithm": - padding_algotithm, - "paddings": paddings, - "strides": strides, - "data_format": data_format, - "global_pooling": - global_pooling, - "exclusive": exclusive, - "adaptive": adaptive, - "ceil_mode": ceil_mode - }] - - ops_config = [{ - "op_type": - "pool2d", - "op_inputs": { - "X": ["input_data"], - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": - dics[0] - }] - ops = self.generate_op_config( - ops_config) - - program_config = ProgramConfig( - ops=ops, - weights={}, - inputs={ - "input_data": - TensorConfig( - data_gen=partial( - generate_input1, - dics)) - }, - outputs=["output_data"]) - - yield program_config + strides_options = [[1, 2]] + paddings_options = [[0, 2]] + pooling_type_options = ['max', 'avg'] + padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID'] + ksize_options = [[2, 3], [3, 3]] + data_format_options = ['NCHW'] + global_pooling_options = [True, False] + exclusive_options = [True, False] + adaptive_option = [True, False] + ceil_mode_options = [True, False] + + configurations = [ + strides_options, paddings_options, pooling_type_options, + padding_algorithm_options, ksize_options, data_format_options, + global_pooling_options, exclusive_options, adaptive_option, + ceil_mode_options + ] + + for (strides, paddings, pooling_type, padding_algorithm, ksize, + data_format, global_pooling, exclusive, adaptive, + ceil_mode) in itertools.product(*configurations): + + attrs = [{ + "strides": strides, + "paddings": paddings, + "pooling_type": pooling_type, + "padding_algorithm": padding_algorithm, + "ksize": ksize, + "data_format": data_format, + "global_pooling": global_pooling, + "exclusive": exclusive, + "adaptive": adaptive, + "ceil_mode": ceil_mode, + }] + + ops_config = [{ + "op_type": "pool2d", + "op_inputs": { + "X": ["input_data"] + }, + "op_outputs": { + "Out": ["output_data"] + }, + "op_attrs": attrs[0] + }] + + ops = self.generate_op_config(ops_config) + + program_config = ProgramConfig( + ops=ops, + weights={}, + inputs={ + "input_data": + TensorConfig(data_gen=partial(generate_input1, attrs)) + }, + outputs=["output_data"]) + + yield program_config def sample_predictor_configs( self, program_config) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} - self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} + self.dynamic_shape.max_input_shape = {"input_data": [1, 3, 64, 64]} self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]} def clear_dynamic_shape(): @@ -136,7 +139,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -145,7 +148,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py index 49a750f14dd369f431cbee278ab90d3298d033b9..660afecaaa90f916739669e0e91070b113f963ab 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py @@ -192,7 +192,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -201,7 +201,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): ver = paddle_infer.get_trt_compile_version() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py index 1765760e15c4358ee32689c299685d46845058d4..7f44e356030df5ce8c2bad30a03a1da03459ae27 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py @@ -137,7 +137,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -146,7 +146,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py index 1956c2333ad01a3c29c4631fbfa93b0cacc2b2d8..145a5df781b4a633ba9c58e6ff79440fbb48ce02 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py @@ -198,7 +198,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): # attrs, False), 1e-5 # self.trt_param.precision = paddle_infer.PrecisionType.Half # yield self.create_inference_config(), generate_trt_nodes_num( - # attrs, False), 1e-5 + # attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -207,7 +207,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py index 1ae92dc527aa9686f4e5e56818b445dd34c678d3..2d667d645ccdc876dbef2fc42c9bc6caeae77ce4 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py @@ -118,7 +118,7 @@ class 
TrtConvertSwishTest(TrtLayerAutoScanTest): attrs, False), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -127,7 +127,7 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): attrs, True), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True), (1e-3, 1e-3) def test(self): self.run_test()
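
The auto_scan_test.py hunk above replaces the global np.random.seed call with a dedicated generator that is only used to decide which cases to run. A minimal standalone sketch of that sampling scheme, assuming a hard-coded 0.1 fraction and a stub case list (both illustrative, not from the patch):

import time
import numpy as np

num_percent_cases = 0.1  # stand-in for the TEST_NUM_PERCENT_CASES env var

# Seeded by the week number, so the selected subset is stable within a week
# but rotates week to week, as in TrtLayerAutoScanTest.__init__.
skip_rng = np.random.default_rng(int(time.strftime("%W")))

def random_to_skip():
    # Keep a case with probability num_percent_cases, skip it otherwise,
    # mirroring the helper inside run_test.
    return skip_rng.random() >= num_percent_cases

cases = range(100)  # stand-in for sample_program_configs(...)
selected = [case for case in cases if not random_to_skip()]
print(len(selected))  # roughly 10 of the 100 cases in any given week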
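Most converter tests above now yield an (atol, rtol) pair such as (1e-3, 1e-3) for Half and Int8 runs instead of a single 1e-5 value. A minimal sketch of how such a pair feeds the new assert_tensors_near helper; the sample tensors are illustrative, and the scalar-versus-tuple unpacking done by the harness is assumed:

import numpy as np

def assert_tensors_near(atol, rtol, tensor, baseline):
    # Same comparison as the helper added to TrtLayerAutoScanTest, minus unittest.
    for key, arr in tensor.items():
        assert baseline[key].shape == arr.shape, (
            'The output shapes are not equal, the baseline shape is '
            + str(baseline[key].shape) + ', but got ' + str(arr.shape))
        np.testing.assert_allclose(baseline[key], arr, rtol=rtol, atol=atol)

baseline = {"output_data": np.ones((1, 24, 64, 64), dtype=np.float32)}
fp16_like = {"output_data": baseline["output_data"] + 5e-4}  # small FP16-scale error
assert_tensors_near(1e-3, 1e-3, fp16_like, baseline)  # passes under the relaxed tolerance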