Unverified commit 3512bf11 authored by zlsh80826 and committed by GitHub

Refine TRT unit test (#45102)

* Reduce pool2d test configuration

* Reduce depthwise_conv2d test configuration

* Reduce trt_convert_conv2d_fusion test configuration

* Reduce trt_convert_conv2d test configuration

* Reduce trt_convert_conv2d_transpose test configuration

* Reduce trt_convert_hard_swish test configuration

* Enhance trt auto scan test error message and mechanism

* Increase FP16 trt ut tolerance
Parent c75b091b
...@@ -14,8 +14,8 @@ limitations under the License. */ ...@@ -14,8 +14,8 @@ limitations under the License. */
#include <glog/logging.h> #include <glog/logging.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "gflags/gflags.h" #include "gflags/gflags.h"
#include "paddle/fluid/inference/tensorrt/helper.h" #include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/inference/tests/api/trt_test_helper.h" #include "paddle/fluid/inference/tests/api/trt_test_helper.h"
...@@ -423,7 +423,7 @@ TEST(AnalysisPredictor, ernie_varlen) { ...@@ -423,7 +423,7 @@ TEST(AnalysisPredictor, ernie_varlen) {
run(predictor.get(), &out_data); run(predictor.get(), &out_data);
std::vector<float> ref_data{ std::vector<float> ref_data{
0.59814, 0.219882, 0.181978, 0.359796, 0.577414, 0.0627908}; 0.59814, 0.219882, 0.181978, 0.359796, 0.577414, 0.0627908};
float near_tolerance = 1e-3; float near_tolerance = 4e-3;
for (size_t i = 0; i < out_data.size(); i++) { for (size_t i = 0; i < out_data.size(); i++) {
EXPECT_NEAR(ref_data[i], out_data[i], near_tolerance); EXPECT_NEAR(ref_data[i], out_data[i], near_tolerance);
} }
......
...@@ -538,8 +538,9 @@ class TrtLayerAutoScanTest(AutoScanTest): ...@@ -538,8 +538,9 @@ class TrtLayerAutoScanTest(AutoScanTest):
self.dynamic_shape = self.DynamicShapeParam({}, {}, {}, False) self.dynamic_shape = self.DynamicShapeParam({}, {}, {}, False)
self.num_percent_cases = float( self.num_percent_cases = float(
os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) os.getenv('TEST_NUM_PERCENT_CASES', default='1.0'))
# Choose different tests by week
np.random.seed(int(time.strftime("%W"))) # Use a separate random generator for skipping tests
self.skip_rng = np.random.default_rng(int(time.strftime("%W")))
def create_inference_config(self, use_trt=True) -> paddle_infer.Config: def create_inference_config(self, use_trt=True) -> paddle_infer.Config:
config = paddle_infer.Config() config = paddle_infer.Config()
...@@ -555,11 +556,10 @@ class TrtLayerAutoScanTest(AutoScanTest): ...@@ -555,11 +556,10 @@ class TrtLayerAutoScanTest(AutoScanTest):
precision_mode=self.trt_param.precision, precision_mode=self.trt_param.precision,
use_static=self.trt_param.use_static, use_static=self.trt_param.use_static,
use_calib_mode=self.trt_param.use_calib_mode) use_calib_mode=self.trt_param.use_calib_mode)
if len(self.dynamic_shape.min_input_shape if self.dynamic_shape.min_input_shape and (
) != 0 and self.dynamic_shape.min_input_shape.keys( self.dynamic_shape.min_input_shape.keys() ==
) == self.dynamic_shape.max_input_shape.keys( self.dynamic_shape.max_input_shape.keys() ==
) and self.dynamic_shape.min_input_shape.keys( self.dynamic_shape.opt_input_shape.keys()):
) == self.dynamic_shape.opt_input_shape.keys():
config.set_trt_dynamic_shape_info( config.set_trt_dynamic_shape_info(
self.dynamic_shape.min_input_shape, self.dynamic_shape.min_input_shape,
self.dynamic_shape.max_input_shape, self.dynamic_shape.max_input_shape,
...@@ -567,6 +567,16 @@ class TrtLayerAutoScanTest(AutoScanTest): ...@@ -567,6 +567,16 @@ class TrtLayerAutoScanTest(AutoScanTest):
self.dynamic_shape.disable_trt_plugin_fp16) self.dynamic_shape.disable_trt_plugin_fp16)
return config return config
def assert_tensors_near(self, atol: float, rtol: float,
tensor: Dict[str, np.array],
baseline: Dict[str, np.array]):
for key, arr in tensor.items():
self.assertEqual(
baseline[key].shape, arr.shape,
'The output shapes are not equal, the baseline shape is ' +
str(baseline[key].shape) + ', but got ' + str(arr.shape))
np.testing.assert_allclose(baseline[key], arr, rtol=rtol, atol=atol)
def assert_op_size(self, trt_engine_num, paddle_op_num): def assert_op_size(self, trt_engine_num, paddle_op_num):
last_passed_program = os.path.join( last_passed_program = os.path.join(
self.cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel') self.cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel')
...@@ -579,14 +589,14 @@ class TrtLayerAutoScanTest(AutoScanTest): ...@@ -579,14 +589,14 @@ class TrtLayerAutoScanTest(AutoScanTest):
] ]
trt_engine_size = sum(op_types) trt_engine_size = sum(op_types)
paddle_op_size = op_size - trt_engine_size paddle_op_size = op_size - trt_engine_size
self.assertTrue( self.assertEqual(
trt_engine_size == trt_engine_num, trt_engine_num, trt_engine_size,
'trt_engine_num is {}, but got {}!'.format(trt_engine_size, 'Expected trt_engine_num is {}, but got {}!'.format(
trt_engine_num)) trt_engine_num, trt_engine_size))
self.assertTrue( self.assertEqual(
paddle_op_size == paddle_op_num, paddle_op_num, paddle_op_size,
'paddle_op_num is {}, but got {}!'.format(paddle_op_size, 'Expected paddle_op_num is {}, but got {}!'.format(
paddle_op_num)) paddle_op_num, paddle_op_size))
def inference_config_str(self, config: paddle_infer.Config) -> str: def inference_config_str(self, config: paddle_infer.Config) -> str:
dic = {} dic = {}
...@@ -602,18 +612,16 @@ class TrtLayerAutoScanTest(AutoScanTest): ...@@ -602,18 +612,16 @@ class TrtLayerAutoScanTest(AutoScanTest):
return str(dic) return str(dic)
def run_test(self, quant=False, skip_baseline=False, *args, **kwargs): def run_test(self, quant=False, skip_baseline=False, *args, **kwargs):
status = True all_passes = True
run_flags = []
def random_to_skip():
if self.skip_rng.random() < self.num_percent_cases:
return False
return True
for prog_config in self.sample_program_configs(*args, **kwargs): for prog_config in self.sample_program_configs(*args, **kwargs):
# In CI, only run 10% cases
if np.random.rand() < self.num_percent_cases: if random_to_skip():
run_flags.append(True)
else:
run_flags.append(False)
for prog_config, run_flags in zip(
self.sample_program_configs(*args, **kwargs), run_flags):
if not run_flags:
continue continue
# If the program is invalid, we should skip those cases. # If the program is invalid, we should skip those cases.
...@@ -657,30 +665,32 @@ class TrtLayerAutoScanTest(AutoScanTest): ...@@ -657,30 +665,32 @@ class TrtLayerAutoScanTest(AutoScanTest):
else: else:
raise NotImplementedError raise NotImplementedError
if quant and pred_config.tensorrt_precision_mode( if pred_config.tensorrt_precision_mode(
) != paddle_infer.PrecisionType.Int8: ) != paddle_infer.PrecisionType.Int8 and quant:
continue continue
if pred_config.tensorrt_precision_mode( if pred_config.tensorrt_precision_mode(
) == paddle_infer.PrecisionType.Int8 and not quant: ) == paddle_infer.PrecisionType.Int8 and not quant:
continue continue
ignore_flag = False ignore_flag = False
for ignore_info in self.ignore_cases: for teller, reason, note in self.ignore_cases:
if ignore_info[0](prog_config, pred_config): if teller(prog_config, pred_config):
ignore_flag = True ignore_flag = True
if ignore_info[1] == IgnoreReasons.TRT_NOT_IMPLEMENTED: if reason == IgnoreReasons.TRT_NOT_IMPLEMENTED:
self.ignore_log( self.ignore_log(
"[TRT_NOT_IMPLEMENTED] " + ignore_info[2] + '[TRT_NOT_IMPLEMENTED] {} vs {}'.format(
' ' + ' vs ' + note,
self.inference_config_str(pred_config)) self.inference_config_str(pred_config)))
elif ignore_info[1] == IgnoreReasons.TRT_NOT_SUPPORT: elif reason == IgnoreReasons.TRT_NOT_SUPPORT:
self.ignore_log( self.ignore_log('[TRT_NOT_SUPPORT] {} vs {}'.format(
"[TRT_NOT_SUPPORT] " + ignore_info[2] + ' ' + note, self.inference_config_str(pred_config)))
' vs ' + self.inference_config_str(pred_config))
else: else:
raise NotImplementedError raise NotImplementedError
break break
if ignore_flag:
continue
try: try:
pred_config_deserialize = paddle_infer.Config(pred_config) pred_config_deserialize = paddle_infer.Config(pred_config)
results.append( results.append(
...@@ -688,24 +698,23 @@ class TrtLayerAutoScanTest(AutoScanTest): ...@@ -688,24 +698,23 @@ class TrtLayerAutoScanTest(AutoScanTest):
pred_config, feed_data)) pred_config, feed_data))
self.assert_tensors_near(atol, rtol, results[-1], self.assert_tensors_near(atol, rtol, results[-1],
results[0]) results[0])
if not ignore_flag: trt_engine_num, paddle_op_num = nodes_num
self.assert_op_size(nodes_num[0], nodes_num[1]) self.assert_op_size(trt_engine_num, paddle_op_num)
# deserialize test # deserialize test
if nodes_num[0] > 0: if trt_engine_num > 0:
self.run_test_config(model, params, prog_config, self.run_test_config(model, params, prog_config,
pred_config_deserialize, feed_data) pred_config_deserialize, feed_data)
self.success_log('RUN predictor_config {} done'.format(
self.inference_config_str(pred_config)))
except Exception as e: except Exception as e:
self.fail_log( self.fail_log(
self.inference_config_str(pred_config) + self.inference_config_str(pred_config) +
'\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e))) '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
if not ignore_flag: all_passes = False
status = False
continue
self.success_log('RUN predictor_config ' +
self.inference_config_str(pred_config) +
' done')
self.assertTrue(status) self.assertTrue(all_passes)
# TODO(wilber): just for backward compatible # TODO(wilber): just for backward compatible
def add_skip_case(self, teller: [ def add_skip_case(self, teller: [
......
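The harness change above samples only a fraction of the generated program configs per run: the fraction comes from the TEST_NUM_PERCENT_CASES environment variable, and a dedicated RNG seeded with the ISO week number keeps the selected subset stable within a CI week while staying independent of the np.random calls used for weight and input generation. A minimal, self-contained sketch of that mechanism, assuming a hypothetical SampledCaseRunner class rather than the actual TrtLayerAutoScanTest API:

import os
import time
import numpy as np

class SampledCaseRunner:
    """Illustrative sketch: run only a percentage of generated cases per CI week."""

    def __init__(self):
        # Fraction of cases to run, configurable via environment variable.
        self.num_percent_cases = float(
            os.getenv('TEST_NUM_PERCENT_CASES', default='1.0'))
        # A separate generator for skipping keeps case selection independent
        # of any other np.random usage (e.g. weight/input generation).
        self.skip_rng = np.random.default_rng(int(time.strftime("%W")))

    def random_to_skip(self) -> bool:
        # Keep a case with probability num_percent_cases, skip otherwise.
        return self.skip_rng.random() >= self.num_percent_cases

    def run(self, cases):
        executed = []
        for case in cases:
            if self.random_to_skip():
                continue
            executed.append(case)
        return executed

if __name__ == "__main__":
    runner = SampledCaseRunner()
    print(runner.run(list(range(10))))  # e.g. all 10 when TEST_NUM_PERCENT_CASES=1.0

With TEST_NUM_PERCENT_CASES=0.1 roughly one case in ten is executed, which is the CI behaviour the original weekly np.random.seed call was approximating.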
...@@ -135,7 +135,7 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest): ...@@ -135,7 +135,7 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -144,7 +144,7 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest): ...@@ -144,7 +144,7 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
......
...@@ -205,7 +205,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest): ...@@ -205,7 +205,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -214,7 +214,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest): ...@@ -214,7 +214,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
......
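Across these converter tests the FP16 runs now yield a tolerance pair instead of a single scalar, and the harness checks shapes explicitly before comparing values with numpy's allclose. A simplified standalone sketch of that comparison (the real version is a unittest method on the auto-scan test class; the sample tensors below are hypothetical):

from typing import Dict
import numpy as np

def assert_tensors_near(atol: float, rtol: float,
                        tensor: Dict[str, np.ndarray],
                        baseline: Dict[str, np.ndarray]) -> None:
    """Compare every output tensor against the baseline within (atol, rtol)."""
    for key, arr in tensor.items():
        # Shape mismatches produce a clear message instead of a cryptic allclose error.
        assert baseline[key].shape == arr.shape, (
            'The output shapes are not equal, the baseline shape is '
            f'{baseline[key].shape}, but got {arr.shape}')
        np.testing.assert_allclose(baseline[key], arr, rtol=rtol, atol=atol)

# FP32 runs keep the tight 1e-5 tolerance, while FP16 runs use the relaxed pair.
atol, rtol = (1e-3, 1e-3)
baseline = {"output_data": np.ones((1, 24, 32, 32), dtype=np.float32)}
fp16_out = {"output_data": np.ones((1, 24, 32, 32), dtype=np.float32) + 5e-4}
assert_tensors_near(atol, rtol, fp16_out, baseline)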
...@@ -12,13 +12,14 @@ ...@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import unittest import unittest
import paddle.inference as paddle_infer import itertools
from functools import partial from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set from typing import Optional, List, Callable, Dict, Any, Set
import numpy as np
import paddle.inference as paddle_infer
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
class TrtConvertConv2dTest(TrtLayerAutoScanTest): class TrtConvertConv2dTest(TrtLayerAutoScanTest):
...@@ -52,62 +53,74 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): ...@@ -52,62 +53,74 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
def generate_weight1(attrs: List[Dict[str, Any]]): def generate_weight1(attrs: List[Dict[str, Any]]):
return np.random.random([24, 3, 3, 3]).astype(np.float32) return np.random.random([24, 3, 3, 3]).astype(np.float32)
for batch in [1, 4]: batch_options = [1, 2]
for strides in [[1, 1], [2, 2], [1, 2]]: strides_options = [[2, 2], [1, 2]]
for paddings in [[0, 3], [1, 2, 3, 4]]: paddings_options = [[0, 3], [1, 2, 3, 4]]
for groups in [1, 3]: groups_options = [1, 3]
for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']: padding_altorithm_options = ['EXPLICIT', 'SAME', 'VALID']
for dilations in [[1, 1], [2, 2], [1, 2]]: dilations_options = [[1, 2]]
for data_format in ['NCHW']: data_format_options = ['NCHW']
dics = [{ configurations = [
"data_fromat": data_format, batch_options,
"dilations": dilations, strides_options,
"padding_algorithm": padding_algorithm, paddings_options,
"groups": groups, groups_options,
"paddings": paddings, padding_altorithm_options,
"strides": strides, dilations_options,
"data_format": data_format data_format_options,
}, {}] ]
ops_config = [{ for batch, strides, paddings, groups, padding_algorithm, dilations, data_format in itertools.product(
"op_type": "conv2d", *configurations):
"op_inputs": {
"Input": ["input_data"], attrs = [{
"Filter": ["conv2d_weight"] "data_fromat": data_format,
}, "dilations": dilations,
"op_outputs": { "padding_algorithm": padding_algorithm,
"Output": ["conv_output_data"] "groups": groups,
}, "paddings": paddings,
"op_attrs": dics[0] "strides": strides,
}, { "data_format": data_format,
"op_type": "relu", }, {}]
"op_inputs": {
"X": ["conv_output_data"] ops_config = [{
}, "op_type": "conv2d",
"op_outputs": { "op_inputs": {
"Out": ["output_data"] "Input": ["input_data"],
}, "Filter": ["conv2d_weight"]
"op_attrs": dics[1] },
}] "op_outputs": {
"Output": ["conv_output_data"]
ops = self.generate_op_config(ops_config) },
"op_attrs": attrs[0]
program_config = ProgramConfig( }, {
ops=ops, "op_type": "relu",
weights={ "op_inputs": {
"conv2d_weight": "X": ["conv_output_data"]
TensorConfig(data_gen=partial( },
generate_weight1, dics)) "op_outputs": {
}, "Out": ["output_data"]
inputs={ },
"input_data": "op_attrs": attrs[1]
TensorConfig(data_gen=partial( }]
generate_input1, batch, dics))
}, ops = self.generate_op_config(ops_config)
outputs=["output_data"])
program_config = ProgramConfig(
yield program_config ops=ops,
weights={
"conv2d_weight":
TensorConfig(data_gen=partial(generate_weight1, attrs))
},
inputs={
"input_data":
TensorConfig(
data_gen=partial(generate_input1, batch, attrs))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs( def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float): self, program_config) -> (paddle_infer.Config, List[int], float):
...@@ -146,10 +159,10 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): ...@@ -146,10 +159,10 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5) attrs, False), (1e-3, 1e-3)
self.trt_param.precision = paddle_infer.PrecisionType.Int8 self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5) attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -158,10 +171,10 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): ...@@ -158,10 +171,10 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5) attrs, True), (1e-3, 1e-3)
self.trt_param.precision = paddle_infer.PrecisionType.Int8 self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5) attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
......
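The conv2d test above replaces seven nested for loops with a flat itertools.product over per-attribute option lists, so trimming any single list shrinks the whole Cartesian product and the test runtime. A minimal sketch of the pattern, using the option values from this diff but only counting combinations instead of building ProgramConfig objects:

import itertools

# One list of candidate values per attribute.
batch_options = [1, 2]
strides_options = [[2, 2], [1, 2]]
paddings_options = [[0, 3], [1, 2, 3, 4]]
groups_options = [1, 3]
padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID']
dilations_options = [[1, 2]]
data_format_options = ['NCHW']

configurations = [
    batch_options,
    strides_options,
    paddings_options,
    groups_options,
    padding_algorithm_options,
    dilations_options,
    data_format_options,
]

count = 0
for (batch, strides, paddings, groups, padding_algorithm, dilations,
     data_format) in itertools.product(*configurations):
    # A real test would assemble op attrs and yield a ProgramConfig here.
    count += 1
print(count)  # 2 * 2 * 2 * 2 * 3 * 1 * 1 = 48 configurations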
...@@ -12,13 +12,14 @@ ...@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons import unittest
from program_config import TensorConfig, ProgramConfig from itertools import product
import numpy as np
import paddle.inference as paddle_infer
from functools import partial from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set from typing import Optional, List, Callable, Dict, Any, Set
import unittest import numpy as np
import paddle.inference as paddle_infer
from program_config import TensorConfig, ProgramConfig
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
...@@ -58,68 +59,78 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): ...@@ -58,68 +59,78 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
def generate_weight2(attrs: List[Dict[str, Any]]): def generate_weight2(attrs: List[Dict[str, Any]]):
return np.random.random([24, 1, 1]).astype(np.float32) return np.random.random([24, 1, 1]).astype(np.float32)
for batch in [1, 4]: batch_options = [1, 2]
for strides in [[1, 1], [2, 2], [1, 2]]: strides_options = [[1, 2], [2, 2]]
for paddings in [[0, 3], [1, 2, 3, 4]]: paddings_options = [[0, 3], [1, 2, 3, 4]]
for groups in [2, 3]: groups_options = [2, 3]
for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']: padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID']
for dilations in [[1, 1], [2, 2], [1, 2]]: dilations_options = [[1, 2]]
for data_format in ['NCHW']: data_format_options = ['NCHW']
dics = [{ configurations = [
"data_fromat": data_format, batch_options,
"dilations": dilations, strides_options,
"padding_algorithm": padding_algorithm, paddings_options,
"groups": groups, groups_options,
"paddings": paddings, padding_algorithm_options,
"strides": strides, dilations_options,
"data_format": data_format data_format_options,
}, { ]
"axis": 1
}] for (batch, strides, paddings, groups, padding_algorithm, dilations,
data_format) in product(*configurations):
ops_config = [{
"op_type": "conv2d", attrs = [{
"op_inputs": { "strides": strides,
"Input": ["input_data"], "paddings": paddings,
"Filter": ["conv2d_weight"] "groups": groups,
}, "padding_algorithm": padding_algorithm,
"op_outputs": { "dilations": dilations,
"Output": ["conv_output_data"] "data_format": data_format,
}, }, {
"op_attrs": dics[0] "axis": 1
}, { }]
"op_type": "elementwise_add",
"op_inputs": { ops_config = [{
"X": ["conv_output_data"], "op_type": "conv2d",
"Y": ["elementwise_weight"] "op_inputs": {
}, "Input": ["input_data"],
"op_outputs": { "Filter": ["conv2d_weight"]
"Out": ["output_data"] },
}, "op_outputs": {
"op_attrs": dics[1] "Output": ["conv_output_data"]
}] },
"op_attrs": attrs[0]
ops = self.generate_op_config(ops_config) }, {
"op_type": "elementwise_add",
program_config = ProgramConfig( "op_inputs": {
ops=ops, "X": ["conv_output_data"],
weights={ "Y": ["elementwise_weight"]
"conv2d_weight": },
TensorConfig(data_gen=partial( "op_outputs": {
generate_weight1, dics)), "Out": ["output_data"]
"elementwise_weight": },
TensorConfig(data_gen=partial( "op_attrs": attrs[1]
generate_weight2, dics)) }]
},
inputs={ ops = self.generate_op_config(ops_config)
"input_data":
TensorConfig(data_gen=partial( program_config = ProgramConfig(
generate_input1, batch, dics)) ops=ops,
}, weights={
outputs=["output_data"]) "conv2d_weight":
TensorConfig(data_gen=partial(generate_weight1, attrs)),
yield program_config "elementwise_weight":
TensorConfig(data_gen=partial(generate_weight2, attrs))
},
inputs={
"input_data":
TensorConfig(
data_gen=partial(generate_input1, batch, attrs))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs( def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float): self, program_config) -> (paddle_infer.Config, List[int], float):
...@@ -131,8 +142,8 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): ...@@ -131,8 +142,8 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
"output_data": [1, 24, 32, 32] "output_data": [1, 24, 32, 32]
} }
self.dynamic_shape.max_input_shape = { self.dynamic_shape.max_input_shape = {
"input_data": [4, input_groups, 64, 64], "input_data": [2, input_groups, 64, 64],
"output_data": [4, 24, 64, 64] "output_data": [2, 24, 64, 64]
} }
self.dynamic_shape.opt_input_shape = { self.dynamic_shape.opt_input_shape = {
"input_data": [1, input_groups, 64, 64], "input_data": [1, input_groups, 64, 64],
...@@ -158,10 +169,10 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): ...@@ -158,10 +169,10 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5) attrs, False), (1e-3, 1e-3)
self.trt_param.precision = paddle_infer.PrecisionType.Int8 self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5) attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -170,10 +181,10 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): ...@@ -170,10 +181,10 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5) attrs, True), (1e-3, 1e-3)
self.trt_param.precision = paddle_infer.PrecisionType.Int8 self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5) attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
......
...@@ -62,14 +62,14 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): ...@@ -62,14 +62,14 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
3]).astype(np.float32) 3]).astype(np.float32)
for num_channels in [2, 4, 6]: for num_channels in [2, 4, 6]:
for batch in [1, 2, 4]: for batch in [1, 4]:
for strides in [[1, 1], [2, 2], [1, 2]]: for strides in [[2, 2], [1, 2]]:
for paddings in [[0, 3], [1, 2, 3, 4]]: for paddings in [[0, 3], [1, 2, 3, 4]]:
for groups in [2]: for groups in [2]:
for padding_algorithm in [ for padding_algorithm in [
'EXPLICIT', 'SAME', 'VALID' 'EXPLICIT', 'SAME', 'VALID'
]: ]:
for dilations in [[1, 1], [2, 2], [1, 2]]: for dilations in [[2, 2], [1, 2]]:
for data_format in ['NCHW']: for data_format in ['NCHW']:
self.num_channels = num_channels self.num_channels = num_channels
......
...@@ -12,13 +12,14 @@ ...@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons import unittest
from program_config import TensorConfig, ProgramConfig import itertools
from functools import partial
from typing import List, Dict, Any
import numpy as np import numpy as np
from program_config import TensorConfig, ProgramConfig
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
import paddle.inference as paddle_infer import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest):
...@@ -40,113 +41,93 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): ...@@ -40,113 +41,93 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest):
self.trt_param.workspace_size = 1073741824 self.trt_param.workspace_size = 1073741824
def generate_input1(batch, attrs: List[Dict[str, Any]]): def generate_input1(batch, attrs: List[Dict[str, Any]]):
if attrs[0]['groups'] == 1: groups = attrs[0]['groups']
return np.ones([batch, 1, 64, 64]).astype(np.float32) return np.ones([batch, groups, 64, 64]).astype(np.float32)
elif attrs[0]['groups'] == 2:
return np.ones([batch, 2, 64, 64]).astype(np.float32)
else:
return np.ones([batch, 3, 64, 64]).astype(np.float32)
def generate_weight1(attrs: List[Dict[str, Any]]): def generate_weight1(attrs: List[Dict[str, Any]]):
return np.random.random([24, 1, 3, 3]).astype(np.float32) return np.random.random([24, 1, 3, 3]).astype(np.float32)
for batch in [1, 2, 4]: batch_options = [1, 4]
for strides in [[1, 1], [2, 2], [1, 2]]: strides_options = [[1, 2]]
for paddings in [[0, 3], [1, 2, 3, 4]]: paddings_options = [[0, 3], [1, 2, 3, 4]]
for groups in [1, 2, 3]: groups_options = [1, 3]
for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']: padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID']
for dilations in [[1, 1], [2, 2], [1, 2]]: dilations_options = [[1, 1], [1, 2]]
for data_format in ['NCHW']: data_format_options = ['NCHW']
dics = [{ configurations = [
"data_fromat": data_format, batch_options,
"dilations": dilations, strides_options,
"padding_algorithm": padding_algorithm, paddings_options,
"groups": groups, groups_options,
"paddings": paddings, padding_algorithm_options,
"strides": strides, dilations_options,
"data_format": data_format data_format_options,
}] ]
ops_config = [{ for (batch, strides, paddings, groups, padding_algorithm, dilations,
"op_type": "depthwise_conv2d", data_format) in itertools.product(*configurations):
"op_inputs": { attrs = [{
"Input": ["input_data"], "strides": strides,
"Filter": ["conv2d_weight"] "paddings": paddings,
}, "groups": groups,
"op_outputs": { "padding_algorithm": padding_algorithm,
"Output": ["output_data"] "dilations": dilations,
}, "data_fromat": data_format,
"op_attrs": dics[0] }]
}]
ops = self.generate_op_config(ops_config) ops_config = [{
"op_type": "depthwise_conv2d",
program_config = ProgramConfig( "op_inputs": {
ops=ops, "Input": ["input_data"],
weights={ "Filter": ["conv2d_weight"]
"conv2d_weight": },
TensorConfig(data_gen=partial( "op_outputs": {
generate_weight1, dics)) "Output": ["output_data"]
}, },
inputs={ "op_attrs": attrs[0]
"input_data": }]
TensorConfig(data_gen=partial( ops = self.generate_op_config(ops_config)
generate_input1, batch, dics))
}, program_config = ProgramConfig(
outputs=["output_data"]) ops=ops,
weights={
yield program_config "conv2d_weight":
TensorConfig(data_gen=partial(generate_weight1, attrs))
},
inputs={
"input_data":
TensorConfig(
data_gen=partial(generate_input1, batch, attrs))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs( def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float): self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs): def generate_dynamic_shape(attrs):
if attrs[0]['groups'] == 1: groups = attrs[0]['groups']
self.dynamic_shape.min_input_shape = { self.dynamic_shape.min_input_shape = {
"input_data": [1, 1, 32, 32], "input_data": [1, groups, 32, 32],
"output_data": [1, 24, 32, 32] "output_data": [1, 24, 32, 32]
} }
self.dynamic_shape.max_input_shape = { self.dynamic_shape.max_input_shape = {
"input_data": [4, 1, 64, 64], "input_data": [4, groups, 64, 64],
"output_data": [4, 24, 64, 64] "output_data": [4, 24, 64, 64]
} }
self.dynamic_shape.opt_input_shape = { self.dynamic_shape.opt_input_shape = {
"input_data": [1, 1, 64, 64], "input_data": [1, groups, 64, 64],
"output_data": [1, 24, 64, 64] "output_data": [1, 24, 64, 64]
} }
elif attrs[0]['groups'] == 2:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 2, 32, 32],
"output_data": [1, 24, 32, 32]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 2, 64, 64],
"output_data": [4, 24, 64, 64]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 2, 64, 64],
"output_data": [1, 24, 64, 64]
}
else:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 3, 32, 32],
"output_data": [1, 24, 32, 32]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 3, 64, 64],
"output_data": [4, 24, 64, 64]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 3, 64, 64],
"output_data": [1, 24, 64, 64]
}
def clear_dynamic_shape(): def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {} self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.max_input_shape = {} self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.opt_input_shape = {} self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num():
return 1, 2 return 1, 2
attrs = [ attrs = [
...@@ -156,26 +137,24 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): ...@@ -156,26 +137,24 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest):
# for static_shape # for static_shape
clear_dynamic_shape() clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32 self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(), 1e-5
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3,
attrs, False), (1e-5, 1e-5) 1e-3)
self.trt_param.precision = paddle_infer.PrecisionType.Int8 self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3,
attrs, False), (1e-5, 1e-5) 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32 self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(), 1e-5
attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3,
attrs, True), (1e-5, 1e-5) 1e-3)
self.trt_param.precision = paddle_infer.PrecisionType.Int8 self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3,
attrs, True), (1e-5, 1e-5) 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
......
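In the depthwise_conv2d test the three hard-coded branches for groups == 1, 2 and 3 collapse into shapes derived directly from the groups attribute, since for depthwise convolution the number of input channels equals groups. A small sketch of that simplification, with hypothetical helper names rather than the exact test methods:

from typing import Any, Dict, List
import numpy as np

def generate_input(batch: int, attrs: List[Dict[str, Any]]) -> np.ndarray:
    # Input channels follow the groups attribute, so no per-group branching is needed.
    groups = attrs[0]['groups']
    return np.ones([batch, groups, 64, 64]).astype(np.float32)

def dynamic_shapes(attrs: List[Dict[str, Any]]) -> Dict[str, Dict[str, list]]:
    # min/max/opt TRT profiles share the same groups-derived channel count.
    groups = attrs[0]['groups']
    return {
        "min_input_shape": {"input_data": [1, groups, 32, 32]},
        "max_input_shape": {"input_data": [4, groups, 64, 64]},
        "opt_input_shape": {"input_data": [1, groups, 64, 64]},
    }

attrs = [{"groups": 3}]
print(generate_input(1, attrs).shape)            # (1, 3, 64, 64)
print(dynamic_shapes(attrs)["max_input_shape"])  # {'input_data': [4, 3, 64, 64]}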
...@@ -145,7 +145,7 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): ...@@ -145,7 +145,7 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-3) attrs, False), (1e-3, 1e-3)
# self.trt_param.precision = paddle_infer.PrecisionType.Int8 # self.trt_param.precision = paddle_infer.PrecisionType.Int8
# yield self.create_inference_config(), generate_trt_nodes_num( # yield self.create_inference_config(), generate_trt_nodes_num(
# attrs, False), (1e-5, 1e-5) # attrs, False), (1e-5, 1e-5)
...@@ -157,7 +157,7 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): ...@@ -157,7 +157,7 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5) attrs, True), (1e-3, 1e-3)
# self.trt_param.precision = paddle_infer.PrecisionType.Int8 # self.trt_param.precision = paddle_infer.PrecisionType.Int8
# yield self.create_inference_config(), generate_trt_nodes_num( # yield self.create_inference_config(), generate_trt_nodes_num(
# attrs, True), (1e-5, 1e-5) # attrs, True), (1e-5, 1e-5)
......
...@@ -105,7 +105,7 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): ...@@ -105,7 +105,7 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -114,7 +114,7 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): ...@@ -114,7 +114,7 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
pass pass
...@@ -200,7 +200,7 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): ...@@ -200,7 +200,7 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -209,7 +209,7 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): ...@@ -209,7 +209,7 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
pass pass
...@@ -316,7 +316,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): ...@@ -316,7 +316,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -325,7 +325,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): ...@@ -325,7 +325,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
pass pass
...@@ -460,14 +460,14 @@ class TrtConvertElementwiseTest_two_input_without_broadcast( ...@@ -460,14 +460,14 @@ class TrtConvertElementwiseTest_two_input_without_broadcast(
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32 self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 3), 1e-5 yield self.create_inference_config(), (1, 3), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 3), 1e-5 yield self.create_inference_config(), (1, 3), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
pass pass
...@@ -590,14 +590,14 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): ...@@ -590,14 +590,14 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
self.trt_param.precision = paddle_infer.PrecisionType.Float32 self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 3), 1e-5 yield self.create_inference_config(), (1, 3), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 3), 1e-5 yield self.create_inference_config(), (1, 3), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32 self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 3), 1e-5 yield self.create_inference_config(), (1, 3), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 3), 1e-5 yield self.create_inference_config(), (1, 3), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
pass pass
...@@ -706,14 +706,14 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): ...@@ -706,14 +706,14 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
self.trt_param.precision = paddle_infer.PrecisionType.Float32 self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (0, 3), 1e-5 yield self.create_inference_config(), (0, 3), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (0, 3), 1e-5 yield self.create_inference_config(), (0, 3), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32 self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 2), 1e-5 yield self.create_inference_config(), (1, 2), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 2), 1e-5 yield self.create_inference_config(), (1, 2), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
pass pass
......
...@@ -103,7 +103,7 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest): ...@@ -103,7 +103,7 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -112,7 +112,7 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest): ...@@ -112,7 +112,7 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
...@@ -196,11 +196,12 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest): ...@@ -196,11 +196,12 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
# for static_shape # for static_shape
clear_dynamic_shape() clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -209,7 +210,7 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest): ...@@ -209,7 +210,7 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
...@@ -297,7 +298,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest): ...@@ -297,7 +298,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -306,7 +307,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest): ...@@ -306,7 +307,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
...@@ -394,7 +395,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest): ...@@ -394,7 +395,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -403,7 +404,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest): ...@@ -403,7 +404,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
......
...@@ -95,11 +95,12 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest): ...@@ -95,11 +95,12 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest):
# for static_shape # for static_shape
clear_dynamic_shape() clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -108,7 +109,7 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest): ...@@ -108,7 +109,7 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
......
...@@ -41,8 +41,8 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest): ...@@ -41,8 +41,8 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
return np.ones([1, 3, 32, 32]).astype(np.float32) return np.ones([1, 3, 32, 32]).astype(np.float32)
for threshold in [6.0, 7.0, 100.0, 0.0, -1.0]: for threshold in [6.0, 7.0, 100.0, 0.0, -1.0]:
for scale in [5.0, 6.0, 7.0, -1.0, 0.0, 100.0]: for scale in [5.0, 7.0, -1.0, 0.0, 100.0]:
for offset in [3.0, 4.0, 5.0, -1.0, 0.0, 100.0]: for offset in [3.0, 5.0, -1.0, 0.0, 100.0]:
dics = [{ dics = [{
"threshold": threshold, "threshold": threshold,
"scale": scale, "scale": scale,
......
...@@ -143,7 +143,7 @@ class TrtConvertMishTest(TrtLayerAutoScanTest): ...@@ -143,7 +143,7 @@ class TrtConvertMishTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -152,7 +152,7 @@ class TrtConvertMishTest(TrtLayerAutoScanTest): ...@@ -152,7 +152,7 @@ class TrtConvertMishTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
......
...@@ -19,6 +19,7 @@ import paddle.inference as paddle_infer ...@@ -19,6 +19,7 @@ import paddle.inference as paddle_infer
from functools import partial from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set from typing import Optional, List, Callable, Dict, Any, Set
import unittest import unittest
import itertools
class TrtConvertPool2dTest(TrtLayerAutoScanTest): class TrtConvertPool2dTest(TrtLayerAutoScanTest):
...@@ -52,69 +53,71 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ...@@ -52,69 +53,71 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest):
def generate_weight1(attrs: List[Dict[str, Any]]): def generate_weight1(attrs: List[Dict[str, Any]]):
return np.random.random([24, 3, 3, 3]).astype(np.float32) return np.random.random([24, 3, 3, 3]).astype(np.float32)
for strides in [[1, 1], [1, 2], [2, 2]]: strides_options = [[1, 2]]
for paddings in [[0, 2], [0, 3]]: paddings_options = [[0, 2]]
for pooling_type in ['max', 'avg']: pooling_type_options = ['max', 'avg']
for padding_algotithm in ['EXPLICIT', 'SAME', 'VAILD']: padding_algorithm_options = ['EXPLICIT', 'SAME', 'VALID']
for ksize in [[2, 3], [3, 3]]: ksize_options = [[2, 3], [3, 3]]
for data_format in ['NCHW']: data_format_options = ['NCHW']
for global_pooling in [True, False]: global_pooling_options = [True, False]
for exclusive in [False, True]: exclusive_options = [True, False]
for adaptive in [True, False]: adaptive_option = [True, False]
for ceil_mode in [False, True]: ceil_mode_options = [True, False]
dics = [{ configurations = [
"pooling_type": strides_options, paddings_options, pooling_type_options,
pooling_type, padding_algorithm_options, ksize_options, data_format_options,
"ksize": ksize, global_pooling_options, exclusive_options, adaptive_option,
"data_fromat": data_format, ceil_mode_options
"padding_algorithm": ]
padding_algotithm,
"paddings": paddings, for (strides, paddings, pooling_type, padding_algorithm, ksize,
"strides": strides, data_format, global_pooling, exclusive, adaptive,
"data_format": data_format, ceil_mode) in itertools.product(*configurations):
"global_pooling":
global_pooling, attrs = [{
"exclusive": exclusive, "strides": strides,
"adaptive": adaptive, "paddings": paddings,
"ceil_mode": ceil_mode "pooling_type": pooling_type,
}] "padding_algorithm": padding_algorithm,
"ksize": ksize,
ops_config = [{ "data_format": data_format,
"op_type": "global_pooling": global_pooling,
"pool2d", "exclusive": exclusive,
"op_inputs": { "adaptive": adaptive,
"X": ["input_data"], "ceil_mode": ceil_mode,
}, }]
"op_outputs": {
"Out": ["output_data"] ops_config = [{
}, "op_type": "pool2d",
"op_attrs": "op_inputs": {
dics[0] "X": ["input_data"]
}] },
ops = self.generate_op_config( "op_outputs": {
ops_config) "Out": ["output_data"]
},
program_config = ProgramConfig( "op_attrs": attrs[0]
ops=ops, }]
weights={},
inputs={ ops = self.generate_op_config(ops_config)
"input_data":
TensorConfig( program_config = ProgramConfig(
data_gen=partial( ops=ops,
generate_input1, weights={},
dics)) inputs={
}, "input_data":
outputs=["output_data"]) TensorConfig(data_gen=partial(generate_input1, attrs))
},
yield program_config outputs=["output_data"])
yield program_config
def sample_predictor_configs( def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float): self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs): def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} self.dynamic_shape.max_input_shape = {"input_data": [1, 3, 64, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]} self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}
def clear_dynamic_shape(): def clear_dynamic_shape():
...@@ -136,7 +139,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ...@@ -136,7 +139,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -145,7 +148,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ...@@ -145,7 +148,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
......
...@@ -192,7 +192,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): ...@@ -192,7 +192,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -201,7 +201,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): ...@@ -201,7 +201,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
ver = paddle_infer.get_trt_compile_version() ver = paddle_infer.get_trt_compile_version()
......
...@@ -137,7 +137,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): ...@@ -137,7 +137,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -146,7 +146,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): ...@@ -146,7 +146,7 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
......
...@@ -198,7 +198,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): ...@@ -198,7 +198,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest):
# attrs, False), 1e-5 # attrs, False), 1e-5
# self.trt_param.precision = paddle_infer.PrecisionType.Half # self.trt_param.precision = paddle_infer.PrecisionType.Half
# yield self.create_inference_config(), generate_trt_nodes_num( # yield self.create_inference_config(), generate_trt_nodes_num(
# attrs, False), 1e-5 # attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -207,7 +207,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): ...@@ -207,7 +207,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def add_skip_trt_case(self): def add_skip_trt_case(self):
pass pass
......
...@@ -118,7 +118,7 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): ...@@ -118,7 +118,7 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest):
attrs, False), 1e-5 attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5 attrs, False), (1e-3, 1e-3)
# for dynamic_shape # for dynamic_shape
generate_dynamic_shape(attrs) generate_dynamic_shape(attrs)
...@@ -127,7 +127,7 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): ...@@ -127,7 +127,7 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest):
attrs, True), 1e-5 attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num( yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5 attrs, True), (1e-3, 1e-3)
def test(self): def test(self):
self.run_test() self.run_test()
......