Unverified commit f0422a28 authored by zhoutianzi666, committed by GitHub

[Paddle-TRT] Fix slice and bilinear_interp_v2 in TRT 7011 (#50187)

* Fix the TRT version guards in the slice converter and in the engine's dynamic-shape check
* Disable bilinear_interp_v2 for TRT versions below 7.1
* Add a version check in the Python unit test
Parent fd679d31
......@@ -112,8 +112,8 @@ class SliceOpConverter : public OpConverter {
}
}
// CI failed in trt 6015 but success in 7134, may be a trt bug
#if IS_TRT_VERSION_GE(7134)
// CI failed in trt 6015 but succeeded in 7011/7134; may be a TRT bug
#if IS_TRT_VERSION_GE(7000)
auto* size_tensor =
Sub(Min(Concat(end_vec_tensor), shape_tensor), start_tensor);
#else
......
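The integer arguments to IS_TRT_VERSION_GE / IS_TRT_VERSION_LT pack the TensorRT version into a single code; the Python unit test below uses the same convention (ver[0] * 1000 + ver[1] * 100 + ver[2] * 10). A minimal sketch of that mapping, assuming major * 1000 + minor * 100 + patch * 10 + build packing (the helper name is illustrative):

def trt_version_code(major, minor, patch, build=0):
    # Pack a TensorRT version into the code the #if guards compare against,
    # assuming the major*1000 + minor*100 + patch*10 + build convention.
    return major * 1000 + minor * 100 + patch * 10 + build

assert trt_version_code(6, 0, 1, 5) == 6015  # the failing CI build mentioned above
assert trt_version_code(7, 0, 1, 1) == 7011  # the "trt 7011" in the commit title
assert trt_version_code(7, 1, 3, 4) == 7134  # the old GE(7134) threshold
assert trt_version_code(7, 0, 1, 1) >= 7000  # TRT 7.0.1.1 now takes the Sub/Min path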
......@@ -199,8 +199,8 @@ void TensorRTEngine::FreezeNetwork() {
LOG(INFO) << "Run Paddle-TRT Dynamic Shape mode.";
for (int i = 0; i < max_profile_num_; i++) {
for (auto &input : min_input_shape_) {
#if IS_TRT_VERSION_LT(7000)
// trt6 will check all_of input > 0
#if IS_TRT_VERSION_LT(7100)
// trt6/trt7011 will check all_of input > 0
if (!(std::all_of(input.second.begin(),
input.second.end(),
[](int x) { return x > 0; }) &&
......
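Per the comment above, TRT 6.x and 7.0.x check that every dimension in the dynamic-shape min profile is strictly positive, which is why the guard is widened from LT(7000) to LT(7100). A rough Python re-expression of that all_of(x > 0) validation, using a hypothetical min_input_shape dict rather than Paddle's internal structure:

# Hypothetical example of a dynamic-shape min profile; the dict name and
# shape are illustrative, not Paddle internals.
min_input_shape = {"input_data": [1, 3, -1, 64]}

for name, dims in min_input_shape.items():
    if not all(d > 0 for d in dims):
        # TRT builds with version code < 7100 check that all of these are > 0,
        # so the engine code above has to special-case non-positive dims.
        print(f"min shape of {name} has non-positive dims: {dims}")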
......@@ -792,6 +792,10 @@ struct SimpleOpTypeSetTeller : public Teller {
}
if (op_type == "bilinear_interp_v2") {
// trt 7011 result in test_solov2_trt_fp32.py TRT fp32 diff
#if IS_TRT_VERSION_LT(7100)
return false;
#endif
std::vector<std::string> attrs{"data_layout",
"interp_method",
"align_corners",
......
......@@ -22,7 +22,6 @@ import unittest
class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
inputs = program_config.inputs
weights = program_config.weights
......@@ -33,13 +32,13 @@ class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest):
return True
def sample_program_configs(self):
def generate_input1(attrs: List[Dict[str, Any]]):
return np.ones([1, 3, 64, 64]).astype(np.float32)
def generate_input2(attrs: List[Dict[str, Any]]):
return np.random.uniform(low=0.5, high=6.0,
size=(2)).astype("float32")
return np.random.uniform(low=0.5, high=6.0, size=(2)).astype(
"float32"
)
for data_layout in ["NCHW", "NHWC"]:
for scale_y in [2.0, -1.0, 0.0]:
......@@ -47,48 +46,55 @@ class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest):
scale = [scale_y, scale_x]
for out_h in [32, 64, 128, 192]:
for out_w in [32, 64]:
dics = [{
"data_layout": data_layout,
"interp_method": "bilinear",
"align_corners": False,
"align_mode": 0,
"scale": scale,
"out_h": out_h,
"out_w": out_w
}]
ops_config = [{
"op_type": "bilinear_interp_v2",
"op_inputs": {
"X": ["input_data"],
"Scale": ["input_scale"]
},
"op_outputs": {
"Out": ["bilinear_interp_v2_output_data"]
},
"op_attrs": dics[0]
}]
dics = [
{
"data_layout": data_layout,
"interp_method": "bilinear",
"align_corners": False,
"align_mode": 0,
"scale": scale,
"out_h": out_h,
"out_w": out_w,
}
]
ops_config = [
{
"op_type": "bilinear_interp_v2",
"op_inputs": {
"X": ["input_data"],
"Scale": ["input_scale"],
},
"op_outputs": {
"Out": [
"bilinear_interp_v2_output_data"
]
},
"op_attrs": dics[0],
}
]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={
"input_scale":
TensorConfig(
data_gen=partial(generate_input2, dics))
"input_scale": TensorConfig(
data_gen=partial(generate_input2, dics)
)
},
inputs={
"input_data":
TensorConfig(
data_gen=partial(generate_input1, dics))
"input_data": TensorConfig(
data_gen=partial(generate_input1, dics)
)
},
outputs=["bilinear_interp_v2_output_data"])
outputs=["bilinear_interp_v2_output_data"],
)
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
self, program_config
) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 64, 64]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
......@@ -100,6 +106,123 @@ class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest):
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
# Keep this consistent with op_teller.cc
ver = paddle_infer.get_trt_compile_version()
if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7100:
return 0, 3
return 1, 2
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-2
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True
), 1e-2
def test(self):
self.run_test()
class TrtConvertBilinearInterpV2Test1(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input1(attrs: List[Dict[str, Any]]):
return np.random.random(attrs[0]['input_size']).astype(np.float32)
for data_layout in ["NCHW", "NHWC"]:
for input_size, scale, out_h, out_w in [
([1, 258, 40, 40], [], 24, 24),
([1, 258, 40, 40], [2.0, 2.0], -1, -1),
([1, 256, 160, 160], [0.5, 0.5], -1, -1),
([1, 258, 20, 20], [], 16, 16),
]:
dics = [
{
"data_layout": data_layout,
"interp_method": "bilinear",
"align_corners": False,
"align_mode": 0,
"scale": scale,
"out_h": out_h,
"out_w": out_w,
# the attrs below are only used by the unit test, not by inference
"input_size": input_size,
}
]
ops_config = [
{
"op_type": "bilinear_interp_v2",
"op_inputs": {
"X": ["input_data"],
},
"op_outputs": {
"Out": ["bilinear_interp_v2_output_data"]
},
"op_attrs": dics[0],
}
]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data": TensorConfig(
data_gen=partial(generate_input1, dics)
)
},
outputs=["bilinear_interp_v2_output_data"],
)
yield program_config
def sample_predictor_configs(
self, program_config
) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {
"input_data": attrs[0]['input_size']
}
self.dynamic_shape.max_input_shape = {
"input_data": [attrs[0]['input_size'][0] + 2]
+ attrs[0]['input_size'][1:4]
}
self.dynamic_shape.opt_input_shape = {
"input_data": attrs[0]['input_size']
}
def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
# Keep this consistent with op_teller.cc
ver = paddle_infer.get_trt_compile_version()
if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7100:
return 0, 3
return 1, 2
attrs = [
......@@ -110,19 +233,23 @@ class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest):
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
attrs, False
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-2
attrs, False
), 1e-2
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5
attrs, True
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-2
attrs, True
), 1e-2
def test(self):
self.run_test()
......
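The version gate added to generate_trt_nodes_num can be exercised on its own; a minimal sketch, assuming a Paddle build compiled with TensorRT so that get_trt_compile_version() returns the (major, minor, patch) tuple used in the test:

import paddle.inference as paddle_infer

def trt_converts_bilinear_interp_v2():
    # Same arithmetic as the check in generate_trt_nodes_num above:
    # bilinear_interp_v2 is only handed to TRT when the compile version is >= 7.1.
    ver = paddle_infer.get_trt_compile_version()
    return ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 >= 7100

if not trt_converts_bilinear_interp_v2():
    print("bilinear_interp_v2 stays on Paddle ops; expecting no TRT engine op")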