Unverified commit b0b75169, authored by zlsh80826 and committed by GitHub

Reduce trt convert unit test problem size (#41701)

Parent 404c4a6b
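This commit shrinks the TensorRT convert auto-scan unit tests: input tensors drop from 64x64 to 32x32 (or smaller), dynamic-shape profiles are tightened to match, and parameter sweeps such as batch in [1, 2, 4] are trimmed to [1, 4]. The sketch below is a minimal, self-contained illustration of that reduction pattern, not the Paddle test harness itself (the real tests subclass TrtLayerAutoScanTest; the helper names here are made up):

# Illustrative sketch of the problem-size reduction; not the Paddle test harness.
from typing import Any, Dict, List

import numpy as np


def generate_input(dims: int, batch: int) -> np.ndarray:
    # The largest activation input drops from 64x64 to 32x32.
    if dims == 1:
        return np.ones([32]).astype(np.float32)
    elif dims == 2:
        return np.ones([3, 32]).astype(np.float32)
    elif dims == 3:
        return np.ones([3, 32, 32]).astype(np.float32)
    return np.ones([batch, 3, 32, 32]).astype(np.float32)


def sample_configs() -> List[Dict[str, Any]]:
    configs = []
    for dims in [1, 2, 3, 4]:
        for batch in [1, 4]:  # previously [1, 2, 4]
            configs.append({
                "dims": dims,
                "batch": batch,
                "input": generate_input(dims, batch),
            })
    return configs


if __name__ == "__main__":
    # 8 configs per op instead of 12, each built on smaller tensors.
    print(len(sample_configs()))

The same pattern repeats across the files below; the conv2d tests additionally replace per-groups branches with a single groups * 3 expression.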
@@ -28,16 +28,16 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 1:
-                return np.ones([64]).astype(np.float32)
+                return np.ones([32]).astype(np.float32)
             elif dims == 2:
-                return np.ones([3, 64]).astype(np.float32)
+                return np.ones([3, 32]).astype(np.float32)
             elif dims == 3:
-                return np.ones([3, 64, 64]).astype(np.float32)
+                return np.ones([3, 32, 32]).astype(np.float32)
             else:
-                return np.ones([batch, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 3, 32, 32]).astype(np.float32)

         for dims in [1, 2, 3, 4]:
-            for batch in [1, 2, 4]:
+            for batch in [1, 4]:
                 for op_type in ["relu", "sigmoid", "tanh", "relu6"]:
                     self.dims = dims
                     dics = [{}]
@@ -70,27 +70,25 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
-                self.dynamic_shape.max_input_shape = {"input_data": [128]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [64]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
             elif self.dims == 2:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
-                self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32]}
             elif self.dims == 3:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 32]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [10, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32, 32]}
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 3, 16, 16]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 3, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 3, 32, 32]
                 }

         def clear_dynamic_shape():
......
@@ -54,7 +54,7 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest):
         for dims in [2, 3, 4]:
             for num_input in [0, 1]:
-                for batch in [1, 2, 4]:
+                for batch in [1, 4]:
                     for epsilon in [1e-6, 1e-5, 1e-4]:
                         for data_layout in ["NCHW"]:
                             for momentum in [0.9, 0.8]:
@@ -134,33 +134,33 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest):
             if self.dims == 4:
                 if attrs[0]['data_layout'] == "NCHW":
                     self.dynamic_shape.min_input_shape = {
-                        "batch_norm_input": [1, 3, 24, 24]
+                        "batch_norm_input": [1, 3, 12, 12]
                     }
                     self.dynamic_shape.max_input_shape = {
-                        "batch_norm_input": [4, 3, 48, 48]
+                        "batch_norm_input": [4, 3, 24, 24]
                     }
                     self.dynamic_shape.opt_input_shape = {
-                        "batch_norm_input": [1, 3, 24, 48]
+                        "batch_norm_input": [1, 3, 24, 24]
                     }
                 elif attrs[0]['data_layout'] == "NHWC":
                     self.dynamic_shape.min_input_shape = {
-                        "batch_norm_input": [1, 24, 24, 3]
+                        "batch_norm_input": [1, 12, 12, 3]
                     }
                     self.dynamic_shape.max_input_shape = {
-                        "batch_norm_input": [4, 48, 48, 3]
+                        "batch_norm_input": [4, 24, 24, 3]
                     }
                     self.dynamic_shape.opt_input_shape = {
-                        "batch_norm_input": [1, 24, 48, 3]
+                        "batch_norm_input": [1, 24, 24, 3]
                     }
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {
-                    "batch_norm_input": [1, 3, 24]
+                    "batch_norm_input": [1, 3, 12]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "batch_norm_input": [4, 3, 48]
+                    "batch_norm_input": [4, 3, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "batch_norm_input": [1, 3, 48]
+                    "batch_norm_input": [1, 3, 24]
                 }
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {
......
@@ -28,13 +28,13 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 1:
-                return np.ones([64]).astype(np.float32)
+                return np.ones([32]).astype(np.float32)
             elif dims == 2:
-                return np.ones([3, 64]).astype(np.float32)
+                return np.ones([3, 32]).astype(np.float32)
             elif dims == 3:
-                return np.ones([3, 64, 64]).astype(np.float32)
+                return np.ones([3, 32, 32]).astype(np.float32)
             else:
-                return np.ones([batch, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 3, 32, 32]).astype(np.float32)

         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.array([np.random.uniform(1, 10)]).astype("float32")
@@ -43,7 +43,7 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
             return np.array([np.random.uniform(10, 20)]).astype("float32")

         for dims in [1, 2, 3, 4]:
-            for batch in [1, 2, 4]:
+            for batch in [1, 4]:
                 for op_inputs in [{
                         "X": ["input_data"]
                 }, {
@@ -89,27 +89,25 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
-                self.dynamic_shape.max_input_shape = {"input_data": [128]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [64]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
             elif self.dims == 2:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
-                self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32]}
             elif self.dims == 3:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 32]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [10, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32, 32]}
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 3, 16, 16]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 3, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 3, 32, 32]
                 }

         def clear_dynamic_shape():
......
@@ -46,20 +46,16 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
         self.trt_param.workspace_size = 1073741824

         def generate_input1(batch, attrs: List[Dict[str, Any]]):
-            if attrs[0]['groups'] == 1:
-                return np.ones([batch, 3, 64, 64]).astype(np.float32)
-            elif attrs[0]['groups'] == 2:
-                return np.ones([batch, 6, 64, 64]).astype(np.float32)
-            else:
-                return np.ones([batch, 9, 64, 64]).astype(np.float32)
+            return np.ones(
+                [batch, attrs[0]['groups'] * 3, 64, 64]).astype(np.float32)

         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 3, 3, 3]).astype(np.float32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for strides in [[1, 1], [2, 2], [1, 2]]:
                 for paddings in [[0, 3], [1, 2, 3, 4]]:
-                    for groups in [1, 2, 3]:
+                    for groups in [1, 3]:
                         for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']:
                             for dilations in [[1, 1], [2, 2], [1, 2]]:
                                 for data_format in ['NCHW']:
@@ -116,45 +112,19 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest):
     def sample_predictor_configs(
             self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if attrs[0]['groups'] == 1:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
-            elif attrs[0]['groups'] == 2:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 6, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 6, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 6, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
-            else:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 9, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 9, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 9, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
+            input_groups = attrs[0]['groups'] * 3
+            self.dynamic_shape.min_input_shape = {
+                "input_data": [1, input_groups, 32, 32],
+                "output_data": [1, 24, 32, 32]
+            }
+            self.dynamic_shape.max_input_shape = {
+                "input_data": [4, input_groups, 64, 64],
+                "output_data": [4, 24, 64, 64]
+            }
+            self.dynamic_shape.opt_input_shape = {
+                "input_data": [1, input_groups, 64, 64],
+                "output_data": [1, 24, 64, 64]
+            }

         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
......
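The conv2d refactor above works because the filter tensor has shape [C_out, C_in // groups, kH, kW]; with the fixed [24, 3, 3, 3] weight used by these tests, the input channel count is always groups * 3, so the per-groups if/elif branches collapse into one expression. A small sanity check of that relationship (plain NumPy, names are illustrative):

# Sanity check for the groups -> input-channels relationship used above.
import numpy as np

weight_shape = [24, 3, 3, 3]  # [C_out, C_in // groups, kH, kW]

for groups in [1, 3]:
    in_channels = weight_shape[1] * groups  # same as attrs[0]['groups'] * 3
    x = np.ones([1, in_channels, 64, 64], dtype=np.float32)
    # Each group convolves in_channels // groups == 3 input channels.
    assert x.shape[1] // groups == weight_shape[1]
    print(f"groups={groups}: input shape {list(x.shape)}")

The conv2d_fusion test below applies the identical simplification for groups in [2, 3].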
@@ -49,10 +49,8 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
         self.trt_param.workspace_size = 1073741824

         def generate_input1(batch, attrs: List[Dict[str, Any]]):
-            if attrs[0]['groups'] == 2:
-                return np.ones([batch, 6, 64, 64]).astype(np.float32)
-            else:
-                return np.ones([batch, 9, 64, 64]).astype(np.float32)
+            return np.ones(
+                [batch, attrs[0]['groups'] * 3, 64, 64]).astype(np.float32)

         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 3, 3, 3]).astype(np.float32)
@@ -60,7 +58,7 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
         def generate_weight2(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 1, 1]).astype(np.float32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for strides in [[1, 1], [2, 2], [1, 2]]:
                 for paddings in [[0, 3], [1, 2, 3, 4]]:
                     for groups in [2, 3]:
@@ -126,32 +124,19 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
     def sample_predictor_configs(
             self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if attrs[0]['groups'] == 2:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 6, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 6, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 6, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
-            else:
-                self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 9, 32, 32],
-                    "output_data": [1, 24, 32, 32]
-                }
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 9, 64, 64],
-                    "output_data": [4, 24, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 9, 64, 64],
-                    "output_data": [1, 24, 64, 64]
-                }
+            input_groups = attrs[0]['groups'] * 3
+            self.dynamic_shape.min_input_shape = {
+                "input_data": [1, input_groups, 32, 32],
+                "output_data": [1, 24, 32, 32]
+            }
+            self.dynamic_shape.max_input_shape = {
+                "input_data": [4, input_groups, 64, 64],
+                "output_data": [4, 24, 64, 64]
+            }
+            self.dynamic_shape.opt_input_shape = {
+                "input_data": [1, input_groups, 64, 64],
+                "output_data": [1, 24, 64, 64]
+            }

         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
......
@@ -32,7 +32,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
         def generate_weight():
             return np.random.randn(32).astype(np.float32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for shape in [[32], [batch, 32], [batch, 32, 32],
                           [batch, 32, 16, 32]]:
                 for op_type in ["elementwise_add", "elementwise_mul"]:
@@ -72,7 +72,7 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
             # The input.dims[1] must be equal to the weight's length.
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [4]}
-                self.dynamic_shape.max_input_shape = {"input_data": [256]}
+                self.dynamic_shape.max_input_shape = {"input_data": [32]}
                 self.dynamic_shape.opt_input_shape = {"input_data": [16]}
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
@@ -80,19 +80,17 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
                 self.dynamic_shape.opt_input_shape = {"input_data": [2, 32]}
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 4]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 32, 256]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 32]}
             elif self.dims == 4:
                 self.dynamic_shape.min_input_shape = {
                     "input_data": [1, 32, 4, 4]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 32, 128, 256]
+                    "input_data": [4, 32, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [2, 32, 32, 16]
+                    "input_data": [4, 32, 16, 32]
                 }

         def clear_dynamic_shape():
......
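The comment in the elementwise test ("The input.dims[1] must be equal to the weight's length") is why every dynamic-shape profile keeps 32 in dimension 1 while the other dimensions shrink. A quick NumPy illustration of that broadcast constraint (shapes taken from the test; the reshape helper is purely illustrative):

# The 32-element weight is broadcast along axis 1 (axis 0 for the 1-D case),
# so that axis must stay 32 in the min/opt/max dynamic shapes.
import numpy as np

weight = np.random.randn(32).astype(np.float32)

for shape in [[32], [4, 32], [4, 32, 32], [4, 32, 16, 32]]:
    x = np.ones(shape, dtype=np.float32)
    axis = 0 if x.ndim == 1 else 1
    expand = [1] * x.ndim
    expand[axis] = 32
    y = x + weight.reshape(expand)  # elementwise_add with a per-channel weight
    assert y.shape == x.shape
    print(list(y.shape))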
@@ -28,13 +28,13 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(dims, attrs: List[Dict[str, Any]]):
             if dims == 1:
-                return np.ones([64]).astype(np.float32)
+                return np.ones([32]).astype(np.float32)
             elif dims == 2:
-                return np.ones([3, 64]).astype(np.float32)
+                return np.ones([3, 32]).astype(np.float32)
             elif dims == 3:
-                return np.ones([3, 64, 64]).astype(np.float32)
+                return np.ones([3, 32, 32]).astype(np.float32)
             else:
-                return np.ones([1, 3, 64, 64]).astype(np.float32)
+                return np.ones([1, 3, 32, 32]).astype(np.float32)

         for dims in [1, 2, 3, 4]:
             for approximate in [True, False]:
@@ -69,27 +69,25 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
-                self.dynamic_shape.max_input_shape = {"input_data": [128]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [64]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
             elif self.dims == 2:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
-                self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32]}
             elif self.dims == 3:
-                self.dynamic_shape.min_input_shape = {"input_data": [1, 32, 32]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [10, 64, 64]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [3, 64, 64]}
+                self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [3, 32, 32]}
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 3, 16, 16]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 3, 32, 32]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 3, 32, 32]
                 }

         def clear_dynamic_shape():
......
@@ -29,8 +29,8 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

-        for batch in [1, 2, 4]:
-            for shape in [[batch, 64], [batch, 32, 64], [batch, 64, 32, 128]]:
+        for batch in [1, 4]:
+            for shape in [[batch, 32], [batch, 16, 32], [batch, 32, 16, 128]]:
                 self.input_dim = len(shape)
                 for slope in [0.1, 0.5]:
                     for offset in [0.2, 0.7]:
@@ -63,23 +63,21 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.input_dim == 2:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 8]}
-                self.dynamic_shape.max_input_shape = {"input_data": [64, 128]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}
                 self.dynamic_shape.opt_input_shape = {"input_data": [2, 16]}
             elif self.input_dim == 3:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 8, 8]}
-                self.dynamic_shape.max_input_shape = {
-                    "input_data": [64, 128, 256]
-                }
-                self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 64]}
+                self.dynamic_shape.max_input_shape = {"input_data": [4, 16, 32]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [4, 16, 32]}
             elif self.input_dim == 4:
                 self.dynamic_shape.min_input_shape = {
                     "input_data": [1, 8, 8, 4]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [64, 128, 256, 512]
+                    "input_data": [4, 32, 16, 128]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [2, 16, 64, 128]
+                    "input_data": [4, 32, 16, 128]
                 }

         def clear_dynamic_shape():
......
@@ -37,7 +37,7 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input1(attrs: List[Dict[str, Any]]):
-            return np.ones([1, 3, 64, 64]).astype(np.float32)
+            return np.ones([1, 3, 32, 32]).astype(np.float32)

         for threshold in [6.0, 7.0, 100.0, 0.0, -1.0]:
             for scale in [5.0, 6.0, 7.0, -1.0, 0.0, 100.0]:
@@ -74,9 +74,9 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
     def sample_predictor_configs(
             self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
-            self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
-            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}
+            self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 16, 16]}
+            self.dynamic_shape.max_input_shape = {"input_data": [2, 3, 32, 32]}
+            self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 32, 32]}

         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
......
@@ -136,7 +136,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
                     "input_data": [1, 1],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 64],
+                    "input_data": [4, 32],
                 }
                 self.dynamic_shape.opt_input_shape = {
                     "input_data": [2, 3],
@@ -146,7 +146,7 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
                     "input_data": [1, 1, 1, 1],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 64, 128, 128],
+                    "input_data": [4, 3, 16, 32],
                 }
                 self.dynamic_shape.opt_input_shape = {
                     "input_data": [2, 3, 16, 32],
@@ -156,10 +156,10 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest):
                     "input_data": [1, 1, 1],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 64, 256],
+                    "input_data": [4, 3, 32],
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [2, 3, 128],
+                    "input_data": [2, 3, 16],
                 }

         def clear_dynamic_shape():
......
@@ -94,14 +94,14 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest):
                     "scale_input": [1, 3, 24, 24]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "scale_input": [9, 3, 48, 48]
+                    "scale_input": [4, 3, 24, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "scale_input": [1, 3, 48, 24]
+                    "scale_input": [1, 3, 24, 24]
                 }
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {"scale_input": [1, 3, 24]}
-                self.dynamic_shape.max_input_shape = {"scale_input": [9, 6, 48]}
+                self.dynamic_shape.max_input_shape = {"scale_input": [4, 3, 24]}
                 self.dynamic_shape.opt_input_shape = {"scale_input": [1, 3, 24]}
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {"scale_input": [1, 24]}
......
@@ -69,7 +69,7 @@ class TrtConvertStackTest(TrtLayerAutoScanTest):
             return np.ones([24]).astype(np.float32)

         for dims in [1, 2, 3, 4]:
-            for batch in [1, 2, 4]:
+            for batch in [1, 4]:
                 for axis in [-2, -1, 0, 1, 2, 3]:
                     self.dims = dims
                     dics = [{"axis": axis}, {}]
......
@@ -37,7 +37,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest):
         def generate_input2(attrs: List[Dict[str, Any]], batch):
             return np.random.random([batch, 2]).astype(np.int32)

-        for batch in [1, 2, 4]:
+        for batch in [1, 4]:
             for class_num in [80, 30]:
                 for anchors in [[10, 13, 16, 30, 33, 23]]:
                     for downsample_ratio in [32, 16]:
@@ -97,24 +97,24 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest):
             if attrs[0]['iou_aware'] == True:
                 channel = 3 * (attrs[0]['class_num'] + 6)
                 self.dynamic_shape.min_input_shape = {
-                    "scale_input": [1, channel, 24, 24]
+                    "scale_input": [1, channel, 12, 12]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "scale_input": [4, channel, 48, 48]
+                    "scale_input": [4, channel, 24, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "scale_input": [1, channel, 24, 48]
+                    "scale_input": [1, channel, 24, 24]
                 }
             else:
                 channel = 3 * (attrs[0]['class_num'] + 5)
                 self.dynamic_shape.min_input_shape = {
-                    "scale_input": [1, channel, 24, 24]
+                    "scale_input": [1, channel, 12, 12]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "scale_input": [4, channel, 48, 48]
+                    "scale_input": [4, channel, 24, 24]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "scale_input": [1, channel, 24, 48]
+                    "scale_input": [1, channel, 24, 24]
                 }

         def clear_dynamic_shape():
......
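In the yolo_box test, the channel count of the input is derived from the attributes rather than reduced: each of the 3 anchors carries class_num class scores plus 5 box terms, with one extra term per anchor when iou_aware is enabled. Only the spatial extent of the dynamic-shape profile shrinks. A small check of that arithmetic (plain Python, for illustration only):

# Channel arithmetic from the test above: 3 anchors, class_num scores,
# 5 box terms, and one extra term per anchor when iou_aware is on.
def yolo_box_channels(class_num: int, iou_aware: bool) -> int:
    per_anchor = class_num + (6 if iou_aware else 5)
    return 3 * per_anchor


for class_num in [80, 30]:
    for iou_aware in [True, False]:
        c = yolo_box_channels(class_num, iou_aware)
        print(f"class_num={class_num}, iou_aware={iou_aware}: channel={c}")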