未验证 提交 1a14d011 编写于 作者: Z zlsh80826 提交者: GitHub

Reduce squeeze2_matmul_fuse_pass, flatten tests time (#47098)

* Add missing fp32 config and reduce the testing combination

* Reduce trt matmul pass test max examples
上级 be273ea9
......@@ -31,7 +31,7 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
def generate_input(batch):
return np.random.random([batch, 32]).astype(np.float32)
for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
......@@ -128,7 +128,7 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
def generate_input(batch):
return np.random.random([batch, 32, 64]).astype(np.float32)
for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1, 2]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
......@@ -166,8 +166,8 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 8, 8]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 64, 768]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 256]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 64]}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
......@@ -226,7 +226,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
def generate_input(batch):
return np.random.random([batch, 8, 8, 8]).astype(np.float32)
for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1, 2, 3]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
......@@ -264,7 +264,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64, 64]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32, 32]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}
def clear_dynamic_shape():
......@@ -294,6 +294,7 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
......@@ -323,7 +324,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
def generate_input(batch):
return np.random.random([batch, 8, 8, 8]).astype(np.float32)
for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1, 2, 3, 4]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
......@@ -361,7 +362,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64, 64]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 16, 16, 8]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}
def clear_dynamic_shape():
......@@ -391,6 +392,7 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
......
......@@ -153,7 +153,7 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest):
def test(self):
self.run_and_statis(quant=False,
max_examples=50,
max_examples=25,
passes=["trt_squeeze2_matmul_fuse_pass"])
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册