Unverified commit 7096da0c, authored by zhenlin, committed by GitHub

add_conv_bn_fuse_pass_test (#37975)

* add_conv_bn_fuse_pass_test

* add trt NHWC bug into invalid program for avoiding error crash

* add trt NHWC bug into invalid program for avoiding error crash

* add trt NHWC bug into invalid program for avoiding error crash

* fix trt pass target optype bug based on has_bias value

* fix trt pass target optype bug based on has_bias value
Parent commit: 33fbb66e
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest

import numpy as np

import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st

import paddle.inference as paddle_infer

from auto_scan_test import PassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
class TestConvBnFusePass(PassAutoScanTest):
    """Auto-scan test for ``conv_bn_fuse_pass``.

    Randomly generates ``conv2d -> batch_norm`` programs and verifies that
    the pass folds the batch_norm into the conv, leaving either a single
    ``conv2d`` (mkldnn / fused-bias backends) or ``conv2d + elementwise_add``
    where the folded bias stays a separate op.
    """

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        """Reject configurations the current test framework cannot run.

        NHWC programs crash under TRT in the current pass-test framework
        (see commit message), so they are filtered out here and additionally
        ignored via ``add_ignore_pass_case`` for the non-TRT backends.
        """
        return program_config.ops[0].attrs['data_format'] != "NHWC"

    def sample_program_config(self, draw):
        """Draw one random conv2d + batch_norm program configuration."""
        # --- hyper-parameters drawn from hypothesis strategies ---
        padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))
        groups = draw(st.integers(min_value=1, max_value=3))
        data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
        # channel counts are kept multiples of 4 (as in the original draw)
        filter_channel = draw(st.integers(min_value=1, max_value=16)) * 4
        filter_size = draw(st.integers(min_value=1, max_value=4))
        in_channel = groups * filter_channel
        out_channel_factor = draw(st.integers(min_value=1, max_value=16)) * 4
        out_channel = groups * out_channel_factor
        batch_size = draw(st.integers(min_value=1, max_value=4))
        dilations = draw(
            st.lists(
                st.integers(min_value=1, max_value=2), min_size=2, max_size=2))
        paddings = draw(
            st.lists(
                st.integers(min_value=0, max_value=2), min_size=2, max_size=2))
        strides = draw(
            st.lists(
                st.integers(min_value=1, max_value=2), min_size=2, max_size=2))
        has_bias = draw(st.booleans())
        use_mkldnn = draw(st.booleans())
        epsilon = draw(st.floats(min_value=0.0, max_value=0.001))

        # --- tensor shapes derived from the drawn parameters ---
        x_shape = ([batch_size, in_channel, 64, 64]
                   if data_format == "NCHW" else
                   [batch_size, 64, 64, in_channel])
        w_shape = [out_channel, filter_channel, filter_size, filter_size]
        # batch_norm Scale/Bias/Mean/Variance (and conv bias) are all
        # per-output-channel vectors.
        per_channel_shape = [out_channel]

        def make_gen(shape):
            # Zero-arg generator: the framework calls it to (re)draw data.
            return lambda: np.random.random(shape).astype(np.float32)

        conv2d_op = OpConfig(
            "conv2d",
            inputs={
                "Input": ["conv2d_input"],
                "Filter": ["conv2d_weight"],
            },
            outputs={"Output": ["conv2d_out"]},
            data_format=data_format,
            dilations=dilations,
            padding_algorithm=padding_algorithm,
            groups=groups,
            paddings=paddings,
            strides=strides,
            use_mkldnn=use_mkldnn,
            has_bias=has_bias,
            is_test=True)
        bn_op = OpConfig(
            "batch_norm",
            inputs={
                "X": ["conv2d_out"],
                "Scale": ["batch_norm_Scale"],
                "Bias": ["batch_norm_Bias"],
                "Mean": ["batch_norm_Mean"],
                "Variance": ["batch_norm_Variance"],
            },
            outputs={
                "Y": ["batch_norm_Y"],
                "MeanOut": ["batch_norm_Mean"],
                "VarianceOut": ["batch_norm_Variance"],
                "SavedMean": ["batch_norm_SavedMean"],
                "SavedVariance": ["batch_norm_SavedVariance"],
                "ReserveSpace": ["batch_norm_ReserveSpace"],
            },
            epsilon=epsilon,
            trainable_statistics=False,
            data_layout=data_format,
            is_test=True)
        if has_bias:
            conv2d_op.inputs["Bias"] = ["conv2d_bias"]

        program_config = ProgramConfig(
            ops=[conv2d_op, bn_op],
            inputs={
                "conv2d_input": TensorConfig(data_gen=make_gen(x_shape)),
            },
            weights={
                "conv2d_weight": TensorConfig(data_gen=make_gen(w_shape)),
                "batch_norm_Scale":
                TensorConfig(data_gen=make_gen(per_channel_shape)),
                "batch_norm_Bias":
                TensorConfig(data_gen=make_gen(per_channel_shape)),
                "batch_norm_Mean":
                TensorConfig(data_gen=make_gen(per_channel_shape)),
                "batch_norm_Variance":
                TensorConfig(data_gen=make_gen(per_channel_shape)),
            },
            outputs=["batch_norm_Y"])
        if has_bias:
            program_config.weights["conv2d_bias"] = TensorConfig(
                data_gen=make_gen(per_channel_shape))
        return program_config

    def sample_predictor_configs(self, program_config):
        """Yield (config, expected fused ops, (atol, rtol)) per backend."""
        if program_config.ops[0].attrs['use_mkldnn']:
            # mkldnn folds the bias into conv2d, so a single op remains.
            config = self.create_inference_config()
            config.enable_mkldnn()
            yield config, ['conv2d'], (1e-5, 1e-5)
        else:
            # plain CPU
            config = self.create_inference_config()
            yield config, ['conv2d', 'elementwise_add'], (1e-5, 1e-5)

            # GPU
            config = self.create_inference_config(use_gpu=True)
            yield config, ['conv2d', 'elementwise_add'], (1e-5, 1e-5)

            # TensorRT
            config = self.create_trt_inference_config()
            config.enable_tensorrt_engine(
                workspace_size=1 << 20,
                max_batch_size=4,
                min_subgraph_size=1,
                precision_mode=paddle_infer.PrecisionType.Float32,
                use_static=False,
                use_calib_mode=False)
            if program_config.ops[0].attrs['has_bias']:
                yield config, ['conv2d', 'elementwise_add'], (1e-5, 1e-5)
            else:  # it will enter conv_elementwise_add_fuse_pass
                yield config, ['conv2d_fusion'], (1e-5, 1e-5)

    def add_ignore_pass_case(self):
        """Register known-broken configurations to skip accuracy checks."""

        def teller_nhwc(program_config, predictor_config):
            # conv2d output layout is wrong after fusion under NHWC.
            return program_config.ops[0].attrs['data_format'] == "NHWC"

        def teller_mkldnn_bias(program_config, predictor_config):
            # mkldnn output currently differs when conv carries a bias.
            return (predictor_config.mkldnn_enabled() and
                    program_config.ops[0].attrs['has_bias'])

        self.add_ignore_check_case(
            teller_nhwc, IgnoreReasons.PASS_ACCURACY_ERROR,
            "The output format of conv2d is wrong when data_format attribute is NHWC"
        )
        self.add_ignore_check_case(
            teller_mkldnn_bias, IgnoreReasons.PASS_ACCURACY_ERROR,
            "Currently mkldnn Output has diff with bias!")

    def test(self):
        """Run the pass over sampled programs and gather statistics."""
        self.run_and_statis(quant=False, passes=["conv_bn_fuse_pass"])
if __name__ == "__main__":
    # Standard unittest entry point; the diff rendering truncated this line,
    # reconstructed to match the test-file convention used by this suite.
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册