diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
index dfb030a7cc76875e2106f78e8a5a89e1784da65c..76c6ca24aaaf062dad71ff3d39aef77d74582ba3 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
@@ -147,12 +147,19 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
 }  // namespace paddle
 REGISTER_PASS(conv_bias_mkldnn_fuse_pass,
               paddle::framework::ir::ConvBiasFusePass);
-REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass,
-              paddle::framework::ir::Conv2DTransposeBiasFusePass);
-REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass,
-              paddle::framework::ir::Conv3DBiasFusePass);
 REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("conv2d", 0)
             .EQ("elementwise_add", 0));
+
+REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass,
+              paddle::framework::ir::Conv2DTransposeBiasFusePass);
+REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination()
+            .EQ("conv2d_transpose", 0)
+            .EQ("elementwise_add", 0));
+
+REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass,
+              paddle::framework::ir::Conv3DBiasFusePass);
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
index 5eb397b5a95b240dcaff9dee3758646b35ab5022..6c8b9d4d3a8792e5f7251a1868408f52933cd269 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
@@ -20,11 +20,11 @@ from inference_pass_test import InferencePassTest
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import AnalysisConfig
-"""Test for fusion of conv and bias."""
+from paddle.fluid.core import PassVersionChecker
 
 
 #padding SAME
-class ConvBiasMkldnnFusePassTest(InferencePassTest):
+class ConvBiasMkldnnFusePassSamePadTest(InferencePassTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -48,10 +48,12 @@ class ConvBiasMkldnnFusePassTest(InferencePassTest):
     def test_check_output(self):
         use_gpu = False
         self.check_output_with_option(use_gpu)
+        self.assertTrue(
+            PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass"))
 
 
 #padding VALID
-class ConvBiasMkldnnFusePassTest1(InferencePassTest):
+class ConvBiasMkldnnFusePassValidPadTest(ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -72,13 +74,9 @@ class ConvBiasMkldnnFusePassTest1(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
 
-#padding number
-class ConvBiasMkldnnFusePassTest2(InferencePassTest):
+#padding EXPLICIT NUMBER
+class ConvBiasMkldnnFusePassExplicitPadTest(ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -99,13 +97,8 @@ class ConvBiasMkldnnFusePassTest2(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
 
-#dilation not supported yet, just print warning log and does not fuse
-class ConvBiasMkldnnFusePassTest3(InferencePassTest):
+class ConvBiasMkldnnFusePassGroupTest(ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -118,7 +111,6 @@ class ConvBiasMkldnnFusePassTest3(InferencePassTest):
                 num_filters=3,
                 filter_size=3,
                 padding="VALID",
-                dilation=2,
                 groups=3,
                 bias_attr=param_attr,
                 use_cudnn=False,
@@ -131,13 +123,9 @@ class ConvBiasMkldnnFusePassTest3(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
 
-#all conv params except for dilation
-class ConvBiasMkldnnFusePassTest4(InferencePassTest):
+class ConvBiasMkldnnFusePassDilationsGroupsTest(
+        ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -150,6 +138,7 @@ class ConvBiasMkldnnFusePassTest4(InferencePassTest):
                 num_filters=3,
                 filter_size=3,
                 padding="VALID",
+                dilation=2,
                 groups=3,
                 bias_attr=param_attr,
                 use_cudnn=False,
@@ -162,9 +151,33 @@ class ConvBiasMkldnnFusePassTest4(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
+
+class ConvTransposeMkldnnFusePassDilationsGroupsTest(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(name="data", shape=[-1, 3, 5, 5], dtype="float32")
+            param_attr = fluid.ParamAttr(
+                initializer=fluid.initializer.Xavier(uniform=False),
+                learning_rate=0.001)
+            conv_out = fluid.layers.conv2d_transpose(
+                input=data,
+                num_filters=3,
+                filter_size=3,
+                padding="SAME",
+                dilation=1,
+                bias_attr=param_attr,
+                use_cudnn=False)
+
+        self.feeds = {"data": np.random.random((1, 3, 5, 5)).astype("float32")}
+        self.fetch_list = [conv_out]
+        self.enable_mkldnn = True
+
     def test_check_output(self):
         use_gpu = False
         self.check_output_with_option(use_gpu)
+        self.assertTrue(
+            PassVersionChecker.IsCompatible(
+                "conv_transpose_bias_mkldnn_fuse_pass"))
 
 
 if __name__ == "__main__":