diff --git a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc
index 7612df9ab915011001b57b37e4fd559d393302a7..3f88a460d140f6d7389194a29e37128f5ba5b458 100644
--- a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc
@@ -19,6 +19,7 @@
 #include
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/op_version_registry.h"

 namespace paddle {
 namespace framework {
@@ -334,3 +335,8 @@ void EmbeddingEltwiseLayerNormFusePass::ApplyImpl(Graph* graph) const {

 REGISTER_PASS(embedding_eltwise_layernorm_fuse_pass,
               paddle::framework::ir::EmbeddingEltwiseLayerNormFusePass);
+REGISTER_PASS_CAPABILITY(embedding_eltwise_layernorm_fuse_pass)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination()
+            .EQ("lookup_table", 0)
+            .EQ("elementwise_add", 0));
diff --git a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc
index 71c9dbae1a46af1ecae0aaff3fde52de8142d4bb..727e42629f9fab9183668ae0cc84ae54eb01982c 100644
--- a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc
@@ -16,12 +16,13 @@ limitations under the License. */

 #include
 #include "paddle/fluid/framework/ir/pass_tester_helper.h"
+#include "paddle/fluid/framework/op_version_registry.h"

 namespace paddle {
 namespace framework {
 namespace ir {

-TEST(SkipLayerNormFusePass, basic) {
+TEST(EmbeddingEltwiseLayernormFusePass, basic) {
   // inputs                           operator            output
   // --------------------------------------------------------------------
   // (x, y)                           elementwise_add  -> elementwise_out
@@ -91,6 +92,12 @@ TEST(SkipLayerNormFusePass, basic) {
       "The number of fusion nodes does not meet expectations after fuse"));
 }

+TEST(EmbeddingEltwiseLayernormFusePass, pass_op_version_check) {
+  ASSERT_TRUE(
+      paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance()
+          .IsPassCompatible("embedding_eltwise_layernorm_fuse_pass"));
+}
+
 }  // namespace ir
 }  // namespace framework
 }  // namespace paddle
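The REGISTER_PASS_CAPABILITY entries added throughout this patch declare the operator versions each fuse pass was written against; IsPassCompatible then reports true only when every listed op in the running library satisfies its comparator. Below is a rough, self-contained model of the .EQ(...) combination semantics. It is illustrative only: OpVersionTable and EqCombination are hypothetical names, not Paddle's actual registry implementation.

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for the op version registry: op name -> version.
using OpVersionTable = std::map<std::string, int>;

// Toy model of OpVersionComparatorCombination restricted to .EQ():
// the combination matches only if every required op is registered at
// exactly the expected version.
struct EqCombination {
  std::vector<std::pair<std::string, int>> required;

  EqCombination& EQ(const std::string& op, int version) {
    required.emplace_back(op, version);
    return *this;
  }

  bool IsMatched(const OpVersionTable& table) const {
    for (const auto& req : required) {
      auto it = table.find(req.first);
      if (it == table.end() || it->second != req.second) return false;
    }
    return true;
  }
};

int main() {
  OpVersionTable current{{"conv2d", 0}, {"elementwise_add", 0}};
  EqCombination combo;
  combo.EQ("conv2d", 0).EQ("elementwise_add", 0);
  std::cout << combo.IsMatched(current) << "\n";  // 1: pass is compatible.

  // After a hypothetical version bump of elementwise_add, the pass would
  // be reported incompatible, and the fuse would be skipped rather than
  // applied to an op whose semantics may have changed.
  current["elementwise_add"] = 1;
  std::cout << combo.IsMatched(current) << "\n";  // 0: pass is skipped.
  return 0;
}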
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
index 82e0af3c198750296032769f2f3b04658871adb7..f7a8e3e3f6c3c77e978c57eeb7515d8cfce86471 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
@@ -17,6 +17,7 @@
 #include
 #include
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/platform/enforce.h"

 namespace paddle {
@@ -84,6 +85,19 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
     VLOG(3) << "do not perform " + type() + "+bias fuse";
     return;
   }
+  if (conv->Op()->HasAttr("dilations")) {
+    auto dilations =
+        BOOST_GET_CONST(std::vector<int>, conv->Op()->GetAttr("dilations"));
+    for (const auto& d : dilations) {
+      if (d != 1) {
+        LOG(WARNING)
+            << "dilated conv is not supported by MKLDNN; skipping the fuse "
+            << "and setting conv attribute use_mkldnn = false";
+        conv->Op()->SetAttr("use_mkldnn", false);
+        return;
+      }
+    }
+  }

   auto* eltwise_bias_tensor =
       scope->FindVar(eltwise_bias->Name())->GetMutable<LoDTensor>();
@@ -151,3 +165,8 @@ REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass,
               paddle::framework::ir::Conv2DTransposeBiasFusePass);
 REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass,
               paddle::framework::ir::Conv3DBiasFusePass);
+REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination()
+            .EQ("conv2d", 0)
+            .EQ("elementwise_add", 0));
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc
index 88aac001a93ae836d62fe3bf3fc502960eebe70f..455350d2f703c52a9ef3e5714a60573408310080 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc
@@ -18,6 +18,7 @@
 #include "paddle/fluid/platform/place.h"

 #include "paddle/fluid/framework/op_proto_maker.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/imperative/type_defs.h"

 namespace paddle {
@@ -149,6 +150,12 @@ TEST(ConvBiasFusePass, conv2d_transpose) {
   ASSERT_EQ(pass.type(), std::string("conv2d_transpose"));
 }

+TEST(ConvBiasFusePass, pass_op_version_check) {
+  ASSERT_TRUE(
+      paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance()
+          .IsPassCompatible("conv_bias_mkldnn_fuse_pass"));
+}
+
 }  // namespace ir
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc
index af2b1308e084ee937f26bf90caf2df6fb44e044b..2fb131aceaad28a365e8202dca35cfe53f8f54da 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc
@@ -19,6 +19,7 @@
 #include
 #include
 #include "paddle/fluid/framework/ir/graph_traits.h"
+#include "paddle/fluid/framework/op_version_registry.h"

 namespace paddle {
 namespace framework {
@@ -341,3 +342,8 @@ void ResidualConnectionMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {

 REGISTER_PASS(conv_elementwise_add_mkldnn_fuse_pass,
               paddle::framework::ir::ResidualConnectionMKLDNNFusePass);
+REGISTER_PASS_CAPABILITY(conv_elementwise_add_mkldnn_fuse_pass)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination()
+            .EQ("conv2d", 0)
+            .EQ("elementwise_add", 0));
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc
index 8a13596cd50087475bf12b6cfa5920b82e24de31..fd4910fc8e95cd98fe9feaba51e70d5a143ad443 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc
@@ -17,6 +17,7 @@

 #include "paddle/fluid/framework/ir/graph_traits.h"
 #include "paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.h"
+#include "paddle/fluid/framework/op_version_registry.h"

 namespace paddle {
 namespace framework {
@@ -267,6 +268,12 @@ TEST(ConvElementwiseAddMKLDNNFusePass, NoFusion) {
   AssertOpsCount(graph, 2, 1);
 }

+TEST(ConvElementwiseAddMKLDNNFusePass, pass_op_version_check) {
+  ASSERT_TRUE(
+      paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance()
+          .IsPassCompatible("conv_elementwise_add_mkldnn_fuse_pass"));
+}
+
 }  // namespace ir
 }  // namespace framework
 }  // namespace paddle
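The dilation guard added to ConvBiasFusePass::ApplyImpl above rejects the fuse, and clears use_mkldnn so the op falls back to the native kernel, as soon as any entry of the dilations attribute differs from 1. The predicate reduces to an all-of check; a minimal standalone sketch, assuming the std::vector<int> attribute type used in the patch:

#include <algorithm>
#include <iostream>
#include <vector>

// A conv op qualifies for the MKLDNN conv+bias fuse only when it is
// undilated, i.e. every entry of "dilations" equals 1.
bool IsFusableDilation(const std::vector<int>& dilations) {
  return std::all_of(dilations.begin(), dilations.end(),
                     [](int d) { return d == 1; });
}

int main() {
  std::cout << IsFusableDilation({1, 1}) << "\n";  // 1: fuse may proceed.
  std::cout << IsFusableDilation({2, 2}) << "\n";  // 0: skip fuse, fall back.
  return 0;
}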
diff --git a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
index c5965701a53d4312d89f1e09f17840b09f1bd5f5..df5ba3314e637fefe930d4c45f431314dd7d8493 100644
--- a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
@@ -14,6 +14,7 @@ limitations under the License. */

 #include "paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h"
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
+#include "paddle/fluid/framework/op_version_registry.h"

 namespace paddle {
 namespace framework {
@@ -57,3 +58,7 @@ void DepthwiseConvMKLDNNPass::ApplyImpl(ir::Graph* graph) const {

 REGISTER_PASS(depthwise_conv_mkldnn_pass,
               paddle::framework::ir::DepthwiseConvMKLDNNPass);
+REGISTER_PASS_CAPABILITY(depthwise_conv_mkldnn_pass)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination().EQ(
+            "depthwise_conv2d", 0));
diff --git a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc
index a37565236cd440bd803184d038ad4deb3c0b6150..c6c72ba33d6295d90c502ab88d7d712d76a11aad 100644
--- a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc
@@ -16,6 +16,8 @@

 #include

+#include "paddle/fluid/framework/op_version_registry.h"
+
 namespace paddle {
 namespace framework {
 namespace ir {
@@ -70,6 +72,12 @@ ProgramDesc BuildProgramDesc() {
   return prog;
 }

+TEST(DepthwiseConvMKLDNNPass, pass_op_version_check) {
+  ASSERT_TRUE(
+      paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance()
+          .IsPassCompatible("depthwise_conv_mkldnn_pass"));
+}
+
 TEST(DepthwiseConvMKLDNNPass, basic) {
   auto prog = BuildProgramDesc();
diff --git a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc
index 198107ea082dc86d9e65a926bf9befe2fc4abfa4..9d2b4ebaf8ccf33e175e46c08657e7eeed467055 100644
--- a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc
@@ -19,6 +19,7 @@
 #include
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/platform/errors.h"

 namespace paddle {
@@ -707,3 +708,13 @@ REGISTER_PASS(multihead_matmul_fuse_pass,

 REGISTER_PASS(multihead_matmul_fuse_pass_v2,
               paddle::framework::ir::MultiHeadMatmulV2FusePass);
+REGISTER_PASS_CAPABILITY(multihead_matmul_fuse_pass_v2)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination()
+            .EQ("mul", 0)
+            .EQ("elementwise_add", 0)
+            .EQ("reshape2", 0)
+            .EQ("transpose2", 0)
+            .EQ("scale", 0)
+            .EQ("matmul", 0)
+            .EQ("softmax", 0));
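The intended consumer of these registrations is a guard evaluated before a pass runs. A hypothetical wrapper (not actual Paddle code), built from the same API the testers in this patch exercise:

#include <string>

#include "paddle/fluid/framework/op_version_registry.h"

// Hypothetical guard: apply a fuse pass only if the ops it rewrites still
// carry the versions the pass was registered against.
bool ShouldApplyPass(const std::string& pass_name) {
  return paddle::framework::compatible::PassVersionCheckerRegistrar::
      GetInstance()
          .IsPassCompatible(pass_name);
}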
*/ #include "paddle/fluid/framework/ir/multihead_matmul_fuse_pass.h" // NOLINT #include #include "paddle/fluid/framework/ir/pass_tester_helper.h" +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace framework { @@ -133,6 +134,12 @@ TEST(MultiHeadMatmulFusePass, basic) { num_fused_nodes_after)); } +TEST(MultiHeadMatmulFusePass, pass_op_version_check) { + ASSERT_TRUE( + paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance() + .IsPassCompatible("multihead_matmul_fuse_pass_v2")); +} + } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc index 9dddc9154f8fc39144b38535824999b933a92106..2e3cd16d5ce49fdd6186f98c72d77c75c4053559 100644 --- a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/ir/graph_pattern_detector.h" +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace framework { @@ -180,3 +181,8 @@ void SkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const { REGISTER_PASS(skip_layernorm_fuse_pass, paddle::framework::ir::SkipLayerNormFusePass); +REGISTER_PASS_CAPABILITY(skip_layernorm_fuse_pass) + .AddCombination( + paddle::framework::compatible::OpVersionComparatorCombination() + .EQ("elementwise_add", 0) + .EQ("layer_norm", 0)); diff --git a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc index d2d7469872857a070294520a589fee4ca383f065..eff5dcddf54ee49be5b14a7bdfa609079f925036 100644 --- a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/ir/pass_tester_helper.h" +#include "paddle/fluid/framework/op_version_registry.h" namespace paddle { namespace framework { @@ -54,6 +55,12 @@ TEST(SkipLayerNormFusePass, basic) { "The number of fusion nodes does not meet expectations after fuse")); } +TEST(SkipLayerNormFusePass, pass_op_version_check) { + ASSERT_TRUE( + paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance() + .IsPassCompatible("skip_layernorm_fuse_pass")); +} + } // namespace ir } // namespace framework } // namespace paddle diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..5eb397b5a95b240dcaff9dee3758646b35ab5022 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py @@ -0,0 +1,171 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
new file mode 100644
index 0000000000000000000000000000000000000000..5eb397b5a95b240dcaff9dee3758646b35ab5022
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from inference_pass_test import InferencePassTest
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid.core import AnalysisConfig
+"""Test for fusion of conv and bias."""
+
+
+# padding SAME
+class ConvBiasMkldnnFusePassTest(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(
+                name="data", shape=[-1, 3, 100, 100], dtype="float32")
+            param_attr = fluid.ParamAttr(
+                initializer=fluid.initializer.Xavier(uniform=False),
+                learning_rate=0.001)
+            conv_out = fluid.layers.conv2d(
+                input=data,
+                num_filters=3,
+                filter_size=3,
+                padding="SAME",
+                bias_attr=param_attr)
+
+        self.feeds = {
+            "data": np.random.random((1, 3, 100, 100)).astype("float32")
+        }
+        self.fetch_list = [conv_out]
+        self.enable_mkldnn = True
+
+    def test_check_output(self):
+        use_gpu = False
+        self.check_output_with_option(use_gpu)
+
+
+# padding VALID
+class ConvBiasMkldnnFusePassTest1(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(
+                name="data", shape=[-1, 3, 100, 100], dtype="float32")
+            param_attr = fluid.ParamAttr(
+                initializer=fluid.initializer.Xavier(uniform=False),
+                learning_rate=0.001)
+            conv_out = fluid.layers.conv2d(
+                input=data,
+                num_filters=3,
+                filter_size=3,
+                padding="VALID",
+                bias_attr=param_attr)
+
+        self.feeds = {
+            "data": np.random.random((1, 3, 100, 100)).astype("float32")
+        }
+        self.fetch_list = [conv_out]
+        self.enable_mkldnn = True
+
+    def test_check_output(self):
+        use_gpu = False
+        self.check_output_with_option(use_gpu)
+
+
+# padding given as explicit numbers
+class ConvBiasMkldnnFusePassTest2(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(
+                name="data", shape=[-1, 3, 100, 100], dtype="float32")
+            param_attr = fluid.ParamAttr(
+                initializer=fluid.initializer.Xavier(uniform=False),
+                learning_rate=0.001)
+            conv_out = fluid.layers.conv2d(
+                input=data,
+                num_filters=3,
+                filter_size=3,
+                padding=[2, 4, 6, 8],
+                bias_attr=param_attr)
+
+        self.feeds = {
+            "data": np.random.random((1, 3, 100, 100)).astype("float32")
+        }
+        self.fetch_list = [conv_out]
+        self.enable_mkldnn = True
+
+    def test_check_output(self):
+        use_gpu = False
+        self.check_output_with_option(use_gpu)
+
+
+# dilation not supported yet: the pass only logs a warning and does not fuse
+class ConvBiasMkldnnFusePassTest3(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(
+                name="data", shape=[-1, 3, 100, 100], dtype="float32")
+            param_attr = fluid.ParamAttr(
+                initializer=fluid.initializer.Xavier(uniform=False),
+                learning_rate=0.001)
+            conv_out = fluid.layers.conv2d(
+                input=data,
+                num_filters=3,
+                filter_size=3,
+                padding="VALID",
+                dilation=2,
+                groups=3,
+                bias_attr=param_attr,
+                use_cudnn=False,
+                act="softmax",
+                data_format="NCHW")
+
+        self.feeds = {
+            "data": np.random.random((1, 3, 100, 100)).astype("float32")
+        }
+        self.fetch_list = [conv_out]
+        self.enable_mkldnn = True
+
+    def test_check_output(self):
+        use_gpu = False
+        self.check_output_with_option(use_gpu)
+
+
+# all conv params set, except for dilation
+class ConvBiasMkldnnFusePassTest4(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(
+                name="data", shape=[-1, 3, 100, 100], dtype="float32")
+            param_attr = fluid.ParamAttr(
+                initializer=fluid.initializer.Xavier(uniform=False),
+                learning_rate=0.001)
+            conv_out = fluid.layers.conv2d(
+                input=data,
+                num_filters=3,
+                filter_size=3,
+                padding="VALID",
+                groups=3,
+                bias_attr=param_attr,
+                use_cudnn=False,
+                act="softmax",
+                data_format="NCHW")
+
+        self.feeds = {
+            "data": np.random.random((1, 3, 100, 100)).astype("float32")
+        }
+        self.fetch_list = [conv_out]
+        self.enable_mkldnn = True
+
+    def test_check_output(self):
+        use_gpu = False
+        self.check_output_with_option(use_gpu)
+
+
+if __name__ == "__main__":
+    unittest.main()