Unverified commit c5c13473, authored by: cc, committed by: GitHub

Add compatibility check for four mkldnn pass (#27364)

* Add pass compatibility check for four mkldnn pass, test=develop
上级 059bfd69
......@@ -14,6 +14,7 @@
#include "paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.h"
#include <vector>
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
......@@ -103,12 +104,32 @@ REGISTER_PASS(conv_activation_mkldnn_fuse_pass,
// Register the conv2d + relu fuse pass and declare the op-version
// combination it is compatible with (conv2d v0, relu v0). The pass
// version checker consults this registration at load time.
REGISTER_PASS(conv_relu_mkldnn_fuse_pass,
              paddle::framework::ir::ConvActivationFusePass);
REGISTER_PASS_CAPABILITY(conv_relu_mkldnn_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .EQ("conv2d", 0)
            .EQ("relu", 0));
// Register the conv2d + leaky_relu fuse pass. Note the LE comparator:
// any leaky_relu op version up to and including 1 is accepted.
REGISTER_PASS(conv_leaky_relu_mkldnn_fuse_pass,
              paddle::framework::ir::Conv2DLeakyReLUFusePass);
REGISTER_PASS_CAPABILITY(conv_leaky_relu_mkldnn_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .EQ("conv2d", 0)
            .LE("leaky_relu", 1));
// Register the conv2d + relu6 fuse pass with its compatible op versions
// (conv2d v0, relu6 v0).
REGISTER_PASS(conv_relu6_mkldnn_fuse_pass,
              paddle::framework::ir::Conv2DReLU6FusePass);
REGISTER_PASS_CAPABILITY(conv_relu6_mkldnn_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .EQ("conv2d", 0)
            .EQ("relu6", 0));
// Register the conv2d + swish fuse pass with its compatible op versions
// (conv2d v0, swish v0).
REGISTER_PASS(conv_swish_mkldnn_fuse_pass,
              paddle::framework::ir::Conv2DSwishFusePass);
REGISTER_PASS_CAPABILITY(conv_swish_mkldnn_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .EQ("conv2d", 0)
            .EQ("swish", 0));
......@@ -14,6 +14,7 @@
#include "paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.h"
#include <vector>
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
......@@ -123,3 +124,10 @@ void ConvConcatReLUFusePass::ApplyImpl(ir::Graph* graph) const {
// Register the (conv2d*) + concat + relu fuse pass and declare the
// op-version combination it supports (all at version 0).
REGISTER_PASS(conv_concat_relu_mkldnn_fuse_pass,
              paddle::framework::ir::ConvConcatReLUFusePass);
REGISTER_PASS_CAPABILITY(conv_concat_relu_mkldnn_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .EQ("conv2d", 0)
            .EQ("concat", 0)
            .EQ("relu", 0));
......@@ -15,6 +15,7 @@
#include "paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.h"
#include <paddle/fluid/string/pretty_log.h>
#include <vector>
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
......@@ -98,3 +99,10 @@ void MatmulTransposeReshapeMKLDNNPass::ApplyImpl(ir::Graph *graph) const {
// Register the matmul + transpose + reshape fuse pass and declare the
// op-version combination it supports (all at version 0).
REGISTER_PASS(matmul_transpose_reshape_fuse_pass,
              paddle::framework::ir::MatmulTransposeReshapeMKLDNNPass);
REGISTER_PASS_CAPABILITY(matmul_transpose_reshape_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .EQ("matmul", 0)
            .EQ("transpose", 0)
            .EQ("reshape", 0));
......@@ -18,6 +18,7 @@ limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/string/pretty_log.h"
namespace paddle {
......@@ -90,3 +91,9 @@ void ScaleMatmulFusePass::ApplyImpl(ir::Graph* graph) const {
// Register the scale + matmul fuse pass and declare the op-version
// combination it supports (scale v0, matmul v0).
REGISTER_PASS(scale_matmul_fuse_pass,
              paddle::framework::ir::ScaleMatmulFusePass);
REGISTER_PASS_CAPABILITY(scale_matmul_fuse_pass)
    .AddCombination(
        paddle::framework::compatible::OpVersionComparatorCombination()
            .EQ("scale", 0)
            .EQ("matmul", 0));
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import PassVersionChecker
class ConvActivationMkldnnFusePassTest(InferencePassTest):
    """Base test for the conv + activation MKLDNN fuse passes.

    Builds a single conv2d with a fused activation, checks the inference
    output with MKLDNN enabled, and verifies that the registered pass
    version is compatible.
    """

    def setUp(self):
        self.set_params()
        with fluid.program_guard(self.main_program, self.startup_program):
            net_input = fluid.data(
                name="data", shape=[-1, 3, 100, 100], dtype="float32")
            activated = fluid.layers.conv2d(
                net_input,
                num_filters=self.conv_num_filters,
                filter_size=self.conv_filter_size,
                bias_attr=self.conv_bias_attr,
                act=self.act)
        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32")
        }
        self.fetch_list = [activated]
        self.enable_mkldnn = True

    def set_params(self):
        # Default configuration: small, bias-free conv followed by relu.
        self.conv_num_filters = 3
        self.conv_filter_size = 3
        self.conv_bias_attr = False
        self.act = "relu"
        self.pass_name = 'conv_relu_mkldnn_fuse_pass'

    def test_check_output(self):
        # CPU-only run; the use_gpu option stays off for MKLDNN tests.
        self.check_output_with_option(False)

    def test_pass_compatible(self):
        self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
class ConvActivationMkldnnFusePassTest_1(ConvActivationMkldnnFusePassTest):
    """conv2d (5x5 filters, with bias) + relu variant."""

    def set_params(self):
        self.pass_name = 'conv_relu_mkldnn_fuse_pass'
        self.act = "relu"
        self.conv_bias_attr = True
        self.conv_filter_size = 5
        self.conv_num_filters = 5
class ConvActivationMkldnnFusePassTest_2(ConvActivationMkldnnFusePassTest):
    """conv2d (3x3 filters, no bias) + leaky_relu variant."""

    def set_params(self):
        self.pass_name = 'conv_leaky_relu_mkldnn_fuse_pass'
        self.act = "leaky_relu"
        self.conv_bias_attr = False
        self.conv_filter_size = 3
        self.conv_num_filters = 3
class ConvActivationMkldnnFusePassTest_3(ConvActivationMkldnnFusePassTest):
    """conv2d (5x5 filters, with bias) + leaky_relu variant."""

    def set_params(self):
        self.pass_name = 'conv_leaky_relu_mkldnn_fuse_pass'
        self.act = "leaky_relu"
        self.conv_bias_attr = True
        self.conv_filter_size = 5
        self.conv_num_filters = 5
class ConvActivationMkldnnFusePassTest_4(ConvActivationMkldnnFusePassTest):
    """conv2d (3x3 filters, no bias) + relu6 variant."""

    def set_params(self):
        self.pass_name = 'conv_relu6_mkldnn_fuse_pass'
        self.act = "relu6"
        self.conv_bias_attr = False
        self.conv_filter_size = 3
        self.conv_num_filters = 3
class ConvActivationMkldnnFusePassTest_5(ConvActivationMkldnnFusePassTest):
    """conv2d (5x5 filters, with bias) + swish variant.

    Renamed from a duplicate `ConvActivationMkldnnFusePassTest_4`: the
    repeated class name rebound the identifier, so the relu6 test case
    defined just above was shadowed and never collected by unittest.
    """

    def set_params(self):
        self.conv_num_filters = 5
        self.conv_filter_size = 5
        self.conv_bias_attr = True
        self.act = "swish"
        self.pass_name = 'conv_swish_mkldnn_fuse_pass'
# Allow running this test file directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import PassVersionChecker
class ConvConcatReluMkldnnFusePassTest_0(InferencePassTest):
    """Base test for the conv + concat + relu MKLDNN fuse pass.

    Builds two parallel conv2d branches whose outputs are concatenated and
    passed through relu, then checks the MKLDNN inference output and the
    pass version compatibility.
    """

    def setUp(self):
        self.set_params()
        with fluid.program_guard(self.main_program, self.startup_program):
            left = fluid.data(
                name="data_1", shape=[-1, 3, 100, 100], dtype="float32")
            right = fluid.data(
                name="data_2", shape=[-1, 3, 100, 100], dtype="float32")
            left_conv = fluid.layers.conv2d(
                left,
                num_filters=self.conv1_num_filters,
                filter_size=self.conv1_filter_size,
                padding=self.conv1_padding,
                bias_attr=self.conv1_bias_attr)
            right_conv = fluid.layers.conv2d(
                right,
                num_filters=self.conv2_num_filters,
                filter_size=self.conv2_filter_size,
                padding=self.conv2_padding,
                bias_attr=self.conv2_bias_attr)
            joined = fluid.layers.concat(
                [left_conv, right_conv], axis=self.concat_axis)
            relu_out = fluid.layers.relu(joined)
        self.feeds = {
            "data_1": np.random.random((1, 3, 100, 100)).astype("float32"),
            "data_2": np.random.random((1, 3, 100, 100)).astype("float32")
        }
        self.fetch_list = [relu_out]
        self.enable_mkldnn = True

    def set_params(self):
        # Two identical bias-free 3x3 convs, concatenated along axis 0.
        self.conv1_num_filters = 3
        self.conv1_filter_size = 3
        self.conv1_padding = 0
        self.conv1_bias_attr = False
        self.conv2_num_filters = 3
        self.conv2_filter_size = 3
        self.conv2_padding = 0
        self.conv2_bias_attr = False
        self.concat_axis = 0
        self.pass_name = "conv_concat_relu_mkldnn_fuse_pass"

    def test_check_output(self):
        # CPU-only run; MKLDNN is a CPU library.
        self.check_output_with_option(False)

    def test_pass_compatible(self):
        self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
class ConvConcatReluMkldnnFusePassTest_1(ConvConcatReluMkldnnFusePassTest_0):
    """Variant with asymmetric branches (different filter sizes, padding,
    bias settings) concatenated along the channel axis."""

    def set_params(self):
        self.pass_name = "conv_concat_relu_mkldnn_fuse_pass"
        self.concat_axis = 1
        # Branch 1: bias-free 3x3 conv, no padding.
        self.conv1_num_filters = 3
        self.conv1_filter_size = 3
        self.conv1_padding = 0
        self.conv1_bias_attr = False
        # Branch 2: biased 5x5 conv with padding 1.
        self.conv2_num_filters = 5
        self.conv2_filter_size = 5
        self.conv2_padding = 1
        self.conv2_bias_attr = True
# Allow running this test file directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import PassVersionChecker
class MatmulTransposeReshapeMkldnnFusePassTest(InferencePassTest):
    """Base test for the matmul + transpose + reshape MKLDNN fuse pass.

    Builds matmul -> transpose -> reshape, checks the MKLDNN inference
    output, and verifies pass version compatibility.

    Fix: the perm attribute was misspelled ``tranpose_perm``; it is renamed
    to ``transpose_perm`` here and in the subclass below (it is internal to
    this test file, written in ``set_params`` and read in ``setUp``).
    """

    def setUp(self):
        self.set_params()
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=self.data_shape, dtype="float32")
            weight = fluid.layers.create_parameter(
                shape=self.weight_shape, dtype="float32")
            matmul = fluid.layers.matmul(
                data,
                weight,
                transpose_x=self.transpose_x,
                transpose_y=self.transpose_y)
            transpose = fluid.layers.transpose(matmul, self.transpose_perm)
            reshape = fluid.layers.reshape(transpose, shape=self.reshape_shape)
        self.fetch_list = [reshape]
        self.enable_mkldnn = True

    def set_params(self):
        self.data_shape = [-1, 3, 100, 110]
        self.weight_shape = [1, 3, 110, 100]
        self.feeds = {
            "data": np.random.random((1, 3, 100, 110)).astype("float32")
        }
        self.transpose_x = False
        self.transpose_y = False
        # Swap the two middle axes of the 4-D matmul output before reshaping.
        self.transpose_perm = [0, 2, 1, 3]
        self.reshape_shape = [3, 100, 100]
        self.pass_name = 'matmul_transpose_reshape_fuse_pass'

    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)

    def test_pass_compatible(self):
        self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))


class MatmulTransposeReshapeMkldnnFusePassTest_1(
        MatmulTransposeReshapeMkldnnFusePassTest):
    """Variant with square operands and both matmul inputs transposed."""

    def set_params(self):
        self.data_shape = [-1, 3, 100, 100]
        self.weight_shape = [1, 3, 100, 100]
        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32")
        }
        self.transpose_x = True
        self.transpose_y = True
        self.transpose_perm = [0, 2, 1, 3]
        self.reshape_shape = [6, 50, 100]
        self.pass_name = 'matmul_transpose_reshape_fuse_pass'
# Allow running this test file directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
......@@ -20,26 +20,54 @@ from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import PassVersionChecker
class ScaleMatmulMkldnnFusePassTest(InferencePassTest):
    """Base test for the scale + matmul MKLDNN fuse pass.

    Builds scale -> matmul, checks the MKLDNN inference output, and
    verifies pass version compatibility.

    NOTE(review): the pasted diff interleaved lines of the removed
    ConvBnFusePassMKLDNNTest with this class (the old class header,
    conv2d call, and trailing assignments); this is the cleaned-up new
    version with those deleted lines dropped.
    """

    def setUp(self):
        self.set_params()
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[1, 3, 100, 100], dtype="float32")
            weight = fluid.layers.create_parameter(
                shape=[1, 3, 100, 100], dtype="float32")
            scale = fluid.layers.scale(data, scale=self.scale_scale)
            matmul = fluid.layers.matmul(
                scale,
                weight,
                transpose_x=self.transpose_x,
                transpose_y=self.transpose_y)
            self.fetch_list = [matmul]
        self.enable_mkldnn = True

    def set_params(self):
        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32")
        }
        self.scale_scale = 2.0
        self.transpose_x = False
        self.transpose_y = False
        self.pass_name = "scale_matmul_fuse_pass"

    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)

    def test_pass_compatible(self):
        self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
class ScaleMatmulMkldnnFusePassTest_1(ScaleMatmulMkldnnFusePassTest):
    """Variant with a larger scale factor and both matmul inputs transposed."""

    def set_params(self):
        self.pass_name = "scale_matmul_fuse_pass"
        self.scale_scale = 5.0
        self.transpose_x = True
        self.transpose_y = True
        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32")
        }
# Allow running this test file directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.