From f88065d31724a9aad7c9fbfc521c074ff0b42fae Mon Sep 17 00:00:00 2001
From: baoachun <962571062@qq.com>
Date: Thu, 23 Dec 2021 14:55:57 +0800
Subject: [PATCH] add mkldnn conv_elementwise_add_mkldnn_fuse_pass ut (#37612)

* add mkldnn conv_elementwise_add_mkldnn_fuse_pass ut

* update mkldnn conv_elementwise_add_mkldnn_fuse_pass ut

* update conv_elementwise_add_mkldnn_fuse_pass ut

* update conv_elementwise_add_mkldnn_fuse_pass ut

* update conv_elementwise_add_mkldnn_fuse_pass ut

* restrict conv2d data_format in conv_elementwise_add_mkldnn_fuse_pass

* update conv_elementwise_add_mkldnn_fuse_pass OpCompat

* update conv_elementwise_add_mkldnn_fuse_pass ut

* update ut
---
 .../conv_elementwise_add_mkldnn_fuse_pass.cc  |   4 +-
 .../unittests/ir/inference/CMakeLists.txt     |   1 +
 ...t_mkldnn_conv_elementwise_add_fuse_pass.py | 160 ++++++++++++++++++
 3 files changed, 163 insertions(+), 2 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py

diff --git a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc
index 8031f56752a..c537d057385 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc
@@ -117,7 +117,7 @@ ResidualConnectionMKLDNNFusePass::ResidualConnectionMKLDNNFusePass() {
       .IsType<std::vector<int>>()
       .End()
       .AddAttr("data_format")
-      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
+      .IsStringIn({"NCHW", "AnyLayout"})
       .End();
 
   AddOpCompat(OpCompat("elementwise_add"))
@@ -131,7 +131,7 @@ ResidualConnectionMKLDNNFusePass::ResidualConnectionMKLDNNFusePass() {
       .IsTensor()
      .End()
      .AddAttr("axis")
-      .IsIntIn({-1, 0})
+      .IsIntIn({-1, 0, 1})
      .End();
 }
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
index 75d182bd01e..055cb8ff91d 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
@@ -91,6 +91,7 @@ if (WITH_MKLDNN AND TENSORRT_FOUND AND WITH_GPU)
 endif()
 
 if (WITH_MKLDNN)
+    set_tests_properties(test_mkldnn_conv_elementwise_add_fuse_pass PROPERTIES TIMEOUT 120)
     set_tests_properties(test_mkldnn_depthwise_conv_pass PROPERTIES TIMEOUT 120)
     set_tests_properties(test_mkldnn_reshape_transpose_matmul_fuse_pass PROPERTIES TIMEOUT 100)
     set_tests_properties(test_mkldnn_prelu_op PROPERTIES TIMEOUT 300)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py
new file mode 100644
index 00000000000..66c547de2c2
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py
@@ -0,0 +1,160 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import PassAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+
+
+class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        attrs = [
+            program_config.ops[i].attrs
+            for i in range(len(program_config.ops))
+        ]
+        # TODO: remove this check once the NHWC data_format
+        # problem is fixed.
+        if attrs[1]['data_format'] == "NHWC":
+            return False
+
+        return True
+
+    def sample_program_config(self, draw):
+        data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
+        dilations = draw(st.sampled_from([[1, 1], [2, 2], [1, 2]]))
+        padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))
+        groups = draw(st.sampled_from([1, 2, 4]))
+        paddings = draw(st.sampled_from([[0, 3], [1, 1], [1, 2, 3, 4]]))
+        strides = draw(st.sampled_from([[1, 1], [2, 2], [1, 2]]))
+        axis = draw(st.sampled_from([-1, 0, 1]))
+        batch_size = draw(st.integers(min_value=1, max_value=4))
+
+        def generate_input1():
+            if data_format == "NCHW":
+                return np.random.random(
+                    [batch_size, 48, 64, 64]).astype(np.float32)
+            else:
+                return np.random.random(
+                    [batch_size, 64, 64, 48]).astype(np.float32)
+
+        def generate_weight1():
+            return np.random.random(
+                [48, int(48 / groups), 3, 3]).astype(np.float32)
+
+        def compute_out_shape(padding_alg):
+            import paddle
+            import paddle.nn as nn
+
+            x_var = paddle.uniform(
+                (batch_size, 48, 64, 64), dtype='float32', min=-1., max=1.)
+            if padding_alg == "EXPLICIT":
+                conv = nn.Conv2D(48, 48, (3, 3), strides, paddings, dilations,
+                                 1)
+            else:
+                conv = nn.Conv2D(48, 48, (3, 3), strides, padding_alg,
+                                 dilations, 1)
+            y_var = conv(x_var)
+            return y_var.shape
+
+        def generate_weight2():
+            return np.random.random([48]).astype(np.float32)
+
+        if compute_out_shape(padding_algorithm) != (batch_size, 48, 64, 64):
+            axis = 1
+
+        relu_op = OpConfig(
+            type="relu",
+            inputs={"X": ["input_data1"]},
+            outputs={"Out": ["sigmoid_out"]},
+            attrs={})
+
+        conv2d_op = OpConfig(
+            type="conv2d",
+            inputs={"Input": ["sigmoid_out"],
+                    "Filter": ["conv_weight"]},
+            outputs={"Output": ["conv_output"]},
+            attrs={
+                "data_format": data_format,
+                "dilations": dilations,
+                "padding_algorithm": padding_algorithm,
+                "groups": groups,
+                "paddings": paddings,
+                "strides": strides
+            })
+
+        if axis == -1 or axis == 0:
+            elt_op = OpConfig(
+                type="elementwise_add",
+                inputs={"X": ["input_data1"],
+                        "Y": ["conv_output"]},
+                outputs={"Out": ["elementwise_output"]},
+                attrs={'axis': axis})
+        else:
+            elt_op = OpConfig(
+                type="elementwise_add",
+                inputs={"X": ["conv_output"],
+                        "Y": ["elementwise_weight"]},
+                outputs={"Out": ["elementwise_output"]},
+                attrs={'axis': axis})
+
+        model_net = [relu_op, conv2d_op, elt_op]
+
+        if axis == 1:
+            program_config = ProgramConfig(
+                ops=model_net,
+                weights={
+                    "conv_weight":
+                    TensorConfig(data_gen=partial(generate_weight1)),
+                    "elementwise_weight":
+                    TensorConfig(data_gen=partial(generate_weight2))
+                },
+                inputs={
+                    "input_data1":
+                    TensorConfig(data_gen=partial(generate_input1))
+                },
+                outputs=["elementwise_output"])
+        else:
+            program_config = ProgramConfig(
+                ops=model_net,
+                weights={
+                    "conv_weight":
+                    TensorConfig(data_gen=partial(generate_weight1))
+                },
+                inputs={
+                    "input_data1":
+                    TensorConfig(data_gen=partial(generate_input1))
+                },
+                outputs=["elementwise_output"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config(use_mkldnn=True)
+        yield config, ["relu", "conv2d"], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(
+            quant=False, passes=["conv_elementwise_add_mkldnn_fuse_pass"])
+
+
+if __name__ == "__main__":
+    unittest.main()
-- 
GitLab
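Editor's note (not part of the patch, placed after the mail trailer): the test relies on Paddle's elementwise_add broadcast rule for the "axis" attribute, which is also why the pass's OpCompat check is relaxed from .IsIntIn({-1, 0}) to .IsIntIn({-1, 0, 1}). With axis == -1 or 0 both inputs must have the same shape, so the test adds the conv2d output to the original NCHW activation (a residual connection); with axis == 1 a 1-D tensor of length C is broadcast across the channel dimension, which is why that branch feeds a [48]-element "elementwise_weight" instead. The sketch below is a minimal NumPy illustration of that rule; the helper name elementwise_add_ref is illustrative only and is not a Paddle API.

# Illustrative sketch (not part of the patch): reference behavior of
# elementwise_add's axis-based broadcasting.
import numpy as np


def elementwise_add_ref(x, y, axis=-1):
    """Align y's dimensions with x starting at `axis` (axis == -1 aligns
    trailing dimensions), then broadcast-add, as elementwise_add does."""
    if axis == -1:
        axis = x.ndim - y.ndim
    # Pad y's shape with 1s so it lines up with x[axis : axis + y.ndim].
    shape = [1] * axis + list(y.shape) + [1] * (x.ndim - axis - y.ndim)
    return x + y.reshape(shape)


batch_size = 2
x = np.random.random([batch_size, 48, 64, 64]).astype(np.float32)

# axis == 0 (or -1 with equal ranks): residual add, shapes must match.
residual = np.random.random([batch_size, 48, 64, 64]).astype(np.float32)
assert elementwise_add_ref(x, residual, axis=0).shape == (batch_size, 48, 64, 64)

# axis == 1: a [48] vector is broadcast over the channel dimension of NCHW,
# matching the test's elementwise_weight branch.
channel_bias = np.random.random([48]).astype(np.float32)
assert elementwise_add_ref(x, channel_bias, axis=1).shape == (batch_size, 48, 64, 64)

Assuming a Paddle build with MKLDNN enabled and the auto_scan_test harness on PYTHONPATH, the new test can be run directly with "python test_mkldnn_conv_elementwise_add_fuse_pass.py" or through ctest under its registered name.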