From 18c0a00206129933750517a81fcba93fa256c5a7 Mon Sep 17 00:00:00 2001
From: Hulek
Date: Mon, 2 Jan 2023 12:21:42 +0100
Subject: [PATCH] Scale Matmul Fuse pass rewritten (#49105)

---
 paddle/fluid/framework/ir/CMakeLists.txt      |   4 -
 .../mkldnn/scale_matmul_fuse_pass_tester.cc   | 117 -------------
 .../test_mkldnn_scale_matmul_fuse_pass.py     | 155 ++++++++----------
 3 files changed, 69 insertions(+), 207 deletions(-)
 delete mode 100644 paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass_tester.cc

diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index 3c2e8bf85a7..088847d1f6f 100644
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -429,10 +429,6 @@ if(WITH_MKLDNN)
     test_conv_batch_norm_mkldnn_fuse_pass
     SRCS mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc
     DEPS ${TEST_CONV_BN_PASS_DEPS})
-  cc_test(
-    test_scale_matmul_fuse_pass
-    SRCS mkldnn/scale_matmul_fuse_pass_tester.cc
-    DEPS scale_matmul_fuse_pass)
   cc_test(
     test_mkldnn_placement_pass
     SRCS mkldnn/mkldnn_placement_pass_tester.cc
diff --git a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass_tester.cc
deleted file mode 100644
index ed6e63615f7..00000000000
--- a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass_tester.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <gtest/gtest.h>
-
-#include "paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.h"
-
-namespace paddle {
-namespace framework {
-namespace ir {
-
-void SetOp(ProgramDesc* prog,
-           const std::string& type,
-           const std::vector<std::string>& inputs,
-           const std::vector<std::string>& outputs,
-           float scale = 1.0f,
-           float bias = 0.0f) {
-  auto* op = prog->MutableBlock(0)->AppendOp();
-
-  op->SetType(type);
-  if (type == "scale") {
-    op->SetInput("X", {inputs[0]});
-    op->SetAttr("scale", scale);
-    op->SetAttr("bias", bias);
-  } else if (type == "matmul") {
-    op->SetAttr("transpose_X", false);
-    op->SetAttr("transpose_Y", false);
-    op->SetInput("X", {inputs[0]});
-    if (inputs.size() > 1) op->SetInput("Y", {inputs[1]});
-    op->SetAttr("alpha", scale);
-  } else {
-    FAIL() << "Unexpected operator type.";
-  }
-  op->SetOutput("Out", {outputs[0]});
-}
-
-// a->scale->b
-// (b,c)->matmul->d
-ProgramDesc BuildProgramDesc(float scale, float bias, float alpha) {
-  ProgramDesc prog;
-
-  for (auto& v : std::vector<std::string>({"a", "b", "c", "d"})) {
-    prog.MutableBlock(0)->Var(v);
-  }
-  SetOp(&prog, "scale", {"a"}, {"b"}, scale, bias);
-  SetOp(&prog, "matmul", {"b", "c"}, {"d"}, alpha);
-  return prog;
-}
-
-void MainTest(const ProgramDesc& prog,
-              int removed_nodes_count,
-              const std::vector<std::string> scale_in_out,
-              const std::vector<std::string> matmul_in_out,
-              float alpha) {
-  std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));
-  int original_nodes_num = graph->Nodes().size();
-  auto pass = PassRegistry::Instance().Get("scale_matmul_fuse_pass");
-  graph.reset(pass->Apply(graph.release()));
-  int current_nodes_num = graph->Nodes().size();
-
-  for (auto* node : graph->Nodes()) {
-    if (node->IsOp()) {
-      auto* op = node->Op();
-      if (op->Type() == "scale") {
-        EXPECT_EQ(op->Input("X")[0], scale_in_out[0]);
-        EXPECT_EQ(op->Output("Out")[0], scale_in_out[1]);
-      } else if (op->Type() == "matmul") {
-        EXPECT_EQ(op->Input("X")[0], matmul_in_out[0]);
-        EXPECT_EQ(op->Input("Y")[0], matmul_in_out[1]);
-        EXPECT_EQ(op->Output("Out")[0], matmul_in_out[2]);
-        EXPECT_EQ(op->GetAttrIfExists<float>("alpha"), alpha);
-      }
-    }
-  }
-  EXPECT_EQ(original_nodes_num - removed_nodes_count, current_nodes_num);
-}
-
-TEST(ScaleMatmulFusePass, scale_matmul_with_no_bias) {
-  auto bias = 0.0f;
-  auto scale = 2.34f;
-  auto alpha = 3.45f;
-  int removed_nodes_count = 2;
-  MainTest(BuildProgramDesc(scale, bias, alpha),
-           removed_nodes_count,
-           {},
-           {"a", "c", "d"},
-           scale * alpha);
-}
-
-TEST(ScaleMatmulFusePass, scale_matmul_with_bias) {
-  auto bias = 1.0f;
-  auto scale = 2.34f;
-  auto alpha = 3.45f;
-  int removed_nodes_count = 0;
-  MainTest(BuildProgramDesc(scale, bias, alpha),
-           removed_nodes_count,
-           {"a", "b"},
-           {"b", "c", "d"},
-           alpha);
-}
-
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
-
-USE_PASS(scale_matmul_fuse_pass);
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py
index d696fa44f5a..5c1a1162561 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py
@@ -37,74 +37,55 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest):
         input_dim = draw(st.sampled_from([1, 32, 64]))
 
         def generate_input(attrs, type):
-            if attrs[1]['transpose_X'] and attrs[1]['transpose_Y']:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    32,
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    64,
-                    attrs[2]['input_dim'],
-                ]
-            elif attrs[1]['transpose_X']:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    32,
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    64,
-                ]
-            elif attrs[1]['transpose_Y']:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    32,
-                    attrs[2]['input_dim'],
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    8,
-                    attrs[2]['input_dim'],
-                ]
+            is_transpose_X = attrs[1]['transpose_X']
+            is_transpose_Y = attrs[1]['transpose_Y']
+
+            if is_transpose_X:
+                shape_x_3 = attrs[2]['input_dim']
+                shape_x_4 = 32
             else:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    32,
-                    attrs[2]['input_dim'],
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    16,
-                ]
-
-            if type == "x":
-                return np.random.random(shape_x).astype(np.float32)
+                shape_x_3 = 32
+                shape_x_4 = attrs[2]['input_dim']
+
+            if is_transpose_X and is_transpose_Y:
+                shape_y_3 = 64
+                shape_y_4 = attrs[2]['input_dim']
+            elif is_transpose_X:
+                shape_y_3 = attrs[2]['input_dim']
+                shape_y_4 = 64
+            elif is_transpose_Y:
+                shape_y_3 = 8
+                shape_y_4 = attrs[2]['input_dim']
             else:
-                return np.random.random(shape_y).astype(np.float32)
+                shape_y_3 = attrs[2]['input_dim']
+                shape_y_4 = 16
+
+            shape_x = [
+                attrs[2]['batch_size'],
+                attrs[2]['channel'],
+                shape_x_3,
+                shape_x_4,
+            ]
+            shape_y = [
+                attrs[2]['batch_size'],
+                attrs[2]['channel'],
+                shape_y_3,
+                shape_y_4,
+            ]
+
+            shape = shape_x if type == 'x' else shape_y
+            return np.random.random(shape).astype(np.float32)
 
         attrs = [
             {
-                "scale": scale,
-                "bias": bias,
-                "bias_after_scale": bias_after_scale,
+                'scale': scale,
+                'bias': bias,
+                'bias_after_scale': bias_after_scale,
             },
             {
-                "transpose_X": transpose_X,
-                "transpose_Y": transpose_Y,
-                "alpha": alpha,
+                'transpose_X': transpose_X,
+                'transpose_Y': transpose_Y,
+                'alpha': alpha,
             },
             {
                 'batch_size': batch_size,
@@ -115,29 +96,29 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest):
 
         ops_config = [
             {
-                "op_type": "scale",
-                "op_inputs": {"X": ["input_data1"]},
-                "op_outputs": {"Out": ["scale_output"]},
-                "op_attrs": {
-                    "scale": attrs[0]['scale'],
-                    "bias": attrs[0]['bias'],
-                    "bias_after_scale": attrs[0]['bias_after_scale'],
+                'op_type': 'scale',
+                'op_inputs': {'X': ['input_data1']},
+                'op_outputs': {'Out': ['scale_output']},
+                'op_attrs': {
+                    'scale': attrs[0]['scale'],
+                    'bias': attrs[0]['bias'],
+                    'bias_after_scale': attrs[0]['bias_after_scale'],
                 },
             },
             {
-                "op_type": "matmul",
-                "op_inputs": {"X": ["scale_output"], "Y": ["input_data2"]},
-                "op_outputs": {"Out": ["matmul_output"]},
-                "op_attrs": {
+                'op_type': 'matmul',
+                'op_inputs': {'X': ['scale_output'], 'Y': ['input_data2']},
+                'op_outputs': {'Out': ['matmul_output']},
+                'op_attrs': {
                     'transpose_X': attrs[1]['transpose_X'],
                     'transpose_Y': attrs[1]['transpose_Y'],
                     'alpha': attrs[1]['alpha'],
-                    "fused_reshape_X": [],
-                    "fused_reshape_Y": [],
-                    "fused_transpose_X": [],
-                    "fused_transpose_Y": [],
-                    "fused_reshape_Out": [],
-                    "fused_transpose_Out": [],
+                    'fused_reshape_X': [],
+                    'fused_reshape_Y': [],
+                    'fused_transpose_X': [],
+                    'fused_transpose_Y': [],
+                    'fused_reshape_Out': [],
+                    'fused_transpose_Out': [],
                 },
             },
         ]
@@ -148,25 +129,27 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest):
             ops=ops,
             weights={},
             inputs={
-                "input_data1": TensorConfig(
-                    data_gen=partial(generate_input, attrs, "x")
+                'input_data1': TensorConfig(
+                    data_gen=partial(generate_input, attrs, 'x')
                 ),
- "input_data2": TensorConfig( - data_gen=partial(generate_input, attrs, "y") + 'input_data2': TensorConfig( + data_gen=partial(generate_input, attrs, 'y') ), }, - outputs=["matmul_output"], + outputs=['matmul_output'], ) return program_config def sample_predictor_configs(self, program_config): - config = self.create_inference_config(use_mkldnn=True) + config = self.create_inference_config( + use_mkldnn=True, passes=['scale_matmul_fuse_pass'] + ) yield config, ['matmul'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, passes=["scale_matmul_fuse_pass"]) + self.run_and_statis(quant=False, passes=['scale_matmul_fuse_pass']) -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() -- GitLab