From bce1e5724fc65ffe0ea18eeee7690d2ea9a15c99 Mon Sep 17 00:00:00 2001
From: baoachun <962571062@qq.com>
Date: Wed, 15 Dec 2021 20:28:22 +0800
Subject: [PATCH] update mkldnn scale_matmul fuse pass ut (#37210)

* update mkldnn scale_matmul fuse pass ut

* update mkldnn scale_matmul_fuse_pass ut
---
 .../test_mkldnn_scale_matmul_fuse_pass.py     | 196 +++++++++++++-----
 1 file changed, 141 insertions(+), 55 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py
index 55a6b543f0a..86acbe615b3 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,61 +12,147 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import print_function
-
-import unittest
+from auto_scan_test import PassAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig
 import numpy as np
-from inference_pass_test import InferencePassTest
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.core import AnalysisConfig
-from paddle.fluid.core import PassVersionChecker
-
-
-class ScaleMatmulMkldnnFusePassTest(InferencePassTest):
-    def setUp(self):
-        self.set_params()
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[1, 3, 100, 100], dtype="float32")
-            weight = fluid.layers.create_parameter(
-                shape=[1, 3, 100, 100], dtype="float32")
-            scale = fluid.layers.scale(data, scale=self.scale_scale)
-            matmul = fluid.layers.matmul(
-                scale,
-                weight,
-                transpose_x=self.transpose_x,
-                transpose_y=self.transpose_y)
-
-        self.fetch_list = [matmul]
-        self.enable_mkldnn = True
-
-    def set_params(self):
-        self.feeds = {
-            "data": np.random.random((1, 3, 100, 100)).astype("float32")
-        }
-        self.scale_scale = 2.0
-        self.transpose_x = False
-        self.transpose_y = False
-        self.pass_name = "scale_matmul_fuse_pass"
-
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
-    def test_pass_compatible(self):
-        self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
-
-
-class ScaleMatmulMkldnnFusePassTest_1(ScaleMatmulMkldnnFusePassTest):
-    def set_params(self):
-        self.feeds = {
-            "data": np.random.random((1, 3, 100, 100)).astype("float32")
-        }
-        self.scale_scale = 5.0
-        self.transpose_x = True
-        self.transpose_y = True
-        self.pass_name = "scale_matmul_fuse_pass"
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+
+
+class TestScaleMatmulMkldnnFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        return True
+
+    def sample_program_config(self, draw):
+        scale = draw(st.floats(min_value=0.01, max_value=2))
+        bias = 0.0
+        bias_after_scale = draw(st.booleans())
+        transpose_X = draw(st.booleans())
+        transpose_Y = draw(st.booleans())
+        alpha = draw(st.floats(min_value=0.01, max_value=2))
+        batch_size = draw(st.integers(min_value=1, max_value=4))
+        channel = draw(st.integers(min_value=1, max_value=64))
+        input_dim = draw(st.sampled_from([1, 32, 64]))
+
+        def generate_input(attrs, type):
+            if attrs[1]['transpose_X'] and attrs[1]['transpose_Y']:
+                shape_x = [
+                    attrs[2]['batch_size'], attrs[2]['channel'],
+                    attrs[2]['input_dim'], 32
+                ]
+                shape_y = [
+                    attrs[2]['batch_size'], attrs[2]['channel'], 64,
+                    attrs[2]['input_dim']
+                ]
+            elif attrs[1]['transpose_X']:
+                shape_x = [
+                    attrs[2]['batch_size'], attrs[2]['channel'],
+                    attrs[2]['input_dim'], 32
+                ]
+                shape_y = [
+                    attrs[2]['batch_size'], attrs[2]['channel'],
+                    attrs[2]['input_dim'], 64
+                ]
+            elif attrs[1]['transpose_Y']:
+                shape_x = [
+                    attrs[2]['batch_size'], attrs[2]['channel'], 32,
+                    attrs[2]['input_dim']
+                ]
+                shape_y = [
+                    attrs[2]['batch_size'], attrs[2]['channel'], 8,
+                    attrs[2]['input_dim']
+                ]
+            else:
+                shape_x = [
+                    attrs[2]['batch_size'], attrs[2]['channel'], 32,
+                    attrs[2]['input_dim']
+                ]
+                shape_y = [
+                    attrs[2]['batch_size'], attrs[2]['channel'],
+                    attrs[2]['input_dim'], 16
+                ]
+
+            if type == "x":
+                return np.random.random(shape_x).astype(np.float32)
+            else:
+                return np.random.random(shape_y).astype(np.float32)
+
+        attrs = [{
+            "scale": scale,
+            "bias": bias,
+            "bias_after_scale": bias_after_scale
+        }, {
+            "transpose_X": transpose_X,
+            "transpose_Y": transpose_Y,
+            "alpha": alpha
+        }, {
+            'batch_size': batch_size,
+            'channel': channel,
+            'input_dim': input_dim
+        }]
+
+        ops_config = [{
+            "op_type": "scale",
+            "op_inputs": {
+                "X": ["input_data1"]
+            },
+            "op_outputs": {
+                "Out": ["scale_output"]
+            },
+            "op_attrs": {
+                "scale": attrs[0]['scale'],
+                "bias": attrs[0]['bias'],
+                "bias_after_scale": attrs[0]['bias_after_scale']
+            },
+        }, {
+            "op_type": "matmul",
+            "op_inputs": {
+                "X": ["scale_output"],
+                "Y": ["input_data2"]
+            },
+            "op_outputs": {
+                "Out": ["matmul_output"]
+            },
+            "op_attrs": {
+                'transpose_X': attrs[1]['transpose_X'],
+                'transpose_Y': attrs[1]['transpose_Y'],
+                'alpha': attrs[1]['alpha'],
+                "fused_reshape_X": [],
+                "fused_reshape_Y": [],
+                "fused_transpose_X": [],
+                "fused_transpose_Y": [],
+                "fused_reshape_Out": [],
+                "fused_transpose_Out": []
+            }
+        }]
+
+        ops = self.generate_op_config(ops_config)
+
+        program_config = ProgramConfig(
+            ops=ops,
+            weights={},
+            inputs={
+                "input_data1":
+                TensorConfig(data_gen=partial(generate_input, attrs, "x")),
+                "input_data2":
+                TensorConfig(data_gen=partial(generate_input, attrs, "y"))
+            },
+            outputs=["matmul_output"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config(use_mkldnn=True)
+        yield config, ['matmul'], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(quant=False, passes=["scale_matmul_fuse_pass"])
 
 
 if __name__ == "__main__":
--
GitLab