Unverified commit aadc8674, authored by baoachun, committed by GitHub

update squared_mat_sub_fuse_pass ut (#37838)

* update squared_mat_sub_fuse_pass ut

* update ut

* update ut
Parent: dc7597e3
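
The subgraph the updated unit test builds — matmul followed by square on one branch, square of each input followed by matmul on the other, an elementwise_sub, and an elementwise_mul with a fill_constant scalar — computes square(X·Y) − X²·Y² scaled by a constant. A minimal NumPy sketch of that reference computation (the shapes and the 0.5 scale are illustrative, not taken verbatim from the test):

import numpy as np

# Reference computation for the pattern squared_mat_sub_fuse_pass targets:
# square(matmul(X, Y)) - matmul(square(X), square(Y)), scaled by a constant.
x = np.random.random((32, 64)).astype(np.float32)
y = np.random.random((64, 16)).astype(np.float32)
scale = 0.5

fused_out = scale * (np.square(x @ y) - np.square(x) @ np.square(y))
print(fused_out.shape)  # (32, 16)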
@@ -398,8 +398,7 @@ SquaredMatSubFusePass::SquaredMatSubFusePass() {
       .IsTensor()
       .End()
       .AddAttr("alpha")
-      .IsNumGE(0.99f)
-      .IsNumLE(1.01f)
+      .IsNumEQ(1.0f)
       .End()
       .AddAttr("transpose_X")
       .IsBoolEQ(false)
@@ -465,6 +464,10 @@ SquaredMatSubFusePass::SquaredMatSubFusePass() {
       .End()
       // type:float,there is no restriction
       .AddAttr("value")
+      .End()
+      .AddAttr("str_value")
+      .IsStringEQ("")
+      .IsOptional()
       .End();
 }
...
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
...
@@ -12,53 +12,160 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import print_function
-
-import unittest
-import numpy as np
-from inference_pass_test import InferencePassTest
-import paddle
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.core import AnalysisConfig
-from paddle.fluid.core import PassVersionChecker
-
-
-class SquaredMatSubFusePassTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data_a = fluid.data(name="data_a", shape=[128, 1], dtype="float32")
-            data_b = fluid.data(name="data_b", shape=[256, 1], dtype="float32")
-
-            fc_a = fluid.layers.fc(data_a, size=256)
-            fc_b = fluid.layers.fc(data_b, size=64)
-
-            data_a_square = paddle.square(fc_a)
-            data_b_square = paddle.square(fc_b)
-
-            matmul_ab = paddle.matmul(fc_a, fc_b)
-            matmul_ab_square = paddle.square(matmul_ab)
-            matmul_square_ab = paddle.matmul(data_a_square, data_b_square)
-
-            scale = paddle.fluid.layers.fill_constant(
-                shape=[1], value=0.5, dtype='float32')
-
-            sub_val = paddle.fluid.layers.elementwise_sub(matmul_ab_square,
-                                                          matmul_square_ab)
-            squared_mat_sub_out = fluid.layers.elementwise_mul(sub_val, scale)
-
-        self.feeds = {
-            "data_a": np.random.random((128, 1)).astype("float32"),
-            "data_b": np.random.random((256, 1)).astype("float32")
-        }
-        self.fetch_list = [squared_mat_sub_out]
-
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
-        self.assertTrue(
-            PassVersionChecker.IsCompatible('squared_mat_sub_fuse_pass'))
+from auto_scan_test import PassAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+
+
+class TestSquaredMatSubFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        return True
+
+    def sample_program_config(self, draw):
+        transpose_X = False
+        transpose_Y = False
+        alpha1 = 1.0
+        alpha2 = 1.0
+        axis1 = draw(st.sampled_from([-1, 0]))
+        place_type = draw(st.sampled_from([-1, 0]))
+        has_str_value = draw(st.booleans())
+        str_value = ''
+        value = draw(st.floats(min_value=-10, max_value=10))
+        shape = draw(st.sampled_from([[1]]))
+        axis2 = draw(st.sampled_from([-1, 0]))
+        input_dim = draw(st.sampled_from([32, 64]))
+
+        def generate_input(type):
+            shape_x = [32, input_dim]
+            shape_y = [input_dim, 16]
+            if type == "x":
+                return np.random.random(shape_x).astype(np.float32)
+            else:
+                return np.random.random(shape_y).astype(np.float32)
+
+        matmul_op1 = OpConfig(
+            type="matmul",
+            inputs={"X": ["input_data1"],
+                    "Y": ["input_data2"]},
+            outputs={"Out": ["matmul1_output"]},
+            attrs={
+                "transpose_X": transpose_X,
+                "transpose_Y": transpose_Y,
+                "alpha": alpha1,
+                "fused_reshape_X": [],
+                "fused_reshape_Y": [],
+                "fused_transpose_X": [],
+                "fused_transpose_Y": [],
+                "fused_reshape_Out": [],
+                "fused_transpose_Out": []
+            })
+
+        square_op1 = OpConfig(
+            type="square",
+            inputs={"X": ["matmul1_output"]},
+            outputs={"Out": ["square1_output"]},
+            attrs={})
+
+        square_op2 = OpConfig(
+            type="square",
+            inputs={"X": ["input_data1"]},
+            outputs={"Out": ["square2_output"]},
+            attrs={})
+
+        square_op3 = OpConfig(
+            type="square",
+            inputs={"X": ["input_data2"]},
+            outputs={"Out": ["square3_output"]},
+            attrs={})
+
+        matmul_op2 = OpConfig(
+            type="matmul",
+            inputs={"X": ["square2_output"],
+                    "Y": ["square3_output"]},
+            outputs={"Out": ["matmul2_output"]},
+            attrs={
+                "transpose_X": transpose_X,
+                "transpose_Y": transpose_Y,
+                "alpha": alpha2,
+                "fused_reshape_X": [],
+                "fused_reshape_Y": [],
+                "fused_transpose_X": [],
+                "fused_transpose_Y": [],
+                "fused_reshape_Out": [],
+                "fused_transpose_Out": []
+            })
+
+        elt_sub_op = OpConfig(
+            type="elementwise_sub",
+            inputs={"X": ["square1_output"],
+                    "Y": ["matmul2_output"]},
+            outputs={"Out": ["sub_out"]},
+            attrs={"axis": axis1})
+
+        if has_str_value:
+            fill_constant_op = OpConfig(
+                type="fill_constant",
+                inputs={},
+                outputs={"Out": ["constant_out"]},
+                attrs={
+                    "dtype": 5,
+                    "place_type": place_type,
+                    "str_value": str_value,
+                    "value": value,
+                    "shape": shape
+                })
+        else:
+            fill_constant_op = OpConfig(
+                type="fill_constant",
+                inputs={},
+                outputs={"Out": ["constant_out"]},
+                attrs={
+                    "dtype": 5,
+                    "place_type": place_type,
+                    "value": value,
+                    "shape": shape
+                })
+
+        elt_mul_op = OpConfig(
+            type="elementwise_mul",
+            inputs={"X": ["sub_out"],
+                    "Y": ["constant_out"]},
+            outputs={"Out": ["mul_out"]},
+            attrs={"axis": axis2})
+
+        model_net = [
+            matmul_op1, square_op1, square_op2, square_op3, matmul_op2,
+            elt_sub_op, fill_constant_op, elt_mul_op
+        ]
+
+        program_config = ProgramConfig(
+            ops=model_net,
+            weights={},
+            inputs={
+                "input_data1":
+                TensorConfig(data_gen=partial(generate_input, "x")),
+                "input_data2":
+                TensorConfig(data_gen=partial(generate_input, "y"))
+            },
+            outputs=["mul_out"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config()
+        yield config, ["fusion_squared_mat_sub"], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(quant=False, passes=["squared_mat_sub_fuse_pass"])
 
 
 if __name__ == "__main__":
...
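
For readers unfamiliar with the hypothesis-driven sampling used in sample_program_config above, here is a standalone sketch, independent of Paddle; check_sampled_attrs is a name chosen here for illustration, and it only demonstrates the ranges the test draws from (axis in {-1, 0}, an optional str_value flag, a float constant in [-10, 10]):

import hypothesis.strategies as st
from hypothesis import given, settings


@given(
    axis=st.sampled_from([-1, 0]),
    has_str_value=st.booleans(),
    value=st.floats(min_value=-10, max_value=10))
@settings(max_examples=10)
def check_sampled_attrs(axis, has_str_value, value):
    # Mirrors the attribute ranges drawn in sample_program_config.
    assert axis in (-1, 0)
    assert isinstance(has_str_value, bool)
    assert -10.0 <= value <= 10.0


check_sampled_attrs()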