Unverified commit f664a533, authored by baoachun, committed by GitHub

add matmulv2_transpose_reshape_pass ut (#37416)

* update mkldnn matmul_v2_transpose_reshape_fuse_pass ut

* update mkldnn matmul_v2_transpose_reshape_fuse_pass ut

* update ut

* update ut
Parent commit: b0c7144a
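For orientation (this note is not part of the commit): the pass under test, `matmul_v2_transpose_reshape_fuse_pass`, folds a `matmul_v2` → `transpose2` → `reshape2` chain into a single fused `matmul_v2` op. A minimal numpy sketch of the shape flow such a pattern produces, with shapes chosen purely for illustration in the style the test samples:

```python
import numpy as np

# Illustrative shapes only: X is [batch, channel, M, K], Y is [batch, channel, K, N]
x = np.random.random((1, 16, 32, 64)).astype(np.float32)
y = np.random.random((1, 16, 64, 16)).astype(np.float32)

out = np.matmul(x, y)              # matmul_v2                  -> (1, 16, 32, 16)
out = out.transpose((0, 2, 1, 3))  # transpose2, axis=[0,2,1,3] -> (1, 32, 16, 16)
out = out.reshape((1, -1, 32))     # reshape2, shape=[1,-1,32]  -> (1, 256, 32)
print(out.shape)                   # (1, 256, 32)
```

After fusion, the transpose and reshape are expressed through the fused op's `fused_transpose_Out` and `fused_reshape_Out` attributes instead of standalone ops, which is why the operator's shape inference (first hunk below) must resolve entries such as `-1` in `fused_reshape_Out` itself.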
```diff
@@ -194,9 +194,32 @@ class MatMulV2Op : public framework::OperatorWithKernel {
                 "received %d",
                 reshape_out_size));
-        auto it = std::find(reshape_out.begin(), reshape_out.end(), -1);
+        // int num_negative = std::count(reshape_out.begin(), reshape_out.end(),
+        // -1);
+        // PADDLE_ENFORCE_LE(num_negative, 1,
+        //                   platform::errors::InvalidArgument(
+        //                       "The max number of -1 in fused_reshape_Out is 1 "
+        //                       "but received %d.",
+        //                       num_negative));
+        // auto it_zero = std::find(reshape_out.begin(), reshape_out.end(), 0);
+        // if (it_zero != reshape_out.end()) {
+        //   for (uint64_t i = 0; i < reshape_out.size(); i++) {
+        //     if (reshape_out[i] == 0) {
+        //       PADDLE_ENFORCE_LT(
+        //           i, ddim_out.size(),
+        //           platform::errors::InvalidArgument(
+        //               "The index of 0 in fused_reshape_Out ",
+        //               "should be less than output dim size, ",
+        //               "but the index is %d and output dim size is %d", i,
+        //               ddim_out.size()));
+        //       reshape_out[i] = ddim_out.at(i);
+        //     }
+        //   }
+        // }
         // if "-1" is present then one of reshape dims must be infered
+        auto it = std::find(reshape_out.begin(), reshape_out.end(), -1);
         if (it != reshape_out.end()) {
           int index = std::distance(reshape_out.begin(), it);
```
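The code that survives the hunk only infers a single `-1` entry in `fused_reshape_Out`; the newly commented-out block would additionally have enforced that at most one `-1` appears and expanded `0` entries to the matching output dim. A hedged Python sketch of that intended semantics (an illustration of the rule, not the shipped C++):

```python
def resolve_reshape(reshape_out, ddim_out):
    """Illustrative restatement of the intended fused_reshape_Out rules:
    at most one -1 (inferred from the remaining dims), and a 0 copies
    the dim at the same index from the op's output shape."""
    assert reshape_out.count(-1) <= 1, "at most one -1 is allowed"
    resolved = []
    for i, d in enumerate(reshape_out):
        if d == 0:
            # mirrors the commented-out PADDLE_ENFORCE_LT bounds check
            assert i < len(ddim_out), "index of 0 must be within output rank"
            resolved.append(ddim_out[i])
        else:
            resolved.append(d)
    if -1 in resolved:
        total = 1
        for d in ddim_out:
            total *= d
        known = 1
        for d in resolved:
            if d != -1:
                known *= d
        resolved[resolved.index(-1)] = total // known
    return resolved


# e.g. output dims (2, 32, 16, 16) reshaped with [0, -1, 32] -> [2, 256, 32]
print(resolve_reshape([0, -1, 32], (2, 32, 16, 16)))
```

Since the `0`-expansion branch is commented out in the operator, the new unit test explicitly skips shapes containing `0` (see `is_program_valid` below), and the CMake change gives the auto-scan test a longer timeout: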
```diff
@@ -101,6 +101,7 @@ if (WITH_MKLDNN)
     set_tests_properties(test_mkldnn_conv_hard_sigmoid_fuse_pass PROPERTIES TIMEOUT 300)
     set_tests_properties(test_mkldnn_conv_hard_swish_fuse_pass PROPERTIES TIMEOUT 300)
     set_tests_properties(test_mkldnn_batch_norm_act_fuse_pass PROPERTIES TIMEOUT 100)
+    set_tests_properties(test_mkldnn_matmul_v2_transpose_reshape_fuse_pass PROPERTIES TIMEOUT 100)
     set_tests_properties(test_mkldnn_conv_transpose_bias_fuse_pass PROPERTIES TIMEOUT 100)
     set_tests_properties(test_conv_eltwiseadd_bn_fuse_pass PROPERTIES TIMEOUT 300)
 endif()
```
The hand-written `InferencePassTest` is replaced wholesale by a hypothesis-driven `PassAutoScanTest`:

```diff
@@ -12,71 +12,142 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import print_function
-
-import unittest
-import numpy as np
-from inference_pass_test import InferencePassTest
-import paddle
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.core import AnalysisConfig
-from paddle.fluid.core import PassVersionChecker
-
-
-class TestMatmulV2OneDNNTransposeReshapeFusePass(InferencePassTest):
-    def setUp(self):
-        self.set_params()
-        self.tranpose_perm = [0, 2, 1, 3]
-        self.pass_name = 'matmul_v2_transpose_reshape_fuse_pass'
-
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=self.data_shape, dtype="float32")
-            weight = fluid.layers.create_parameter(
-                shape=self.weight_shape, dtype="float32")
-            matmul = paddle.matmul(
-                data,
-                weight,
-                transpose_x=self.transpose_x,
-                transpose_y=self.transpose_y)
-            transpose = fluid.layers.transpose(matmul, self.tranpose_perm)
-            reshape = fluid.layers.reshape(transpose, shape=self.reshape_shape)
-
-        self.fetch_list = [reshape]
-        self.enable_mkldnn = True
-
-    def set_params(self):
-        self.data_shape = [-1, 3, 100, 110]
-        self.weight_shape = [1, 3, 110, 100]
-        self.feeds = {
-            "data": np.random.random((1, 3, 100, 110)).astype("float32")
-        }
-        self.transpose_x = False
-        self.transpose_y = False
-        self.reshape_shape = [3, 100, 100]
-
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
-    def test_pass_compatible(self):
-        self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
-
-
-class TestMatmulV2OneDNNTransposeReshapeFusePassDifferentDims(
-        TestMatmulV2OneDNNTransposeReshapeFusePass):
-    def set_params(self):
-        self.data_shape = [-1, 4, 100, 80]
-        self.weight_shape = [1, 4, 80, 100]
-        self.feeds = {
-            "data": np.random.random((1, 4, 100, 80)).astype("float32")
-        }
-        self.transpose_x = True
-        self.transpose_y = True
-        self.reshape_shape = [8, 40, 80]
+from auto_scan_test import PassAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+
+
+class TestMatmulv2TransposeReshapeMkldnnFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        if program_config.inputs["input_data1"].shape[
+                -4] != 1 and program_config.inputs["input_data2"].shape[
+                    -4] != 1:
+            if program_config.inputs["input_data1"].shape[
+                    -4] != program_config.inputs["input_data2"].shape[-4]:
+                return False
+
+        if program_config.inputs["input_data1"].shape[
+                -3] != 1 and program_config.inputs["input_data2"].shape[
+                    -3] != 1:
+            if program_config.inputs["input_data1"].shape[
+                    -3] != program_config.inputs["input_data2"].shape[-3]:
+                return False
+
+        attrs = [
+            program_config.ops[i].attrs
+            for i in range(len(program_config.ops))
+        ]
+        # If the problem has been fixed, the judgment
+        # needs to be deleted!!!
+        if 0 in attrs[2]['shape']:
+            return False
+
+        return True
+
+    def sample_program_config(self, draw):
+        transpose_X = draw(st.booleans())
+        transpose_Y = draw(st.booleans())
+        axis = draw(st.sampled_from([[0, 2, 1, 3]]))
+        shape = draw(st.sampled_from([[0, -1, 128], [-1, 1, 64], [1, -1, 32]]))
+        batch_size1 = draw(st.integers(min_value=1, max_value=4))
+        batch_size2 = draw(st.integers(min_value=1, max_value=4))
+        channel1 = draw(st.sampled_from([1, 16, 32, 64]))
+        channel2 = draw(st.sampled_from([1, 16, 32, 64]))
+        input_dim = draw(st.sampled_from([16, 32, 64]))
+
+        def generate_input(type):
+            if transpose_X and transpose_Y:
+                shape_x = [batch_size1, channel1, input_dim, 32]
+                shape_y = [batch_size2, channel2, 64, input_dim]
+            elif transpose_X:
+                shape_x = [batch_size1, channel1, input_dim, 32]
+                shape_y = [batch_size2, channel2, input_dim, 64]
+            elif transpose_Y:
+                shape_x = [batch_size1, channel1, 32, input_dim]
+                shape_y = [batch_size2, channel2, 8, input_dim]
+            else:
+                shape_x = [batch_size1, channel1, 32, input_dim]
+                shape_y = [batch_size2, channel2, input_dim, 16]
+
+            if type == "x":
+                return np.random.random(shape_x).astype(np.float32)
+            else:
+                return np.random.random(shape_y).astype(np.float32)
+
+        matmul_op = OpConfig(
+            type="matmul_v2",
+            inputs={"X": ["input_data1"],
+                    "Y": ["input_data2"]},
+            outputs={"Out": ["matmul_output"]},
+            attrs={
+                "trans_x": transpose_X,
+                "trans_y": transpose_Y,
+                "fused_reshape_X": [],
+                "fused_reshape_Y": [],
+                "fused_transpose_X": [],
+                "fused_transpose_Y": [],
+                "fused_reshape_Out": [],
+                "fused_transpose_Out": []
+            })
+
+        transpose2_op = OpConfig(
+            type="transpose2",
+            inputs={"X": ["matmul_output"]},
+            outputs={
+                "Out": ["transpose2_output"],
+                "XShape": ["transpose2_xshape"]
+            },
+            attrs={'axis': axis})
+
+        reshape2_op = OpConfig(
+            type="reshape2",
+            inputs={"X": ["transpose2_output"]},
+            outputs={
+                "Out": ["reshape2_output"],
+                "XShape": ["reshape2_xshape"]
+            },
+            attrs={'shape': shape})
+
+        model_net = [matmul_op, transpose2_op, reshape2_op]
+
+        program_config = ProgramConfig(
+            ops=model_net,
+            weights={},
+            inputs={
+                "input_data1":
+                TensorConfig(data_gen=partial(generate_input, "x")),
+                "input_data2":
+                TensorConfig(data_gen=partial(generate_input, "y"))
+            },
+            outputs=["reshape2_output"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        # map_matmul_v2_to_matmul_pass will affect the type of final fused op
+        fused_op = "matmul_v2"
+        input1_dim1 = program_config.inputs["input_data1"].shape[0]
+        input2_dim1 = program_config.inputs["input_data2"].shape[0]
+        input1_dim2 = program_config.inputs["input_data1"].shape[1]
+        input2_dim2 = program_config.inputs["input_data2"].shape[1]
+        if input1_dim1 == input2_dim1 and input1_dim2 == input2_dim2:
+            fused_op = "matmul"
+
+        config = self.create_inference_config(use_mkldnn=True)
+        yield config, [fused_op], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(
+            quant=False, passes=["matmul_v2_transpose_reshape_fuse_pass"])
 
 
 if __name__ == "__main__":
-    paddle.enable_static()
     unittest.main()
```
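The `is_program_valid` hook above rejects sampled input pairs whose two leading (batch and channel) dims can neither match nor broadcast, since batched `matmul_v2` cannot multiply such tensors. The same rule, restated as a standalone sketch for clarity (illustrative only, not part of the commit):

```python
def batch_dims_compatible(shape_x, shape_y):
    """Mirror of the validity rule in is_program_valid: on each of the two
    leading (batch and channel) axes, the dims must be equal unless one is 1."""
    for axis in (-4, -3):
        dx, dy = shape_x[axis], shape_y[axis]
        if dx != 1 and dy != 1 and dx != dy:
            return False
    return True


print(batch_dims_compatible([2, 16, 32, 64], [2, 1, 64, 16]))   # True: channel broadcasts
print(batch_dims_compatible([2, 16, 32, 64], [3, 16, 64, 16]))  # False: 2 vs 3 cannot broadcast
```

Letting hypothesis draw shapes freely and filtering with this predicate keeps the generator simple while still covering the broadcastable corner cases the fused kernel must handle.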