From 30e4cacd7c04bc3bfff2cbfebc7634f3f29362c4 Mon Sep 17 00:00:00 2001
From: wawltor
Date: Tue, 21 Apr 2020 14:06:30 +0800
Subject: [PATCH] Add dygraph support for the mm op, and support the out attribute

Cherry-pick from PR #23978
---
 paddle/fluid/pybind/op_function_generator.cc  |  3 +-
 python/paddle/fluid/dygraph/nn.py             |  5 +--
 .../fluid/tests/unittests/test_matmul_op.py   | 36 +++++++++++++++++++
 python/paddle/tensor/linalg.py                |  8 +++--
 python/paddle/tensor/math.py                  |  7 ++--
 5 files changed, 51 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc
index da30a12c184..62f1de35ae6 100644
--- a/paddle/fluid/pybind/op_function_generator.cc
+++ b/paddle/fluid/pybind/op_function_generator.cc
@@ -37,7 +37,8 @@ std::map<std::string, std::set<std::string>> op_passing_out_map = {
     {"momentum", {"ParamOut", "VelocityOut"}},
     {"batch_norm", {"MeanOut", "VarianceOut"}},
     {"accuracy", {"Correct", "Total"}},
-    {"fill_constant", {"Out"}}};
+    {"fill_constant", {"Out"}},
+    {"matmul", {"Out"}}};
 
 // clang-format off
 const char* OUT_INITIALIZER_TEMPLATE =
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 6b617f04681..8f6b0a7d5a3 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -936,8 +936,9 @@ class Linear(layers.Layer):
 
     def forward(self, input):
         if in_dygraph_mode():
-            pre_bias = core.ops.matmul(input, self.weight, 'transpose_X', False,
-                                       'transpose_Y', False, "alpha", 1)
+            pre_bias = _varbase_creator(dtype=input.dtype)
+            core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
+                            'transpose_Y', False, "alpha", 1)
             pre_act = dygraph_utils._append_bias_in_dygraph(
                 pre_bias, self.bias, axis=len(input.shape) - 1)
 
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py
index 76199abb310..018b5c62862 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py
@@ -280,6 +280,42 @@ class API_TestMm(unittest.TestCase):
                 "two value is\
 {}\n{}, check diff!".format(np_res, expected_result))
 
+    def test_dygraph_with_out(self):
+        device = fluid.CPUPlace()
+        with fluid.dygraph.guard(device):
+            input_array1 = np.random.rand(3, 4).astype("float64")
+            input_array2 = np.random.rand(4, 3).astype("float64")
+            out_array = np.random.rand(3, 3).astype("float64")
+            data1 = fluid.dygraph.to_variable(input_array1)
+            data2 = fluid.dygraph.to_variable(input_array2)
+            paddle_out_holder = fluid.dygraph.to_variable(out_array)
+            out = paddle.mm(data1, data2, out=paddle_out_holder)
+            self.assertTrue(np.allclose(paddle_out_holder.numpy(), out.numpy()))
+
+    def test_dygraph_without_out(self):
+        device = fluid.CPUPlace()
+        with fluid.dygraph.guard(device):
+            input_array1 = np.random.rand(3, 4).astype("float64")
+            input_array2 = np.random.rand(4, 3).astype("float64")
+            data1 = fluid.dygraph.to_variable(input_array1)
+            data2 = fluid.dygraph.to_variable(input_array2)
+            out = paddle.mm(data1, data2)
+            expected_result = np.matmul(input_array1, input_array2)
+            self.assertTrue(np.allclose(expected_result, out.numpy()))
+
+
+class Test_API_Matmul(unittest.TestCase):
+    def test_dygraph_without_out(self):
+        device = fluid.CPUPlace()
+        with fluid.dygraph.guard(device):
+            input_array1 = np.random.rand(3, 4).astype("float64")
+            input_array2 = np.random.rand(4, 3).astype("float64")
+            data1 = fluid.dygraph.to_variable(input_array1)
+            data2 = fluid.dygraph.to_variable(input_array2)
+            out = paddle.matmul(data1, data2)
+            expected_result = np.matmul(input_array1, input_array2)
+            self.assertTrue(np.allclose(expected_result, out.numpy()))
+
 
 class API_TestMmError(unittest.TestCase):
     def test_errors(self):
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 05ab142a902..aedc88db156 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -14,7 +14,7 @@
 from paddle.common_ops_import import *
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type
-from ..fluid.framework import in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode, _varbase_creator
 
 __all__ = [
     'matmul',
@@ -109,8 +109,10 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     }
 
     if in_dygraph_mode():
-        return core.ops.matmul(x, y, 'transpose_X', transpose_x, 'transpose_Y',
-                               transpose_y, 'alpha', float(alpha))
+        out = _varbase_creator(dtype=x.dtype)
+        core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
+                        transpose_y, 'alpha', float(alpha))
+        return out
 
     def __check_input(x, y):
         var_names = {'x': x, 'y': y}
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 4c9bb0f0a82..ac9a56a90fe 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -19,7 +19,7 @@ from __future__ import print_function
 
 from paddle.common_ops_import import *
 from ..fluid import layers
-from ..fluid.framework import core
+from ..fluid.framework import core, _varbase_creator
 from ..fluid.layers.layer_function_generator import _generate_doc_string_
 
 # TODO: define math functions
@@ -902,7 +902,10 @@ def mm(input, mat2, out=None, name=None):
             out = paddle.mm(x, mat2) # out shape is [2, 2]
     """
     if in_dygraph_mode():
-        return core.ops.matmul(input, mat2)
+        if out is None:
+            out = _varbase_creator(dtype=input.dtype)
+        core.ops.matmul(input, mat2, out)
+        return out
 
     def __check_input(x, y):
         var_names = {'x': x, 'y': y}
-- 
GitLab
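
A minimal dygraph usage sketch of what this patch enables (illustrative variable names; it assumes a Paddle build that already includes this change): in dygraph mode, paddle.mm can now write its result into a caller-provided variable through the out argument, mirroring the new test_dygraph_with_out case above.

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    # Run eagerly (dygraph mode) on CPU.
    with fluid.dygraph.guard(fluid.CPUPlace()):
        a = fluid.dygraph.to_variable(np.random.rand(3, 4).astype("float64"))
        b = fluid.dygraph.to_variable(np.random.rand(4, 3).astype("float64"))
        # Pre-allocated holder; its contents are overwritten by the matmul result.
        holder = fluid.dygraph.to_variable(np.zeros((3, 3), dtype="float64"))

        result = paddle.mm(a, b, out=holder)
        # The returned variable and the holder now contain the same 3x3 product.
        assert np.allclose(result.numpy(), holder.numpy())

Without the out argument, paddle.mm simply allocates a fresh variable via _varbase_creator and returns it, which is what the new test_dygraph_without_out cases exercise.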