diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc
index da30a12c1849dc0c8f7a1bdc782c7a1264cda2e7..62f1de35ae602b7c800a88de323e22d812bfeb4f 100644
--- a/paddle/fluid/pybind/op_function_generator.cc
+++ b/paddle/fluid/pybind/op_function_generator.cc
@@ -37,7 +37,8 @@ std::map<std::string, std::set<std::string>> op_passing_out_map = {
     {"momentum", {"ParamOut", "VelocityOut"}},
     {"batch_norm", {"MeanOut", "VarianceOut"}},
     {"accuracy", {"Correct", "Total"}},
-    {"fill_constant", {"Out"}}};
+    {"fill_constant", {"Out"}},
+    {"matmul", {"Out"}}};
 
 // clang-format off
 const char* OUT_INITIALIZER_TEMPLATE =
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 6b617f04681c6be0ea1f7fadfd9cb2306456d15e..8f6b0a7d5a30ce84daf21fc8122f7e7439349a8f 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -936,8 +936,9 @@ class Linear(layers.Layer):
 
     def forward(self, input):
         if in_dygraph_mode():
-            pre_bias = core.ops.matmul(input, self.weight, 'transpose_X', False,
-                                       'transpose_Y', False, "alpha", 1)
+            pre_bias = _varbase_creator(dtype=input.dtype)
+            core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
+                            'transpose_Y', False, "alpha", 1)
             pre_act = dygraph_utils._append_bias_in_dygraph(
                 pre_bias, self.bias, axis=len(input.shape) - 1)
 
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py
index 76199abb310a0f08db9d9c46107b4db180cd5e88..018b5c62862f075c4ce4243bf3c008bddcf416be 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py
@@ -280,6 +280,42 @@ class API_TestMm(unittest.TestCase):
                 "two value is\
 {}\n{}, check diff!".format(np_res, expected_result))
 
+    def test_dygraph_with_out(self):
+        device = fluid.CPUPlace()
+        with fluid.dygraph.guard(device):
+            input_array1 = np.random.rand(3, 4).astype("float64")
+            input_array2 = np.random.rand(4, 3).astype("float64")
+            out_array = np.random.rand(3, 3).astype("float64")
+            data1 = fluid.dygraph.to_variable(input_array1)
+            data2 = fluid.dygraph.to_variable(input_array2)
+            paddle_out_holder = fluid.dygraph.to_variable(out_array)
+            out = paddle.mm(data1, data2, out=paddle_out_holder)
+            self.assertTrue(np.allclose(paddle_out_holder.numpy(), out.numpy()))
+
+    def test_dygraph_without_out(self):
+        device = fluid.CPUPlace()
+        with fluid.dygraph.guard(device):
+            input_array1 = np.random.rand(3, 4).astype("float64")
+            input_array2 = np.random.rand(4, 3).astype("float64")
+            data1 = fluid.dygraph.to_variable(input_array1)
+            data2 = fluid.dygraph.to_variable(input_array2)
+            out = paddle.mm(data1, data2)
+            expected_result = np.matmul(input_array1, input_array2)
+            self.assertTrue(np.allclose(expected_result, out.numpy()))
+
+
+class Test_API_Matmul(unittest.TestCase):
+    def test_dygraph_without_out(self):
+        device = fluid.CPUPlace()
+        with fluid.dygraph.guard(device):
+            input_array1 = np.random.rand(3, 4).astype("float64")
+            input_array2 = np.random.rand(4, 3).astype("float64")
+            data1 = fluid.dygraph.to_variable(input_array1)
+            data2 = fluid.dygraph.to_variable(input_array2)
+            out = paddle.matmul(data1, data2)
+            expected_result = np.matmul(input_array1, input_array2)
+            self.assertTrue(np.allclose(expected_result, out.numpy()))
+
 
 class API_TestMmError(unittest.TestCase):
     def test_errors(self):
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 05ab142a902d11741abb2d030492aef10df6979c..aedc88db156e34ee0a444cd9f950e78b3ba82125 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -14,7 +14,7 @@
 from paddle.common_ops_import import *
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type
-from ..fluid.framework import in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode, _varbase_creator
 
 __all__ = [
     'matmul',
@@ -109,8 +109,10 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     }
 
     if in_dygraph_mode():
-        return core.ops.matmul(x, y, 'transpose_X', transpose_x, 'transpose_Y',
-                               transpose_y, 'alpha', float(alpha))
+        out = _varbase_creator(dtype=x.dtype)
+        core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
+                        transpose_y, 'alpha', float(alpha))
+        return out
 
     def __check_input(x, y):
         var_names = {'x': x, 'y': y}
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 4c9bb0f0a8273ea147152c3a99b3c6bc2321a2dc..ac9a56a90fef1a1facda4980ce6d8230315da4f8 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -19,7 +19,7 @@ from __future__ import print_function
 
 from paddle.common_ops_import import *
 from ..fluid import layers
-from ..fluid.framework import core
+from ..fluid.framework import core, _varbase_creator
 from ..fluid.layers.layer_function_generator import _generate_doc_string_
 
 # TODO: define math functions
@@ -902,7 +902,10 @@ def mm(input, mat2, out=None, name=None):
             out = paddle.mm(x, mat2) # out shape is [2, 2]
     """
     if in_dygraph_mode():
-        return core.ops.matmul(input, mat2)
+        if out is None:
+            out = _varbase_creator(dtype=input.dtype)
+        core.ops.matmul(input, mat2, out)
+        return out
 
     def __check_input(x, y):
         var_names = {'x': x, 'y': y}
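
For reference, a minimal usage sketch of the reworked dygraph path, mirroring the added `test_dygraph_with_out` case above: with `"matmul"` registered in `op_passing_out_map`, `paddle.mm` can write its result into a caller-provided VarBase instead of allocating a fresh output. This assumes the 1.8-era `paddle.fluid` dygraph API used elsewhere in this patch; the array shapes and variable names here are illustrative only.

```python
import numpy as np
import paddle
import paddle.fluid as fluid

# Sketch of the out= path added by this patch (assumes the fluid dygraph API).
with fluid.dygraph.guard(fluid.CPUPlace()):
    a = fluid.dygraph.to_variable(np.random.rand(3, 4).astype("float64"))
    b = fluid.dygraph.to_variable(np.random.rand(4, 3).astype("float64"))
    # Pre-allocated holder that core.ops.matmul fills in place via the
    # generated op function's passed-in "Out".
    holder = fluid.dygraph.to_variable(np.zeros((3, 3), dtype="float64"))

    out = paddle.mm(a, b, out=holder)
    assert np.allclose(out.numpy(), holder.numpy())
```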