diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 8f370019a5655e65eaa3a963beeab62ac559b6ae..4163d5ed955829b66956b39a7b26a6753ef0b367 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -139,7 +139,6 @@ from .tensor.math import elementwise_floordiv  #DEFINE_ALIAS
 from .tensor.math import elementwise_max  #DEFINE_ALIAS
 from .tensor.math import elementwise_min  #DEFINE_ALIAS
 from .tensor.math import elementwise_mod  #DEFINE_ALIAS
-from .tensor.math import elementwise_mul  #DEFINE_ALIAS
 from .tensor.math import elementwise_pow  #DEFINE_ALIAS
 from .tensor.math import elementwise_sub  #DEFINE_ALIAS
 from .tensor.math import exp  #DEFINE_ALIAS
@@ -169,6 +168,7 @@ from .tensor.math import max  #DEFINE_ALIAS
 from .tensor.math import min  #DEFINE_ALIAS
 from .tensor.math import mm  #DEFINE_ALIAS
 from .tensor.math import div  #DEFINE_ALIAS
+from .tensor.math import multiply  #DEFINE_ALIAS
 from .tensor.math import add  #DEFINE_ALIAS
 from .tensor.math import atan  #DEFINE_ALIAS
 from .tensor.math import logsumexp  #DEFINE_ALIAS
diff --git a/python/paddle/fluid/tests/unittests/test_multiply.py b/python/paddle/fluid/tests/unittests/test_multiply.py
new file mode 100644
index 0000000000000000000000000000000000000000..64421f6a1c6a018fdf82a7518f647099830972b3
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_multiply.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import paddle
+import paddle.tensor as tensor
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard
+import numpy as np
+import unittest
+
+
+class TestMultiplyAPI(unittest.TestCase):
+    """TestMultiplyAPI."""
+
+    def __run_static_graph_case(self, x_data, y_data, axis=-1):
+        with program_guard(Program(), Program()):
+            x = paddle.nn.data(name='x', shape=x_data.shape, dtype=x_data.dtype)
+            y = paddle.nn.data(name='y', shape=y_data.shape, dtype=y_data.dtype)
+            res = tensor.multiply(x, y, axis=axis)
+
+            place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
+            ) else fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            outs = exe.run(fluid.default_main_program(),
+                           feed={'x': x_data,
+                                 'y': y_data},
+                           fetch_list=[res])
+            res = outs[0]
+            return res
+
+    def __run_dynamic_graph_case(self, x_data, y_data, axis=-1):
+        paddle.enable_imperative()
+        x = paddle.imperative.to_variable(x_data)
+        y = paddle.imperative.to_variable(y_data)
+        res = paddle.multiply(x, y, axis=axis)
+        return res.numpy()
+
+    def test_multiply(self):
+        """test_multiply."""
+        np.random.seed(7)
+        # test static computation graph: 1-d array
+        x_data = np.random.rand(200)
+        y_data = np.random.rand(200)
+        res = self.__run_static_graph_case(x_data, y_data)
+        self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
+
+        # test static computation graph: 2-d array
+        x_data = np.random.rand(2, 500)
+        y_data = np.random.rand(2, 500)
+        res = self.__run_static_graph_case(x_data, y_data)
+        self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
+
+        # test static computation graph: broadcast
+        x_data = np.random.rand(2, 500)
+        y_data = np.random.rand(500)
+        res = self.__run_static_graph_case(x_data, y_data)
+        self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
+
+        # test static computation graph: broadcast with axis
+        x_data = np.random.rand(2, 300, 40)
+        y_data = np.random.rand(300)
+        res = self.__run_static_graph_case(x_data, y_data, axis=1)
+        expected = np.multiply(x_data, y_data[..., np.newaxis])
+        self.assertTrue(np.allclose(res, expected))
+
+        # test dynamic computation graph: 1-d array
+        x_data = np.random.rand(200)
+        y_data = np.random.rand(200)
+        res = self.__run_dynamic_graph_case(x_data, y_data)
+        self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
+
+        # test dynamic computation graph: 2-d array
+        x_data = np.random.rand(20, 50)
+        y_data = np.random.rand(20, 50)
+        res = self.__run_dynamic_graph_case(x_data, y_data)
+        self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
+
+        # test dynamic computation graph: broadcast
+        x_data = np.random.rand(2, 500)
+        y_data = np.random.rand(500)
+        res = self.__run_dynamic_graph_case(x_data, y_data)
+        self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
+
+        # test dynamic computation graph: broadcast with axis
+        x_data = np.random.rand(2, 300, 40)
+        y_data = np.random.rand(300)
+        res = self.__run_dynamic_graph_case(x_data, y_data, axis=1)
+        expected = np.multiply(x_data, y_data[..., np.newaxis])
+        self.assertTrue(np.allclose(res, expected))
+
+
+class TestMultiplyError(unittest.TestCase):
+    """TestMultiplyError."""
+
+    def test_errors(self):
+        """test_errors."""
+        # test static computation graph: dtype can not be int8
+        paddle.disable_imperative()
+        with program_guard(Program(), Program()):
+            x = paddle.nn.data(name='x', shape=[100], dtype=np.int8)
+            y = paddle.nn.data(name='y', shape=[100], dtype=np.int8)
+            self.assertRaises(TypeError, tensor.multiply, x, y)
+
+        # test static computation graph: inputs must be broadcastable
+        with program_guard(Program(), Program()):
+            x = paddle.nn.data(name='x', shape=[20, 50], dtype=np.float64)
+            y = paddle.nn.data(name='y', shape=[20], dtype=np.float64)
+            self.assertRaises(fluid.core.EnforceNotMet, tensor.multiply, x, y)
+
+        np.random.seed(7)
+        # test dynamic computation graph: dtype can not be int8
+        paddle.enable_imperative()
+        x_data = np.random.randn(200).astype(np.int8)
+        y_data = np.random.randn(200).astype(np.int8)
+        x = paddle.imperative.to_variable(x_data)
+        y = paddle.imperative.to_variable(y_data)
+        self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)
+
+        # test dynamic computation graph: inputs must be broadcastable
+        x_data = np.random.rand(200, 5)
+        y_data = np.random.rand(200)
+        x = paddle.imperative.to_variable(x_data)
+        y = paddle.imperative.to_variable(y_data)
+        self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index f16404001eaf43fe5fb0e0f127e5439f83ce06f4..7a583e0c38dd620f700e95ba06d9e3ec41042fb0 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -112,7 +112,6 @@ from .math import elementwise_floordiv  #DEFINE_ALIAS
 from .math import elementwise_max  #DEFINE_ALIAS
 from .math import elementwise_min  #DEFINE_ALIAS
 from .math import elementwise_mod  #DEFINE_ALIAS
-from .math import elementwise_mul  #DEFINE_ALIAS
 from .math import elementwise_pow  #DEFINE_ALIAS
 from .math import elementwise_sub  #DEFINE_ALIAS
 from .math import exp  #DEFINE_ALIAS
@@ -142,6 +141,7 @@ from .math import max  #DEFINE_ALIAS
 from .math import min  #DEFINE_ALIAS
 from .math import mm  #DEFINE_ALIAS
 from .math import div  #DEFINE_ALIAS
+from .math import multiply  #DEFINE_ALIAS
 from .math import add  #DEFINE_ALIAS
 from .math import atan  #DEFINE_ALIAS
 from .math import logsumexp  #DEFINE_ALIAS
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index b4a9c7a468e2f61c79082a746cb319975d99a441..89b6b4e47393a61b5a12c6730ff0e0aee1b8f81d 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -76,7 +76,6 @@ __all__ = [
         'elementwise_max',
         'elementwise_min',
         'elementwise_mod',
-        'elementwise_mul',
         'elementwise_pow',
         'elementwise_sub',
         'exp',
@@ -107,6 +106,7 @@ __all__ = [
         'min',
         'mm',
         'div',
+        'multiply',
         'add',
         'atan',
         'logsumexp',
@@ -580,11 +580,49 @@ Examples:
     return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
+def multiply(x, y, axis=-1, name=None):
+    """
+    :alias_main: paddle.multiply
+    :alias: paddle.multiply,paddle.tensor.multiply,paddle.tensor.math.multiply
+
+Examples:
+
+    .. code-block:: python
+
+        import paddle
+        import numpy as np
+
+        paddle.enable_imperative()
+        x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
+        y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
+        x = paddle.imperative.to_variable(x_data)
+        y = paddle.imperative.to_variable(y_data)
+        res = paddle.multiply(x, y)
+        print(res.numpy())  # [[5, 12], [21, 32]]
+
+        x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
+        y_data = np.array([1, 2], dtype=np.float32)
+        x = paddle.imperative.to_variable(x_data)
+        y = paddle.imperative.to_variable(y_data)
+        res = paddle.multiply(x, y, axis=1)
+        print(res.numpy())  # [[[1, 2, 3], [2, 4, 6]]]
+
+    """
+    op_type = 'elementwise_mul'
+    act = None
+    if in_dygraph_mode():
+        return _elementwise_op_in_dygraph(
+            x, y, axis=axis, act=act, op_name=op_type)
+
+    return _elementwise_op(LayerHelper(op_type, **locals()))
+
+
 for func in [
         add,
         div,
+        multiply,
 ]:
-    proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div'}
+    proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div', 'multiply': 'elementwise_mul'}
     op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])
     if func.__name__ in ['add']:
         alias_main = ':alias_main: paddle.%(func)s' % {'func': func.__name__}
@@ -601,9 +639,6 @@ for func in [
         ]
     else:
         additional_args_lines = [
-            "out (Variable, optinal): The Variable that stores results of the operation. If out is None, \
-                a new Variable will be created to store the results."
-            ,
             "name (string, optional): Name of the output. \
                 Default is None. It's used to print debug info for developers. Details: \
                 :ref:`api_guide_Name` "
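
As a cross-check of the axis broadcasting rule that both the docstring example and the new unit tests rely on, here is a minimal NumPy-only sketch (it runs without Paddle installed). The shapes and the np.newaxis reshape mirror the "broadcast with axis" cases in test_multiply.py; the equivalence to paddle.multiply(x, y, axis=1) is exactly what those tests assert, not an independent guarantee.

    import numpy as np

    # Shapes taken from the "broadcast with axis" case in test_multiply.py:
    # x has shape (2, 300, 40), y has shape (300,); axis=1 aligns y with
    # x's second dimension.
    x = np.random.rand(2, 300, 40)
    y = np.random.rand(300)

    # The tests assert that paddle.multiply(x, y, axis=1) matches this:
    # giving y a trailing singleton axis lets NumPy broadcasting pad the
    # last dimension, yielding a (2, 300, 40) result.
    expected = np.multiply(x, y[..., np.newaxis])
    assert expected.shape == (2, 300, 40)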