diff --git a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc
index af80666b9542db1073f3b07618433671652fffa4..8c2e62bed195f27e228d5dd460ba21ed87c3f5d2 100644
--- a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc
@@ -25,14 +25,14 @@ class ElementwiseModOpMaker : public ElementwiseOpMaker {
   void AddInputX() override {
     AddInput("X",
-             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
-             "should be int32, int64.");
+             "(Tensor), Tensor of any dimensions. Its dtype "
+             "should be int32, int64, float32 or float64.");
   }
   void AddInputY() override {
     AddInput("Y",
-             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
-             "should be int32, int64.");
+             "(Tensor), Tensor of any dimensions. Its dtype "
+             "should be int32, int64, float32 or float64.");
   }
   std::string GetOpFuntionality() const override {
diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 8153589e0c5fc2ae4329390bb129d7ac7b49f9b2..48f61027bbdf60aad47d25fe88a88f3f65675764 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -176,7 +176,11 @@ from .tensor.math import maximum #DEFINE_ALIAS
 from .tensor.math import min #DEFINE_ALIAS
 from .tensor.math import minimum #DEFINE_ALIAS
 from .tensor.math import mm #DEFINE_ALIAS
-from .tensor.math import div #DEFINE_ALIAS
+from .tensor.math import divide #DEFINE_ALIAS
+from .tensor.math import floor_divide #DEFINE_ALIAS
+from .tensor.math import remainder #DEFINE_ALIAS
+from .tensor.math import mod #DEFINE_ALIAS
+from .tensor.math import floor_mod #DEFINE_ALIAS
 from .tensor.math import multiply #DEFINE_ALIAS
 from .tensor.math import add #DEFINE_ALIAS
 from .tensor.math import atan #DEFINE_ALIAS
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d33c1c3ef9ae7357ac6feb0362b0deada992f441..34e0850991ec0ae57d89ec91030e6bf6a29e2787 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11484,6 +11484,7 @@ Examples:
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))
 
 
+@deprecated(since="2.0.0", update_to="paddle.divide")
 def elementwise_div(x, y, axis=-1, act=None, name=None):
     """
     :alias_main: paddle.elementwise_div
@@ -11907,6 +11908,7 @@ Examples:
     return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
 
 
+@deprecated(since="2.0.0", update_to="paddle.remainder")
 def elementwise_mod(x, y, axis=-1, act=None, name=None):
     """
     :alias_main: paddle.elementwise_mod
@@ -11944,6 +11946,7 @@ Examples:
     return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
 
 
+@deprecated(since="2.0.0", update_to="paddle.floor_divide")
 def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
     """
     :alias_main: paddle.elementwise_floordiv
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
index de0fc591b664728387ccb988f3611fe034989627..3cfbac8b613c125956861f73b1bab24c34e05572 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
@@ -240,22 +240,22 @@ class TestElementwiseDivBroadcast(unittest.TestCase):
         self.assertEqual((out_result == (2 / x)).all(), True)
 
 
-class TestDivOp(unittest.TestCase):
+class TestDivideOp(unittest.TestCase):
     def test_name(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data(name="x", shape=[2, 3], dtype="float32")
             y = fluid.data(name='y', shape=[2, 3], dtype='float32')
 
-            y_1 = paddle.div(x, y, name='div_res')
+            y_1 = paddle.divide(x, y, name='div_res')
             self.assertEqual(('div_res' in y_1.name), True)
 
     def test_dygraph(self):
         with fluid.dygraph.guard():
             np_x = np.array([2, 3, 4]).astype('float64')
             np_y = np.array([1, 5, 2]).astype('float64')
-            x = fluid.dygraph.to_variable(np_x)
-            y = fluid.dygraph.to_variable(np_y)
-            z = paddle.div(x, y)
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.divide(x, y)
             np_z = z.numpy()
             z_expected = np.array([2., 0.6, 2.])
             self.assertEqual((np_z == z_expected).all(), True)
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py
index 104e896b6e440f5657a90e0ce741b49f72ba75c6..0c4e19c9816eb03812af252a78c3f2f87cfc153b 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py
@@ -15,6 +15,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle
+import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
@@ -65,5 +67,26 @@ class TestElementwiseModOp_scalar(TestElementwiseModOp):
         self.out = np.floor_divide(self.x, self.y)
 
 
+class TestFloorDivideOp(unittest.TestCase):
+    def test_name(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="int64")
+            y = fluid.data(name='y', shape=[2, 3], dtype='int64')
+
+            y_1 = paddle.floor_divide(x, y, name='div_res')
+            self.assertEqual(('div_res' in y_1.name), True)
+
+    def test_dygraph(self):
+        with fluid.dygraph.guard():
+            np_x = np.array([2, 3, 8, 7]).astype('int64')
+            np_y = np.array([1, 5, 3, 3]).astype('int64')
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.floor_divide(x, y)
+            np_z = z.numpy()
+            z_expected = np.array([2, 0, 2, 2])
+            self.assertEqual((np_z == z_expected).all(), True)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
index 2c0fdf51769782e046b1b18ebd31782c81fd49f0..601076a1f49ab5603757496d858428847fedd75f 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
@@ -15,6 +15,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle
+import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
@@ -82,5 +84,26 @@ class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
         self.dtype = np.float64
 
 
+class TestRemainderOp(unittest.TestCase):
+    def test_name(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="int64")
+            y = fluid.data(name='y', shape=[2, 3], dtype='int64')
+
+            y_1 = paddle.remainder(x, y, name='div_res')
+            self.assertEqual(('div_res' in y_1.name), True)
+
+    def test_dygraph(self):
+        with fluid.dygraph.guard():
+            np_x = np.array([2, 3, 8, 7]).astype('int64')
+            np_y = np.array([1, 5, 3, 3]).astype('int64')
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.remainder(x, y)
+            np_z = z.numpy()
+            z_expected = np.array([0, 3, 2, 1])
+            self.assertEqual((np_z == z_expected).all(), True)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 7e23779846b9f560fa6204dd28b24263ecdb87ac..e0830505c050dbebe83baeb78aed7d8faef11fa9 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -144,7 +144,11 @@ from .math import maximum #DEFINE_ALIAS
 from .math import min #DEFINE_ALIAS
 from .math import minimum #DEFINE_ALIAS
 from .math import mm #DEFINE_ALIAS
-from .math import div #DEFINE_ALIAS
+from .math import divide #DEFINE_ALIAS
+from .math import floor_divide #DEFINE_ALIAS
+from .math import remainder #DEFINE_ALIAS
+from .math import mod #DEFINE_ALIAS
+from .math import floor_mod #DEFINE_ALIAS
 from .math import multiply #DEFINE_ALIAS
 from .math import add #DEFINE_ALIAS
 from .math import atan #DEFINE_ALIAS
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index a0fe0b62fc33e3139cf5c21578bbfb7490c08bf3..bb08333c2b9d45bc8c8e42c8f39c6c01237a2c51 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -111,7 +111,11 @@ __all__ = [
         'min',
         'minimum',
         'mm',
-        'div',
+        'divide',
+        'floor_divide',
+        'remainder',
+        'mod',
+        'floor_mod',
         'multiply',
         'add',
         'atan',
@@ -263,114 +267,143 @@ Examples:
     return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
-def div(x, y, name=None):
+def divide(x, y, name=None):
     """
-Examples:
+    Divide two tensors element-wise. The equation is:
+
-    .. code-block:: python
+    .. math::
+        out = x / y
+
-        import paddle
-        import paddle.fluid as fluid
-        import numpy as np
+    **Note**:
+    ``paddle.divide`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
+
-        def gen_data():
-            return {
-                "x": np.array([2, 3, 4]).astype('float32'),
-                "y": np.array([1, 5, 2]).astype('float32')
-            }
+    Args:
+        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
+        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
-        x = fluid.data(name="x", shape=[3], dtype='float32')
-        y = fluid.data(name="y", shape=[3], dtype='float32')
-        z = paddle.div(x, y)
-        # z = x / y
+    Returns:
+        N-D Tensor. A location into which the result is stored. Its dimension equals that of $x$.
+
-        place = fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        z_value = exe.run(feed=gen_data(),
-                          fetch_list=[z.name])
+    Examples:
+
-        print(z_value) # [2., 0.6, 2.]
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
-    .. code-block:: python
+            paddle.disable_static()
+
-        import paddle
-        import paddle.fluid as fluid
-        import numpy as np
+            np_x = np.array([2, 3, 4]).astype('float64')
+            np_y = np.array([1, 5, 2]).astype('float64')
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.divide(x, y)
+            print(z.numpy())  # [2., 0.6, 2.]
+
-        def gen_data():
-            return {
-                "x": np.ones((2, 3, 4, 5)).astype('float32'),
-                "y": np.zeros((4, 5)).astype('float32')
-            }
+    """
+    op_type = 'elementwise_div'
+    axis = -1
+    act = None
+    if in_dygraph_mode():
+        return _elementwise_op_in_dygraph(
+            x, y, axis=axis, act=act, op_name=op_type)
+
-        x = fluid.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
-        y = fluid.data(name="y", shape=[4, 5], dtype='float32')
-        z = paddle.div(x, y, name='z')
-        # z = x / y
+    return _elementwise_op(LayerHelper(op_type, **locals()))
+
+
-        place = fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        z_value = exe.run(feed=gen_data(),
-                          fetch_list=[z.name])
+def floor_divide(x, y, name=None):
+    """
+    Floor divide two tensors element-wise. The equation is:
+
-        print(z_value[0])
-        print(z_value[0].shape) # z.shape=[2,3,4,5]
+    .. math::
+        out = x // y
+
+    **Note**:
+    ``paddle.floor_divide`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
+
-    .. code-block:: python
+    Args:
+        x (Tensor): the input tensor, its data type should be int32, int64.
+        y (Tensor): the input tensor, its data type should be int32, int64.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
-        import paddle
-        import paddle.fluid as fluid
-        import numpy as np
+    Returns:
+        N-D Tensor. A location into which the result is stored. Its dimension equals that of $x$.
+
-        def gen_data():
-            return {
-                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
-                "y": np.random.randint(1, 5, size=[5]).astype('float32')
-            }
+    Examples:
+
-        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
-        y = fluid.data(name="y", shape=[5], dtype='float32')
-        z = paddle.div(x, y)
-        # z = x / y
+        .. code-block:: python
+
-        place = fluid.CPUPlace()
-        exe = fluid.Executor(place)
+            import paddle
+            import numpy as np
+
-        z_value = exe.run(feed=gen_data(),
-                          fetch_list=[z.name])
-        print(z_value[0])
-        print(z_value[0].shape) # z.shape=[2,3,4,5]
+            paddle.disable_static()
+
+            np_x = np.array([2, 3, 8, 7])
+            np_y = np.array([1, 5, 3, 3])
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.floor_divide(x, y)
+            print(z.numpy())  # [2, 0, 2, 2]
+
-    .. code-block:: python
+    """
+    op_type = 'elementwise_floordiv'
+    axis = -1
+    if in_dygraph_mode():
+        return _elementwise_op_in_dygraph(
+            x, y, axis=axis, op_name=op_type)
+
-        import paddle
-        import paddle.fluid as fluid
-        import numpy as np
+    return _elementwise_op(LayerHelper(op_type, **locals()))
+
+
-        with fluid.dygraph.guard(fluid.CPUPlace()):
-            np_x = np.array([2, 3, 4]).astype('float64')
-            np_y = np.array([1, 5, 2]).astype('float64')
-            x = fluid.dygraph.to_variable(np_x)
-            y = fluid.dygraph.to_variable(np_y)
-            z = paddle.div(x, y)
-            np_z = z.numpy()
-            print(np_z) # [2., 0.6, 2.]
-
+def remainder(x, y, name=None):
     """
-    op_type = 'elementwise_div'
+    Mod two tensors element-wise. The equation is:
+
+    .. math::
+        out = x \% y
+
+    **Note**:
+    ``paddle.remainder`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
+
+    Args:
+        x (Tensor): the input tensor, its data type should be int32, int64.
+        y (Tensor): the input tensor, its data type should be int32, int64.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        N-D Tensor. A location into which the result is stored. Its dimension equals that of $x$.
+
+    Examples:
+
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+
+            np_x = np.array([2, 3, 8, 7])
+            np_y = np.array([1, 5, 3, 3])
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
+            z = paddle.remainder(x, y)
+            print(z.numpy())  # [0, 3, 2, 1]
+
+    """
+    op_type = 'elementwise_mod'
     axis = -1
-    act = None
     if in_dygraph_mode():
         return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type)
+            x, y, axis=axis, op_name=op_type)
 
     return _elementwise_op(LayerHelper(op_type, **locals()))
 
+mod = remainder #DEFINE_ALIAS
+floor_mod = remainder #DEFINE_ALIAS
+
+
 def multiply(x, y, axis=-1, name=None):
     """
     :alias_main: paddle.multiply
@@ -512,7 +545,6 @@ Examples:
 
 for func in [
         add,
-        div,
         maximum,
         minimum,
         multiply
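A minimal usage sketch of the APIs this patch exposes (paddle.divide, paddle.floor_divide, paddle.remainder and the mod / floor_mod aliases), assuming a build with the patch applied; it mirrors the docstring examples above, and the broadcasting shapes are illustrative rather than taken from the new unit tests:

.. code-block:: python

    import numpy as np
    import paddle

    # Dygraph mode, as in the docstring examples added by this patch.
    paddle.disable_static()

    # Same inputs as the new TestFloorDivideOp / TestRemainderOp tests.
    x = paddle.to_tensor(np.array([2, 3, 8, 7]).astype('int64'))
    y = paddle.to_tensor(np.array([1, 5, 3, 3]).astype('int64'))

    print(paddle.floor_divide(x, y).numpy())  # [2 0 2 2]
    print(paddle.remainder(x, y).numpy())     # [0 3 2 1]

    # mod and floor_mod are aliases of remainder (see the DEFINE_ALIAS lines).
    print(paddle.mod(x, y).numpy())           # [0 3 2 1]
    print(paddle.floor_mod(x, y).numpy())     # [0 3 2 1]

    # divide broadcasts trailing dimensions: here a [2, 3] tensor is divided
    # by a [3] tensor (shapes chosen only for illustration).
    a = paddle.to_tensor(np.array([[2., 4., 6.], [1., 2., 3.]]).astype('float32'))
    b = paddle.to_tensor(np.array([2., 2., 2.]).astype('float32'))
    print(paddle.divide(a, b).numpy())        # [[1.  2.  3. ] [0.5 1.  1.5]]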