From 9cda9ec24dfe10e03b1d11dc89ba5b5c0156b3c3 Mon Sep 17 00:00:00 2001 From: levi131 <83750468+levi131@users.noreply.github.com> Date: Wed, 9 Jun 2021 21:34:28 +0800 Subject: [PATCH] Add API paddle.neg() and paddle.lgamma(), along with some unittests for paddle.neg(). (#33248) * add paddle.neg api * add test for neg * fix an English grammar error in comment * add lgamma api * support api paddle.tensor.neg() and paddle.tensor.lgamma() * modify test_neg_op.py --- python/paddle/__init__.py | 4 + python/paddle/fluid/layers/ops.py | 14 +++ .../fluid/tests/unittests/test_neg_op.py | 91 +++++++++++++++++++ python/paddle/tensor/__init__.py | 4 + python/paddle/tensor/math.py | 25 +++++ 5 files changed, 138 insertions(+) create mode 100644 python/paddle/fluid/tests/unittests/test_neg_op.py diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 72d6f9562f1..b0f0f326bd7 100755 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -207,6 +207,8 @@ from .tensor.math import isnan # noqa: F401 from .tensor.math import prod # noqa: F401 from .tensor.math import broadcast_shape # noqa: F401 from .tensor.math import conj # noqa: F401 +from .tensor.math import neg # noqa: F401 +from .tensor.math import lgamma # noqa: F401 from .tensor.random import multinomial # noqa: F401 from .tensor.random import standard_normal # noqa: F401 @@ -424,6 +426,8 @@ __all__ = [ #noqa 'prod', 'broadcast_shape', 'conj', + 'neg', + 'lgamma', 'square', 'divide', 'ceil', diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 813f671e020..eee4bbbb1d5 100755 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -53,6 +53,7 @@ __unary_func__ = [ 'round', 'reciprocal', 'square', + 'lgamma', ] __inplace_unary_func__ = [ @@ -396,6 +397,19 @@ Examples: """) +add_sample_code(globals()["lgamma"], r""" +Examples: + .. 
code-block:: python + + import paddle + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.lgamma(x) + print(out) + # [1.31452441, 1.76149750, 2.25271273, 1.09579802] + +""") + add_sample_code(globals()["softplus"], r""" Examples: .. code-block:: python diff --git a/python/paddle/fluid/tests/unittests/test_neg_op.py b/python/paddle/fluid/tests/unittests/test_neg_op.py new file mode 100644 index 00000000000..e7b16bde023 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_neg_op.py @@ -0,0 +1,91 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import numpy as np +import paddle + + +class TestNegOp(unittest.TestCase): + def setUp(self): + self.init_dtype_type() + self.input = (np.random.random((32, 8)) * 100).astype(self.dtype) + + def init_dtype_type(self): + self.dtype = np.float64 + + def run_imperative(self): + input = paddle.to_tensor(self.input) + dy_result = paddle.neg(input) + expected_result = np.negative(self.input) + self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + + def run_static(self, use_gpu=False): + input = paddle.fluid.data(name='input', shape=[32, 8], dtype=self.dtype) + result = paddle.neg(input) + + place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace() + exe = paddle.static.Executor(place) + exe.run(paddle.static.default_startup_program()) + st_result = exe.run(feed={"input": self.input}, fetch_list=[result]) + expected_result = np.negative(self.input) + self.assertTrue(np.allclose(st_result[0], expected_result)) + + def test_cpu(self): + paddle.disable_static(place=paddle.CPUPlace()) + self.run_imperative() + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program()): + self.run_static() + + def test_gpu(self): + if not paddle.fluid.core.is_compiled_with_cuda(): + return + + paddle.disable_static(place=paddle.CUDAPlace(0)) + self.run_imperative() + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program()): + self.run_static(use_gpu=True) + + +class TestNegOpFp32(TestNegOp): + def init_dtype_type(self): + self.dtype = np.float32 + + +class TestNegOpInt64(TestNegOp): + def init_dtype_type(self): + self.dtype = np.int64 + + +class TestNegOpInt32(TestNegOp): + def init_dtype_type(self): + self.dtype = np.int32 + + +class TestNegOpInt16(TestNegOp): + def init_dtype_type(self): + self.dtype = np.int16 + + +class TestNegOpInt8(TestNegOp): + def init_dtype_type(self): + self.dtype = np.int8 + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/tensor/__init__.py 
b/python/paddle/tensor/__init__.py index 6a75c8e78bc..596cd926231 100755 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -164,6 +164,8 @@ from .math import all # noqa: F401 from .math import any # noqa: F401 from .math import broadcast_shape # noqa: F401 from .math import conj # noqa: F401 +from .math import neg # noqa: F401 +from .math import lgamma # noqa: F401 from .random import multinomial # noqa: F401 from .random import standard_normal # noqa: F401 @@ -281,6 +283,8 @@ tensor_method_func = [ #noqa 'isnan', 'broadcast_shape', 'conj', + 'neg', + 'lgamma', 'equal', 'equal_all', 'greater_equal', diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 2f69946c521..652c7c41fb8 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -62,6 +62,7 @@ from ..fluid.layers import erf # noqa: F401 from ..fluid.layers import sqrt # noqa: F401 from ..fluid.layers import sqrt_ # noqa: F401 from ..fluid.layers import sin # noqa: F401 +from ..fluid.layers import lgamma # noqa: F401 from ..fluid.layers import multiplex # noqa: F401 from ..fluid import layers @@ -2280,3 +2281,27 @@ def conj(x, name=None): helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]}) return out + +def neg(x, name=None): + """ + This function computes the negative of the Tensor elementwisely. + + Args: + x (Tensor): Input of neg operator, an N-D Tensor, with data type float32, float64, int8, int16, int32, or int64. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Returns: + out (Tensor): The negative of input Tensor. The shape and data type are the same with input Tensor. + + Examples: + .. 
code-block:: python + + import paddle + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.neg(x) + print(out) + # [0.4 0.2 -0.1 -0.3] + """ + + return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name) -- GitLab