未验证 提交 9cda9ec2 编写于 作者: L levi131 提交者: GitHub

Add API paddle.neg() and paddle.lgamma(), along with some unittests for paddle.neg(). (#33248)

* add paddle.neg api

* add test for neg

* fix an English grammar error in comment

* add lgamma api

* support api paddle.tensor.neg() and paddle.tensor.lgamma()

* modify test_neg_op.py
上级 2b56b1b0
......@@ -207,6 +207,8 @@ from .tensor.math import isnan # noqa: F401
from .tensor.math import prod # noqa: F401
from .tensor.math import broadcast_shape # noqa: F401
from .tensor.math import conj # noqa: F401
from .tensor.math import neg # noqa: F401
from .tensor.math import lgamma # noqa: F401
from .tensor.random import multinomial # noqa: F401
from .tensor.random import standard_normal # noqa: F401
......@@ -424,6 +426,8 @@ __all__ = [ #noqa
'prod',
'broadcast_shape',
'conj',
'neg',
'lgamma',
'square',
'divide',
'ceil',
......
......@@ -53,6 +53,7 @@ __unary_func__ = [
'round',
'reciprocal',
'square',
'lgamma',
]
__inplace_unary_func__ = [
......@@ -396,6 +397,19 @@ Examples:
""")
# Attach a runnable "Examples" section to the generated docstring of the
# lgamma op (lgamma(x) = log(|gamma(x)|), applied element-wise).
# NOTE(review): the expected values in the sample output are presumed to come
# from the lgamma kernel — confirm against scipy.special.gammaln if edited.
add_sample_code(globals()["lgamma"], r"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.lgamma(x)
print(out)
# [1.31452441, 1.76149750, 2.25271273, 1.09579802]
""")
add_sample_code(globals()["softplus"], r"""
Examples:
.. code-block:: python
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
class TestNegOp(unittest.TestCase):
    """Check paddle.neg against numpy.negative in dygraph and static modes."""

    def setUp(self):
        self.init_dtype_type()
        self.input = (np.random.random((32, 8)) * 100).astype(self.dtype)

    def init_dtype_type(self):
        # Subclasses override this hook to exercise other dtypes.
        self.dtype = np.float64

    def run_imperative(self):
        # Dygraph path: compare eager result with the numpy reference.
        tensor_in = paddle.to_tensor(self.input)
        actual = paddle.neg(tensor_in)
        expected = np.negative(self.input)
        self.assertTrue(np.allclose(actual.numpy(), expected))

    def run_static(self, use_gpu=False):
        # Static-graph path: build, run, and compare with numpy.
        data_in = paddle.fluid.data(
            name='input', shape=[32, 8], dtype=self.dtype)
        result = paddle.neg(data_in)

        place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(paddle.static.default_startup_program())
        fetched = exe.run(feed={"input": self.input}, fetch_list=[result])

        expected = np.negative(self.input)
        self.assertTrue(np.allclose(fetched[0], expected))

    def test_cpu(self):
        paddle.disable_static(place=paddle.CPUPlace())
        self.run_imperative()
        paddle.enable_static()

        with paddle.static.program_guard(paddle.static.Program()):
            self.run_static()

    def test_gpu(self):
        # Skip silently when this build has no CUDA support.
        if not paddle.fluid.core.is_compiled_with_cuda():
            return

        paddle.disable_static(place=paddle.CUDAPlace(0))
        self.run_imperative()
        paddle.enable_static()

        with paddle.static.program_guard(paddle.static.Program()):
            self.run_static(use_gpu=True)
class TestNegOpFp32(TestNegOp):
    """Run the neg test suite with float32 input."""

    def init_dtype_type(self):
        self.dtype = np.float32
class TestNegOpInt64(TestNegOp):
    """Run the neg test suite with int64 input."""

    def init_dtype_type(self):
        self.dtype = np.int64
class TestNegOpInt32(TestNegOp):
    """Run the neg test suite with int32 input."""

    def init_dtype_type(self):
        self.dtype = np.int32
class TestNegOpInt16(TestNegOp):
    """Run the neg test suite with int16 input."""

    def init_dtype_type(self):
        self.dtype = np.int16
class TestNegOpInt8(TestNegOp):
    """Run the neg test suite with int8 input."""

    def init_dtype_type(self):
        self.dtype = np.int8
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
......@@ -164,6 +164,8 @@ from .math import all # noqa: F401
from .math import any # noqa: F401
from .math import broadcast_shape # noqa: F401
from .math import conj # noqa: F401
from .math import neg # noqa: F401
from .math import lgamma # noqa: F401
from .random import multinomial # noqa: F401
from .random import standard_normal # noqa: F401
......@@ -281,6 +283,8 @@ tensor_method_func = [ #noqa
'isnan',
'broadcast_shape',
'conj',
'neg',
'lgamma',
'equal',
'equal_all',
'greater_equal',
......
......@@ -62,6 +62,7 @@ from ..fluid.layers import erf # noqa: F401
from ..fluid.layers import sqrt # noqa: F401
from ..fluid.layers import sqrt_ # noqa: F401
from ..fluid.layers import sin # noqa: F401
from ..fluid.layers import lgamma # noqa: F401
from ..fluid.layers import multiplex # noqa: F401
from ..fluid import layers
......@@ -2280,3 +2281,27 @@ def conj(x, name=None):
helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
return out
def neg(x, name=None):
    """
    Compute the element-wise negative of the input Tensor.

    Args:
        x (Tensor): Input of neg operator, an N-D Tensor, with data type float32, float64, int8, int16, int32, or int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): The negative of input Tensor. The shape and data type are the same with input Tensor.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = paddle.neg(x)
            print(out)
            # [0.4 0.2 -0.1 -0.3]
    """
    # neg(x) == x * (-1) + 0, so delegate to the existing scale op.
    return layers.scale(
        x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册