Unverified commit 72cb09e3, authored by Z zhiboniu, committed by GitHub

add logaddexp api (#52284)

* add logaddexp

* update
Parent 9682b04e
@@ -258,6 +258,7 @@ from .tensor.math import renorm # noqa: F401
from .tensor.math import add # noqa: F401
from .tensor.math import subtract # noqa: F401
from .tensor.math import logsumexp # noqa: F401
from .tensor.math import logaddexp # noqa: F401
from .tensor.math import inverse # noqa: F401
from .tensor.math import log1p # noqa: F401
from .tensor.math import erf # noqa: F401
@@ -437,6 +438,7 @@ __all__ = [ # noqa
'eye',
'cumsum',
'cumprod',
'logaddexp',
'logcumsumexp',
'logit',
'LazyGuard',
......
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
def ref_logaddexp_old(x, y):
    # Hand-written stable reference: max(x, y) + log1p(exp(-|x - y|)).
    y = np.broadcast_to(y, x.shape)
    out = np.log1p(np.exp(-np.absolute(x - y))) + np.maximum(x, y)
    return out


def ref_logaddexp(x, y):
    # Reference that delegates to NumPy's built-in logaddexp.
    return np.logaddexp(x, y)
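Both helpers compute the same quantity: ref_logaddexp_old spells out the stable max/log1p identity by hand, while ref_logaddexp delegates to NumPy. A quick sanity check (illustrative only, relying on the two helpers and the numpy import above) confirms they agree, including under broadcasting:

xs = np.random.uniform(-5, 5, (2, 3)).astype(np.float64)
ys = np.random.uniform(-5, 5, (1, 3)).astype(np.float64)  # broadcasts against xs
np.testing.assert_allclose(ref_logaddexp_old(xs, ys), ref_logaddexp(xs, ys), atol=1e-12)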
class TestLogsumexpAPI(unittest.TestCase):
    def setUp(self):
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.fluid.core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def api_case(self):
        self.x = np.random.uniform(-1, 1, self.xshape).astype(self.dtype)
        self.y = np.random.uniform(-1, 1, self.yshape).astype(self.dtype)
        out_ref = ref_logaddexp(self.x, self.y)
        # paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x)
        y = paddle.to_tensor(self.y)
        out = paddle.logaddexp(x, y)
        np.testing.assert_allclose(out.numpy(), out_ref, atol=1e-06)

    def test_api(self):
        self.xshape = [1, 2, 3, 4]
        self.yshape = [1, 2, 3, 4]
        self.dtype = np.float64
        self.api_case()

    def test_api_broadcast(self):
        self.xshape = [1, 2, 3, 4]
        self.yshape = [1, 2, 3, 1]
        self.dtype = np.float32
        self.api_case()

    def test_api_bigdata(self):
        self.xshape = [10, 200, 300]
        self.yshape = [10, 200, 300]
        self.dtype = np.float32
        self.api_case()
if __name__ == '__main__':
    unittest.main()
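The broadcast case exercised by test_api_broadcast relies on ordinary elementwise broadcasting of the trailing size-1 dimension. A minimal interactive check of the same shapes (a sketch assuming a Paddle build that includes this commit; the variable names are illustrative):

import numpy as np
import paddle

x = paddle.rand([1, 2, 3, 4], dtype='float32')
y = paddle.rand([1, 2, 3, 1], dtype='float32')  # last dim broadcasts from 1 to 4
out = paddle.logaddexp(x, y)
np.testing.assert_allclose(out.numpy(), np.logaddexp(x.numpy(), y.numpy()), atol=1e-6)
print(out.shape)  # [1, 2, 3, 4]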
@@ -201,6 +201,7 @@ from .math import subtract # noqa: F401
from .math import subtract_ # noqa: F401
from .math import atan2 # noqa: F401
from .math import logsumexp # noqa: F401
from .math import logaddexp # noqa: F401
from .math import inverse # noqa: F401
from .math import log2 # noqa: F401
from .math import log10 # noqa: F401
@@ -341,6 +342,7 @@ tensor_method_func = [ # noqa
'floor',
'floor_',
'increment',
'logaddexp',
'log',
'log2',
'log10',
......
@@ -636,6 +636,64 @@ def add_(x, y, name=None):
    return _C_ops.add_(x, y)
def logaddexp(x, y, name=None):
"""
Elementwise LogAddExp Operator.
Add of exponentiations of the inputs
The equation is:
.. math::
Out=log(X.exp()+Y.exp())
$X$ the tensor of any dimension.
$Y$ the tensor whose dimensions must be less than or equal to the dimensions of $X$.
There are two cases for this operator:
1. The shape of $Y$ is the same with $X$.
2. The shape of $Y$ is a continuous subsequence of $X$.
For case 2:
1. Broadcast $Y$ to match the shape of $X$, where axis is the start dimension index for broadcasting $Y$ onto $X$.
2. If $axis$ is -1 (default), $axis$=rank($X$)-rank($Y$).
3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of subsequence, such as shape($Y$) = (2, 1) => (2).
For example:
.. code-block:: python
shape(X) = (2, 3, 4, 5), shape(Y) = (,)
shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
Args:
x (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be float32, float64, float16.
y (Tensor): Tensor or LoDTensor of any dimensions. Its dtype should be float32, float64, float16.
name (string, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
N-D Tensor. A location into which the result is stored. It's dimension equals with x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-1, -2, -3], 'float64')
y = paddle.to_tensor([-1], 'float64')
z = paddle.logaddexp(x, y)
print(z) # [-0.30685282, -0.68673831, -0.87307199]
"""
return paddle.log1p(paddle.exp(-paddle.abs(x - y))) + paddle.maximum(x, y)
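The one-line implementation above is the standard numerically stable rewriting of log(exp(x) + exp(y)): factoring out max(x, y) keeps the remaining exponent non-positive, so exp never overflows. A small NumPy sketch (illustrative only, not part of this change) showing why the naive form fails where this one does not:

import numpy as np

def naive_logaddexp(x, y):
    # Literal log(exp(x) + exp(y)); exp overflows for large inputs.
    return np.log(np.exp(x) + np.exp(y))

def stable_logaddexp(x, y):
    # Same identity as the Paddle implementation:
    # log(e^x + e^y) = max(x, y) + log1p(exp(-|x - y|)).
    return np.log1p(np.exp(-np.abs(x - y))) + np.maximum(x, y)

x, y = np.float64(1000.0), np.float64(999.0)
print(naive_logaddexp(x, y))   # inf, with an overflow warning
print(stable_logaddexp(x, y))  # 1000.3132616875182
print(np.logaddexp(x, y))      # agrees with the stable form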
def subtract(x, y, name=None):
"""
Substract two tensors element-wise. The equation is:
......