Unverified commit 6554cc10 authored by W wangguanqun, committed by GitHub

add nansum api to math (#38137)

* add nansum api

* delete layerhelper

* add nansum to all and tensor_method_func

* update doc

* update doc

* update doc
Parent 6982871d
...@@ -197,6 +197,7 @@ from .tensor.math import sqrt # noqa: F401 ...@@ -197,6 +197,7 @@ from .tensor.math import sqrt # noqa: F401
from .tensor.math import square # noqa: F401 from .tensor.math import square # noqa: F401
from .tensor.math import stanh # noqa: F401 from .tensor.math import stanh # noqa: F401
from .tensor.math import sum # noqa: F401 from .tensor.math import sum # noqa: F401
from .tensor.math import nansum # noqa: F401
from .tensor.math import tanh # noqa: F401 from .tensor.math import tanh # noqa: F401
from .tensor.math import tanh_ # noqa: F401 from .tensor.math import tanh_ # noqa: F401
from .tensor.math import add_n # noqa: F401 from .tensor.math import add_n # noqa: F401
...@@ -524,6 +525,7 @@ __all__ = [ # noqa ...@@ -524,6 +525,7 @@ __all__ = [ # noqa
'ones', 'ones',
'not_equal', 'not_equal',
'sum', 'sum',
'nansum',
'tile', 'tile',
'greater_equal', 'greater_equal',
'isfinite', 'isfinite',
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
class API_Test_Nansum(unittest.TestCase):
    """Tests for paddle.nansum: static graph, error checking, and dygraph."""

    def test_static_graph(self):
        # Build a static program applying nansum with several axis/keepdim
        # combinations, run it once, and compare each fetched result against
        # a hand-computed reference (NaN entries are treated as zero).
        paddle.enable_static()
        startup_prog = fluid.Program()
        main_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            data = fluid.data(name='input', dtype='float32', shape=[2, 4])
            sum_all = paddle.nansum(data)
            sum_axis0 = paddle.nansum(data, axis=0)
            sum_last = paddle.nansum(data, axis=-1)
            sum_keep = paddle.nansum(data, axis=1, keepdim=True)

            # Prefer the GPU when this build supports it.
            place = fluid.CPUPlace()
            if fluid.core.is_compiled_with_cuda():
                place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            exe.run(startup_prog)

            feed_x = np.array([[float('nan'), 3, 5, 9],
                               [1, 2, float('-nan'), 7]]).astype(np.float32)
            results = exe.run(main_prog,
                              feed={'input': feed_x},
                              fetch_list=[sum_all, sum_axis0, sum_last,
                                          sum_keep])

            references = [
                np.array([27]).astype(np.float32),
                np.array([1, 5, 5, 16]).astype(np.float32),
                np.array([17, 10]).astype(np.float32),
                np.array([[17], [10]]).astype(np.float32),
            ]
            for fetched, reference in zip(results, references):
                actual = np.array(fetched)
                self.assertTrue(
                    (actual == reference).all(),
                    msg='nansum output is wrong, out =' + str(actual))

    def test_error_api(self):
        paddle.enable_static()

        # An unsupported input dtype must raise TypeError.
        def bad_dtype():
            data = fluid.data(name='input', dtype='float16', shape=[2, 3])
            return paddle.nansum(data)

        self.assertRaises(TypeError, bad_dtype)

        # A non-integer axis must raise TypeError.
        def bad_axis():
            data = fluid.data(name='input', dtype='float16', shape=[2, 3])
            return paddle.nansum(data, axis=1.2)

        self.assertRaises(TypeError, bad_axis)

    def test_dygraph(self):
        # In dygraph mode, nansum over the whole tensor should skip the NaNs:
        # 3 + 5 + 9 + 1 + 2 + 7 == 27.
        arr = np.array([[float('nan'), 3, 5, 9],
                        [1, 2, float('-nan'), 7]]).astype(np.float32)
        with fluid.dygraph.guard():
            tensor = fluid.dygraph.to_variable(arr)
            result = paddle.nansum(tensor)
            expected = np.array([27]).astype(np.float32)
            self.assertTrue(
                (result.numpy() == expected).all(),
                msg='nansum output is wrong, out =' + str(result.numpy()))
if __name__ == "__main__":
unittest.main()
...@@ -157,6 +157,7 @@ from .math import sqrt_ # noqa: F401 ...@@ -157,6 +157,7 @@ from .math import sqrt_ # noqa: F401
from .math import square # noqa: F401 from .math import square # noqa: F401
from .math import stanh # noqa: F401 from .math import stanh # noqa: F401
from .math import sum # noqa: F401 from .math import sum # noqa: F401
from .math import nansum # noqa: F401
from .math import tanh # noqa: F401 from .math import tanh # noqa: F401
from .math import tanh_ # noqa: F401 from .math import tanh_ # noqa: F401
from .math import add_n # noqa: F401 from .math import add_n # noqa: F401
...@@ -315,6 +316,7 @@ tensor_method_func = [ #noqa ...@@ -315,6 +316,7 @@ tensor_method_func = [ #noqa
'square', 'square',
'stanh', 'stanh',
'sum', 'sum',
'nansum',
'tanh', 'tanh',
'tanh_', 'tanh_',
'add_n', 'add_n',
......
...@@ -905,6 +905,66 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): ...@@ -905,6 +905,66 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
return out return out
def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
    """
    Computes the sum of tensor elements over the given axis, treating
    Not a Numbers (NaNs) as zero.

    Args:
        x (Tensor): An N-D Tensor, the data type is float32, float64,
            int32 or int64.
        axis (int|list|tuple, optional): The dimensions along which the
            nansum is performed. If :attr:`None`, nansum all elements of
            :attr:`x` and return a Tensor with a single element, otherwise
            must be in the range :math:`[-rank(x), rank(x))`. If
            :math:`axis[i] < 0`, the dimension to reduce is
            :math:`rank + axis[i]`.
        dtype (str, optional): The dtype of output Tensor. The default value
            is None, the dtype of output is the same as input Tensor `x`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in
            the output Tensor. The result Tensor will have one fewer
            dimension than :attr:`x` unless :attr:`keepdim` is true. Default
            value is False.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Results of summation operation on the specified axis of
        input Tensor `x`.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            # x is a Tensor with following elements:
            #    [[nan, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, -nan, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = np.array([[float('nan'), 0.3, 0.5, 0.9],
                          [0.1, 0.2, float('-nan'), 0.7]]).astype(np.float32)
            x = paddle.to_tensor(x)
            out1 = paddle.nansum(x)          # [2.7]
            out2 = paddle.nansum(x, axis=0)  # [0.1, 0.5, 0.5, 1.6]
            out3 = paddle.nansum(x, axis=-1) # [1.7, 1.0]
            out4 = paddle.nansum(x, axis=1, keepdim=True)  # [[1.7], [1.0]]

            # y is a Tensor with shape [2, 2, 2] and elements as below:
            #      [[[1, nan], [3, 4]],
            #       [[5, 6], [-nan, 8]]]
            # Each example is followed by the corresponding output tensor.
            y = np.array([[[1, float('nan')], [3, 4]],
                          [[5, 6], [float('-nan'), 8]]])
            y = paddle.to_tensor(y)
            out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19]
            out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18]
    """
    # Validate the input dtype and the axis type up front so the error
    # surfaces as a TypeError at graph-build time.
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'nansum')
    check_type(axis, 'axis', (int, list, tuple, type(None)), 'nansum')

    # Replace every NaN entry with zero, then delegate to the regular sum.
    nan_free = paddle.where(isnan(x), paddle.zeros_like(x), x)
    return sum(nan_free, axis, dtype, keepdim, name)
@templatedoc(op_type="sum") @templatedoc(op_type="sum")
def add_n(inputs, name=None): def add_n(inputs, name=None):
""" """
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register