Unverified commit f5fac6fd, authored by Liufang Sang, committed by GitHub

add paddle.std api (#23825)

* add paddle.std api test=develop

* update  test=develop

* fix example code format test=develop
Parent f0e743f1
@@ -63,7 +63,7 @@ from .tensor.creation import full_like #DEFINE_ALIAS
 from .tensor.creation import meshgrid #DEFINE_ALIAS
 # from .tensor.stat import mean #DEFINE_ALIAS
 # from .tensor.stat import reduce_mean #DEFINE_ALIAS
-# from .tensor.stat import std #DEFINE_ALIAS
+from .tensor.stat import std #DEFINE_ALIAS
 from .tensor.stat import var #DEFINE_ALIAS
 from .tensor.logic import equal #DEFINE_ALIAS
 # from .tensor.logic import greater_equal #DEFINE_ALIAS
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid

class TestStdLayer(unittest.TestCase):
    def setUp(self):
        self._dtype = "float64"
        self._input = np.random.random([2, 3, 4, 5]).astype(self._dtype)

    def static(self, axis=None, keepdim=False, unbiased=True):
        prog = fluid.Program()
        with fluid.program_guard(prog):
            data = fluid.data(
                name="data", dtype=self._dtype, shape=[None, 3, 4, 5])
            out = prog.current_block().create_var(
                dtype=self._dtype, shape=[2, 3, 4, 5])
            paddle.std(input=data,
                       axis=axis,
                       keepdim=keepdim,
                       unbiased=unbiased,
                       out=out)

        exe = fluid.Executor(self._place)
        return exe.run(feed={"data": self._input},
                       program=prog,
                       fetch_list=[out])[0]

    def dynamic(self, axis=None, keepdim=False, unbiased=True):
        with fluid.dygraph.guard(self._place):
            data = fluid.dygraph.to_variable(self._input)
            out = paddle.std(input=data,
                             axis=axis,
                             keepdim=keepdim,
                             unbiased=unbiased)
            return out.numpy()

    def numpy(self, axis=None, keepdim=False, unbiased=True):
        ddof = 1 if unbiased else 0
        axis = tuple(axis) if isinstance(axis, list) else axis
        return np.std(self._input, axis=axis, keepdims=keepdim, ddof=ddof)

    def test_equal(self):
        places = []
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self._place = place
            self.assertTrue(np.allclose(self.numpy(), self.static()))
            self.assertTrue(
                np.allclose(
                    self.numpy(axis=[0, 2]), self.dynamic(axis=[0, 2])))
            self.assertTrue(
                np.allclose(
                    self.numpy(axis=[1, 3], keepdim=True),
                    self.dynamic(axis=[1, 3], keepdim=True)))
            self.assertTrue(
                np.allclose(
                    self.numpy(unbiased=False), self.dynamic(unbiased=False)))


if __name__ == '__main__':
    unittest.main()
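A side note on the reference computation: numpy() above maps the unbiased flag onto NumPy's ddof argument, since the unbiased estimator divides the summed squared deviations by N - 1 (ddof=1) while the biased one divides by N (ddof=0). A minimal self-contained sketch of that correspondence, using plain NumPy only (an illustration, not part of the commit):

    import numpy as np

    x = np.array([0.2, 0.3, 0.5, 0.9], dtype="float64")

    # Unbiased estimator: divide by N - 1, i.e. ddof=1.
    unbiased = np.sqrt(((x - x.mean()) ** 2).sum() / (x.size - 1))
    # Biased estimator: divide by N, i.e. ddof=0 (NumPy's default).
    biased = np.sqrt(((x - x.mean()) ** 2).sum() / x.size)

    assert np.isclose(unbiased, np.std(x, ddof=1))
    assert np.isclose(biased, np.std(x, ddof=0))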
@@ -42,7 +42,7 @@ from .creation import tril #DEFINE_ALIAS
 from .creation import meshgrid #DEFINE_ALIAS
 # from .stat import mean #DEFINE_ALIAS
 # from .stat import reduce_mean #DEFINE_ALIAS
-# from .stat import std #DEFINE_ALIAS
+from .stat import std #DEFINE_ALIAS
 # from .stat import var #DEFINE_ALIAS
 from .logic import equal #DEFINE_ALIAS
 # from .logic import greater_equal #DEFINE_ALIAS
...
@@ -15,16 +15,15 @@
 # TODO: define statistical functions of a tensor
 __all__ = [  #'mean',
     #'reduce_mean',
-    #'std',
+    'std',
     'var'
 ]

 import numpy as np
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.framework import in_dygraph_mode
 from ..fluid import layers
 from .search import where
-from ..fluid.data_feeder import convert_dtype
+from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype

 def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
@@ -101,3 +100,59 @@ def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
         return out
     else:
         return tmp

def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
    """
    Computes the standard deviation of the input Variable's elements along the
    specified axis.

    Args:
        input (Variable): The input Variable whose standard deviation is computed;
            supported data types are float32 and float64.
        axis (list|int, optional): The axis along which the standard deviation is
            computed. If `None`, the standard deviation is computed over all
            elements of :attr:`input` and a Variable with a single element is
            returned; otherwise each value must be in the range
            :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`, the axis
            to compute is :math:`rank(input) + axis[i]`.
        keepdim (bool, optional): Whether to keep the reduced dimensions in the
            output Variable. Unless :attr:`keepdim` is True, the dimensions in
            :attr:`axis` are squeezed, so the result has :attr:`len(axis)` fewer
            dimensions than :attr:`input`. Default False.
        unbiased (bool, optional): Whether to compute the standard deviation with
            the unbiased estimator, in which case the divisor used in the
            computation is :math:`N - 1`, where :math:`N` is the number of
            elements along :attr:`axis`; otherwise the divisor is :math:`N`.
            Default True.
        out (Variable, optional): Alternate output Variable in which to store the
            resulting standard deviation. Default None.
        name (str, optional): The name for this layer. Normally there is no need
            for users to set this property. For more information, please refer
            to :ref:`api_guide_Name`. Default None.

    Returns:
        Variable: The resulting standard deviation with the same dtype as
        :attr:`input`. If :attr:`out` is None, a new Variable containing the
        standard deviation is returned; otherwise a reference to the output
        Variable is returned.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            # x is a Tensor variable with the following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            paddle.std(x)             # [0.28252685]
            paddle.std(x, axis=[0])   # [0.0707107, 0.07071075, 0.07071064, 0.1414217]
            paddle.std(x, axis=[-1])  # [0.30956957, 0.29439208]
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'std')
    tmp = var(input, axis=axis, keepdim=keepdim, unbiased=unbiased, name=name)
    tmp = layers.sqrt(tmp)
    if out:
        layers.assign(input=tmp, output=out)
        return out
    else:
        return tmp
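For reference, a minimal runnable sketch of the new API in dygraph mode, mirroring the unit test above; it assumes the 2.0-alpha fluid.dygraph interface shown in this commit and is illustrative, not part of the change itself:

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    x_np = np.array([[0.2, 0.3, 0.5, 0.9],
                     [0.1, 0.2, 0.6, 0.7]], dtype="float32")

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(x_np)
        # Standard deviation over all elements, unbiased (divisor N - 1) by default.
        print(paddle.std(x).numpy())                           # approx. [0.2825]
        # Standard deviation along axis 0, keeping the reduced dimension.
        print(paddle.std(x, axis=[0], keepdim=True).numpy())
        # Biased estimator (divisor N), matching np.std with ddof=0.
        print(paddle.std(x, unbiased=False).numpy())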