diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3ac43df872e377e96f49d6852744febde219d69d..2feca60430dc04dd839b163c7469a6993570178a 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -9730,15 +9730,13 @@ def swish(x, beta=1.0, name=None):
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.prelu")
+@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
 def prelu(x, mode, param_attr=None, name=None):
     """
-    :api_attr: Static Graph
-
-    Equation:
+    prelu activation.
 
     .. math::
-        y = \max(0, x) + \\alpha * \min(0, x)
+        prelu(x) = max(0, x) + \\alpha * min(0, x)
 
     There are three modes for the activation:
 
@@ -9748,34 +9746,28 @@ def prelu(x, mode, param_attr=None, name=None):
         channel: Elements in same channel share same alpha.
         element: All elements do not share alpha. Each element has its own alpha.
 
-    Args:
-        x (Variable): The input Tensor or LoDTensor with data type float32.
+    Parameters:
+        x (Tensor): The input Tensor or LoDTensor with data type float32.
         mode (str): The mode for weight sharing.
-        param_attr(ParamAttr|None): The parameter attribute for the learnable
-            weight (alpha), it can be create by ParamAttr. None by default.
-            For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
-        name(str|None): For detailed information, please refer
-            to :ref:`api_guide_Name`. Usually name is no need to set and
-            None by default.
+        param_attr (ParamAttr|None, optional): The parameter attribute for the learnable
+            weight (alpha), it can be created by ParamAttr. None by default.
+            For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
-        Variable:
-
-        output(Variable): The tensor or LoDTensor with the same shape as input.
-        The data type is float32.
+        Tensor: A tensor with the same shape and data type as x.
 
     Examples:
 
         .. code-block:: python
 
-            import paddle.fluid as fluid
             import paddle
-            paddle.enable_static()
-            from paddle.fluid.param_attr import ParamAttr
-            x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
-            mode = 'channel'
-            output = fluid.layers.prelu(
-                x,mode,param_attr=ParamAttr(name='alpha'))
+
+            x = paddle.to_tensor([-1., 2., 3.])
+            param = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.2))
+            out = paddle.static.nn.prelu(x, 'all', param)
+            # [-0.2, 2., 3.]
 
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'prelu')
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index fd86c2e9fa760dd3929c5b157090917bb22ef507..e7adc7106a4f09c7aef710b93b0976a0ae13fa45 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -79,9 +79,8 @@ def elu(x, alpha=1.0, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
+            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
             out = F.elu(x, alpha=0.2)
             # [[-0.12642411  6.        ]
             #  [ 1.         15.6      ]]
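Note: the -0.12642411 entry in the example above follows directly from the
definition elu(x) = max(0, x) + min(0, alpha * (e^x - 1)): for x = -1 and
alpha = 0.2, the negative branch gives 0.2 * (e^{-1} - 1) ≈ -0.126424. A quick
check in plain Python (a sketch; nothing beyond the formula itself is assumed):

.. code-block:: python

    import math

    # negative branch of ELU: alpha * (exp(x) - 1)
    alpha = 0.2
    print(alpha * (math.exp(-1.0) - 1))  # -0.12642411176571153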
@@ -131,11 +130,14 @@ def gelu(x, approximate=False, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
-            out1 = F.gelu(x) # [-0.158655 0.345731 0.841345 1.39979]
-            out2 = F.gelu(x, True) # [-0.158808 0.345714 0.841192 1.39957]
+            x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
+            out1 = F.gelu(x)
+            # [[-0.15865529,  0.34573123],
+            #  [ 0.84134471,  1.39978933]]
+            out2 = F.gelu(x, True)
+            # [[-0.15880799,  0.34571400],
+            #  [ 0.84119201,  1.39957154]]
 
     """
     if in_dygraph_mode():
@@ -181,11 +183,8 @@ def hardshrink(x, threshold=0.5, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
+            x = paddle.to_tensor([-1, 0.3, 2.5])
             out = F.hardshrink(x) # [-1., 0., 2.5]
 
     """
@@ -385,11 +384,8 @@ def leaky_relu(x, negative_slope=0.01, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
+            x = paddle.to_tensor([-2., 0., 1.])
             out = F.leaky_relu(x) # [-0.02, 0., 1.]
 
     """
@@ -1147,8 +1143,10 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
 
     .. math::
 
-        log\\_softmax[i, j] = log(softmax(x))
-                            = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
+        \\begin{aligned}
+        log\\_softmax[i, j] &= log(softmax(x)) \\\\
+        &= log(\\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))})
+        \\end{aligned}
 
     Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
@@ -1174,16 +1172,13 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = np.array([[[-2.0, 3.0, -4.0, 5.0],
-                           [3.0, -4.0, 5.0, -6.0],
-                           [-7.0, -8.0, 8.0, 9.0]],
-                          [[1.0, -2.0, -3.0, 4.0],
-                           [-5.0, 6.0, 7.0, -8.0],
-                           [6.0, 7.0, 8.0, 9.0]]], 'float32')
+            x = [[[-2.0, 3.0, -4.0, 5.0],
+                  [3.0, -4.0, 5.0, -6.0],
+                  [-7.0, -8.0, 8.0, 9.0]],
+                 [[1.0, -2.0, -3.0, 4.0],
+                  [-5.0, 6.0, 7.0, -8.0],
+                  [6.0, 7.0, 8.0, 9.0]]]
             x = paddle.to_tensor(x)
             out1 = F.log_softmax(x)
             out2 = F.log_softmax(x, dtype='float64')
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index 32979bae34d8038edee014ec341859459018c0a1..520762107db07ec19c2bfb8f8274355e90cbbc68 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -70,9 +70,8 @@ class ELU(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
+            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
             m = paddle.nn.ELU(0.2)
             out = m(x)
             # [[-0.12642411  6.        ]
             #  [ 1.         15.6      ]]
@@ -166,11 +165,8 @@ class Hardshrink(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            paddle.disable_static()
-
-            x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
+            x = paddle.to_tensor([-1, 0.3, 2.5])
             m = paddle.nn.Hardshrink()
             out = m(x) # [-1., 0., 2.5]
     """
@@ -293,11 +289,10 @@ class Hardtanh(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
+            x = paddle.to_tensor([-1.5, 0.3, 2.5])
             m = paddle.nn.Hardtanh()
-            out = m(x) # # [-1., 0.3, 1.]
+            out = m(x) # [-1., 0.3, 1.]
     """
 
     def __init__(self, min=-1.0, max=1.0, name=None):
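Note: the corrected log_softmax formula above is the numerically stable
identity log(softmax(x)) = x - logsumexp(x) along the reduced axis. A quick
numeric check (a sketch, assuming paddle.logsumexp and paddle.allclose as
available in Paddle 2.0 dygraph mode):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]])
    out = F.log_softmax(x, axis=-1)
    # log(softmax(x)) == x - logsumexp(x), computed without forming softmax
    ref = x - paddle.logsumexp(x, axis=-1, keepdim=True)
    print(bool(paddle.allclose(out, ref)))  # True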
@@ -397,9 +392,8 @@ class ReLU(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
+            x = paddle.to_tensor([-2., 0., 1.])
             m = paddle.nn.ReLU()
             out = m(x) # [0., 0., 1.]
     """
@@ -613,7 +607,7 @@ class Hardsigmoid(layers.Layer):
 
             import paddle
 
-            m = paddle.nn.Sigmoid()
+            m = paddle.nn.Hardsigmoid()
             x = paddle.to_tensor([-4., 5., 1.])
             out = m(x) # [0., 1, 0.666667]
     """
@@ -1016,8 +1010,10 @@ class LogSoftmax(layers.Layer):
 
     .. math::
 
-        Out[i, j] = log(softmax(x))
-                  = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
+        \\begin{aligned}
+        Out[i, j] &= log(softmax(x)) \\\\
+        &= log(\\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j]))})
+        \\end{aligned}
 
     Parameters:
         axis (int, optional): The axis along which to perform log_softmax
@@ -1035,16 +1031,13 @@ class LogSoftmax(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = np.array([[[-2.0, 3.0, -4.0, 5.0],
-                           [3.0, -4.0, 5.0, -6.0],
-                           [-7.0, -8.0, 8.0, 9.0]],
-                          [[1.0, -2.0, -3.0, 4.0],
-                           [-5.0, 6.0, 7.0, -8.0],
-                           [6.0, 7.0, 8.0, 9.0]]])
+            x = [[[-2.0, 3.0, -4.0, 5.0],
+                  [3.0, -4.0, 5.0, -6.0],
+                  [-7.0, -8.0, 8.0, 9.0]],
+                 [[1.0, -2.0, -3.0, 4.0],
+                  [-5.0, 6.0, 7.0, -8.0],
+                  [6.0, 7.0, 8.0, 9.0]]]
             m = paddle.nn.LogSoftmax()
             x = paddle.to_tensor(x)
             out = m(x)
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index a69bc64c4cf66906e8c3168b3ac063d69edaf370..622ae3c584ef04fdfadaa352344b528afe00d92f 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -300,9 +300,6 @@ def ones(shape, dtype=None, name=None):
 
 def ones_like(x, dtype=None, name=None):
     """
-    :alias_main: paddle.ones_like
-    :alias: paddle.tensor.ones_like, paddle.tensor.creation.ones_like
-
     This OP returns a Tensor filled with the value 1, with the same shape and
     data type (use ``dtype`` if ``dtype`` is not None) as ``x``.
 
@@ -323,18 +320,16 @@ def ones_like(x, dtype=None, name=None):
 
     Raise:
         TypeError: If ``dtype`` is not None and is not bool, float16, float32,
-        float64, int32 or int64.
+            float64, int32 or int64.
 
     Examples:
         .. code-block:: python
 
             import paddle
 
-            paddle.disable_static()
-
             x = paddle.to_tensor([1,2,3])
-            out1 = paddle.zeros_like(x) # [1., 1., 1.]
-            out2 = paddle.zeros_like(x, dtype='int32') # [1, 1, 1]
+            out1 = paddle.ones_like(x) # [1, 1, 1]
+            out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1]
 
     """
     return full_like(x=x, fill_value=1, dtype=dtype, name=name)
@@ -380,9 +375,6 @@ def zeros(shape, dtype=None, name=None):
 
 def zeros_like(x, dtype=None, name=None):
     """
-    :alias_main: paddle.zeros_like
-    :alias: paddle.tensor.zeros_like, paddle.tensor.creation.zeros_like
-
     This OP returns a Tensor filled with the value 0, with the same shape and
     data type (use ``dtype`` if ``dtype`` is not None) as ``x``.
 
@@ -403,16 +395,14 @@ def zeros_like(x, dtype=None, name=None):
 
     Raise:
         TypeError: If ``dtype`` is not None and is not bool, float16, float32,
-        float64, int32 or int64.
+            float64, int32 or int64.
 
     Examples:
         .. code-block:: python
 
             import paddle
 
-            paddle.disable_static()
-
-            x = paddle.to_tensor([1,2,3])
+            x = paddle.to_tensor([1, 2, 3])
             out1 = paddle.zeros_like(x) # [0., 0., 0.]
             out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0]
 
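Note: ones_like bottoms out in full_like(x, 1), as its return statement above
shows, so dtype handling is identical across ones_like, zeros_like and
full_like. A minimal sketch of the dtype override (assuming to_tensor infers
int64 for plain Python ints, as in Paddle 2.0):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([1, 2, 3])            # int64 by default
    a = paddle.ones_like(x)                    # int64   -> [1, 1, 1]
    b = paddle.ones_like(x, dtype='float32')   # float32 -> [1., 1., 1.]
    c = paddle.full_like(x, 7)                 # same mechanism -> [7, 7, 7]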
@@ -519,9 +509,6 @@ def full(shape, fill_value, dtype=None, name=None):
 
 def arange(start=0, end=None, step=1, dtype=None, name=None):
     """
-    :alias_main: paddle.arange
-    :alias: paddle.tensor.arange, paddle.tensor.creation.arange
-
     This OP returns a 1-D Tensor with spaced values within a given interval.
 
     Values are generated into the half-open interval [``start``, ``end``) with
@@ -552,33 +539,30 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
 
     Returns:
         Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
-        taken with common difference ``step`` beginning from ``start``. Its
-        data type is set by ``dtype``.
+            taken with common difference ``step`` beginning from ``start``. Its
+            data type is set by ``dtype``.
 
     Raises:
         TypeError: If ``dtype`` is not int32, int64, float32, float64.
 
-    examples:
-
+    Examples:
        .. code-block:: python
 
-        import paddle
-
-        paddle.disable_static()
+            import paddle
 
-        out1 = paddle.arange(5)
-        # [0, 1, 2, 3, 4]
+            out1 = paddle.arange(5)
+            # [0, 1, 2, 3, 4]
 
-        out2 = paddle.arange(3, 9, 2.0)
-        # [3, 5, 7]
+            out2 = paddle.arange(3, 9, 2.0)
+            # [3., 5., 7.]
 
-        # use 4.999 instead of 5.0 to avoid floating point rounding errors
-        out3 = paddle.arange(4.999, dtype='float32')
-        # [0., 1., 2., 3., 4.]
+            # use 4.999 instead of 5.0 to avoid floating point rounding errors
+            out3 = paddle.arange(4.999, dtype='float32')
+            # [0., 1., 2., 3., 4.]
 
-        start_var = paddle.to_tensor([3])
-        out4 = paddle.arange(start_var, 7)
-        # [3, 4, 5, 6]
+            start_var = paddle.to_tensor([3])
+            out4 = paddle.arange(start_var, 7)
+            # [3, 4, 5, 6]
 
     """
     if dtype is None:
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 7e4d3d7bf9279b8e878ccd1bf5a74575730dbff9..934008dc969f16266e29d17e75422dce476bf46a 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -252,16 +252,14 @@ def standard_normal(shape, dtype=None, name=None):
 
             import paddle
 
-            paddle.disable_static()
-
             # example 1: attr shape is a list which doesn't contain Tensor.
             out1 = paddle.standard_normal(shape=[2, 3])
             # [[-2.923464  ,  0.11934398, -0.51249987],  # random
             #  [ 0.39632758,  0.08177969,  0.2692008 ]]  # random
 
             # example 2: attr shape is a list which contains Tensor.
-            dim1 = paddle.full([1], 2, "int64")
-            dim2 = paddle.full([1], 3, "int32")
+            dim1 = paddle.to_tensor([2], 'int64')
+            dim2 = paddle.to_tensor([3], 'int32')
             out2 = paddle.standard_normal(shape=[dim1, dim2, 2])
             # [[[-2.8852394 , -0.25898588],  # random
             #   [-0.47420555,  0.17683524],  # random
@@ -272,8 +270,7 @@ def standard_normal(shape, dtype=None, name=None):
 
             # example 3: attr shape is a Tensor, the data type must be int64 or int32.
             shape_tensor = paddle.to_tensor([2, 3])
-            result_3 = paddle.standard_normal(shape_tensor)
-
+            out3 = paddle.standard_normal(shape_tensor)
             # [[-2.878077 ,  0.17099959,  0.05111201]  # random
             #  [-0.3761474, -1.044801  ,  1.1870178 ]]  # random
 
@@ -281,7 +278,58 @@ def standard_normal(shape, dtype=None, name=None):
     return gaussian(shape=shape, mean=0.0, std=1.0, dtype=dtype, name=name)
 
 
-randn = standard_normal
+def randn(shape, dtype=None, name=None):
+    """
+    This OP returns a Tensor filled with random values sampled from a standard
+    normal distribution with mean 0 and standard deviation 1, with ``shape``
+    and ``dtype``.
+
+    Args:
+        shape (list|tuple|Tensor): The shape of the output Tensor. If ``shape``
+            is a list or tuple, the elements of it should be integers or Tensors
+            (with the shape [1], and the data type int32 or int64). If ``shape``
+            is a Tensor, it should be a 1-D Tensor(with the data type int32 or
+            int64).
+        dtype (str|np.dtype, optional): The data type of the output Tensor.
+            Supported data types: float32, float64.
+            Default is None, use global default dtype (see ``get_default_dtype``
+            for details).
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Tensor: A Tensor filled with random values sampled from a standard
+        normal distribution with mean 0 and standard deviation 1, with
+        ``shape`` and ``dtype``.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            # example 1: attr shape is a list which doesn't contain Tensor.
+            out1 = paddle.randn(shape=[2, 3])
+            # [[-2.923464  ,  0.11934398, -0.51249987],  # random
+            #  [ 0.39632758,  0.08177969,  0.2692008 ]]  # random
+
+            # example 2: attr shape is a list which contains Tensor.
+            dim1 = paddle.to_tensor([2], 'int64')
+            dim2 = paddle.to_tensor([3], 'int32')
+            out2 = paddle.randn(shape=[dim1, dim2, 2])
+            # [[[-2.8852394 , -0.25898588],  # random
+            #   [-0.47420555,  0.17683524],  # random
+            #   [-0.7989969 ,  0.00754541]],  # random
+            #  [[ 0.85201347,  0.32320443],  # random
+            #   [ 1.1399018 ,  0.48336947],  # random
+            #   [ 0.8086993 ,  0.6868893 ]]]  # random
+
+            # example 3: attr shape is a Tensor, the data type must be int64 or int32.
+            shape_tensor = paddle.to_tensor([2, 3])
+            out3 = paddle.randn(shape_tensor)
+            # [[-2.878077 ,  0.17099959,  0.05111201]  # random
+            #  [-0.3761474, -1.044801  ,  1.1870178 ]]  # random
+    """
+    return standard_normal(shape, dtype, name)
 
 
 def normal(mean=0.0, std=1.0, shape=None, name=None):
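Note: randn is now a real function instead of an alias, but it still forwards
straight to standard_normal, so the two draw identical samples from the same
generator state. A quick check (a sketch, assuming paddle.seed for global
seeding in dygraph mode):

.. code-block:: python

    import paddle

    paddle.seed(2020)
    a = paddle.randn([2, 3])

    paddle.seed(2020)
    b = paddle.standard_normal([2, 3])

    # same seed, same forwarding call -> identical samples
    print(bool(paddle.allclose(a, b)))  # True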
@@ -322,8 +370,6 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
 
             import paddle
 
-            paddle.disable_static()
-
             out1 = paddle.normal(shape=[2, 3])
             # [[ 0.17501129  0.32364586  1.561118  ]  # random
             #  [-1.7232178   1.1545963  -0.76156676]]  # random
@@ -381,7 +427,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
 
     Examples:
-    ::
+        .. code-block:: text
 
             Input:
               shape = [1, 2]
@@ -423,33 +469,27 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
 
             import paddle
 
-            paddle.disable_static()
-
             # example 1:
             # attr shape is a list which doesn't contain Tensor.
-            result_1 = paddle.tensor.random.uniform(shape=[3, 4])
-            # [[ 0.84524226,  0.6921872,   0.56528175,  0.71690357],
-            #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],
-            #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]
+            out1 = paddle.uniform(shape=[3, 4])
+            # [[ 0.84524226,  0.6921872,   0.56528175,  0.71690357],  # random
+            #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],  # random
+            #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]  # random
 
             # example 2:
             # attr shape is a list which contains Tensor.
-            dim_1 = paddle.full([1], 2, "int64")
-            dim_2 = paddle.full([1], 3, "int32")
-            result_2 = paddle.tensor.random.uniform(shape=[dim_1, dim_2])
-            # [[-0.9951253,   0.30757582,  0.9899647 ],
-            #  [ 0.5864527,   0.6607096,  -0.8886161 ]]
+            dim1 = paddle.to_tensor([2], 'int64')
+            dim2 = paddle.to_tensor([3], 'int32')
+            out2 = paddle.uniform(shape=[dim1, dim2])
+            # [[-0.9951253,   0.30757582,  0.9899647 ],  # random
+            #  [ 0.5864527,   0.6607096,  -0.8886161]]  # random
 
             # example 3:
             # attr shape is a Tensor, the data type must be int64 or int32.
             shape_tensor = paddle.to_tensor([2, 3])
-            result_3 = paddle.tensor.random.uniform(shape_tensor)
-            # if shape_tensor's value is [2, 3]
-            # result_3 is:
-            # [[-0.8517412,  -0.4006908,   0.2551912 ],
-            #  [ 0.3364414,   0.36278176, -0.16085452]]
-
-
+            out3 = paddle.uniform(shape_tensor)
+            # [[-0.8517412,  -0.4006908,   0.2551912 ],  # random
+            #  [ 0.3364414,   0.36278176, -0.16085452]]  # random
     """
     if dtype is None:
         dtype = paddle.framework.get_default_dtype()
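Note: uniform draws from the half-open interval [``min``, ``max``), so ``max``
itself is never returned. A quick bounds check (a sketch, run in dygraph mode):

.. code-block:: python

    import paddle

    out = paddle.uniform(shape=[10000], min=-1.0, max=1.0)
    # every sample lies in [min, max)
    print(bool(paddle.min(out) >= -1.0))  # True
    print(bool(paddle.max(out) < 1.0))    # True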
@@ -517,8 +557,6 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
 
             import paddle
 
-            paddle.disable_static()
-
             # example 1:
             # attr shape is a list which doesn't contain Tensor.
             out1 = paddle.randint(low=-5, high=5, shape=[3])
@@ -526,18 +564,17 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
 
             # example 2:
             # attr shape is a list which contains Tensor.
-            dim1 = paddle.full([1], 2, "int64")
-            dim2 = paddle.full([1], 3, "int32")
-            out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2], dtype="int32")
+            dim1 = paddle.to_tensor([2], 'int64')
+            dim2 = paddle.to_tensor([3], 'int32')
+            out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2])
             # [[0, -1, -3],  # random
             #  [4, -2,  0]]  # random
 
             # example 3:
             # attr shape is a Tensor
-            shape_tensor = paddle.to_tensor(3)
-            result_3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
-
+            shape_tensor = paddle.to_tensor(3)
+            out3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
             # [-2, 2, 3]  # random
 
             # example 4:
@@ -611,8 +647,6 @@ def randperm(n, dtype="int64", name=None):
 
             import paddle
 
-            paddle.disable_static()
-
             out1 = paddle.randperm(5)
             # [4, 1, 2, 3, 0]  # random
@@ -668,15 +702,14 @@ def rand(shape, dtype=None, name=None):
 
             import paddle
 
-            paddle.disable_static()
             # example 1: attr shape is a list which doesn't contain Tensor.
             out1 = paddle.rand(shape=[2, 3])
             # [[0.451152  ,  0.55825245,  0.403311  ],  # random
             #  [0.22550228,  0.22106001,  0.7877319 ]]  # random
 
             # example 2: attr shape is a list which contains Tensor.
-            dim1 = paddle.full([1], 2, "int64")
-            dim2 = paddle.full([1], 3, "int32")
+            dim1 = paddle.to_tensor([2], 'int64')
+            dim2 = paddle.to_tensor([3], 'int32')
             out2 = paddle.rand(shape=[dim1, dim2, 2])
             # [[[0.8879919 ,  0.25788337],  # random
             #   [0.28826773,  0.9712097 ],  # random
@@ -687,8 +720,7 @@ def rand(shape, dtype=None, name=None):
 
             # example 3: attr shape is a Tensor, the data type must be int64 or int32.
             shape_tensor = paddle.to_tensor([2, 3])
-            result_3 = paddle.rand(shape_tensor)
-
+            out3 = paddle.rand(shape_tensor)
             # [[0.22920267,  0.841956  ,  0.05981819],  # random
             #  [0.4836288 ,  0.24573246,  0.7516129 ]]  # random
 
diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py
index 3873d893bd7c34e31dc5303216e0dc13672ccc1f..9e565d4e5223cdcaa7c06702671b973b29c240e1 100644
--- a/python/paddle/tensor/stat.py
+++ b/python/paddle/tensor/stat.py
@@ -56,17 +56,13 @@ def mean(x, axis=None, keepdim=False, name=None):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            paddle.disable_static()
-
-            x = np.array([[[1, 2, 3, 4],
-                           [5, 6, 7, 8],
-                           [9, 10, 11, 12]],
-                          [[13, 14, 15, 16],
-                           [17, 18, 19, 20],
-                           [21, 22, 23, 24]]], 'float32')
-            x = paddle.to_tensor(x)
+            x = paddle.to_tensor([[[1., 2., 3., 4.],
+                                   [5., 6., 7., 8.],
+                                   [9., 10., 11., 12.]],
+                                  [[13., 14., 15., 16.],
+                                   [17., 18., 19., 20.],
+                                   [21., 22., 23., 24.]]])
             out1 = paddle.mean(x)
             # [12.5]
             out2 = paddle.mean(x, axis=-1)
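Note: for mean (and the other reductions in stat.py), axis selects what is
reduced and keepdim only controls whether the reduced axis is kept with size
1. A small shape sketch (assuming the reduce-all result has shape [1], as the
# [12.5] comment above suggests):

.. code-block:: python

    import paddle

    x = paddle.ones([2, 3, 4])
    print(paddle.mean(x).shape)                         # [1] (reduce all axes)
    print(paddle.mean(x, axis=-1).shape)                # [2, 3]
    print(paddle.mean(x, axis=-1, keepdim=True).shape)  # [2, 3, 1]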
@@ -145,12 +141,8 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
-            x = paddle.to_tensor(x)
+            x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
             out1 = paddle.var(x)
             # [2.66666667]
             out2 = paddle.var(x, axis=1)
@@ -208,12 +200,8 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
-            x = paddle.to_tensor(x)
+            x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
             out1 = paddle.std(x)
             # [1.63299316]
             out2 = paddle.std(x, axis=1)
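Note: std is the square root of var, and unbiased only switches the divisor
between n - 1 (the default) and n. A quick check against the values in the
examples above (a sketch, run in dygraph mode):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
    v = paddle.var(x)                          # [2.66666667], divisor n - 1 = 5
    v_biased = paddle.var(x, unbiased=False)   # [2.22222222], divisor n = 6
    s = paddle.std(x)                          # [1.63299316]
    print(bool(paddle.allclose(s, paddle.sqrt(v))))  # True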