diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3e7d10f8d1a02126c3d4bec490fcd2f3194123ee..75f785aaa12beaca9551e549b9ec841f00f5465f 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -3222,6 +3222,7 @@ def data_norm(input,
               summary_decay_rate=0.9999999,
               enable_scale_and_shift=False):
     """
+    :alias_main: paddle.static.nn.data_norm
     :api_attr: Static Graph
 
     **Data Normalization Layer**
@@ -3246,7 +3247,7 @@ def data_norm(input,
         y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
 
     Args:
-        input(variable): The input variable which is a LoDTensor.
+        input(Tensor): The input Tensor.
         act(string, Default None): Activation type, linear|relu|prelu|...
         epsilon(float, Default 1e-05):
         param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
@@ -3274,16 +3275,16 @@ def data_norm(input,
         enable_scale_and_shift(bool, Default False): do scale&shift after normalization.
 
     Returns:
-        Variable: A tensor variable which is the result after applying data normalization on the input.
+        Tensor: A tensor which is the result after applying data normalization on the input.
 
     Examples:
 
         .. code-block:: python
 
-            import paddle.fluid as fluid
+            import paddle
 
-            hidden1 = fluid.data(name="hidden1", shape=[64, 200])
-            hidden2 = fluid.layers.data_norm(name="hidden2", input=hidden1)
+            x = paddle.randn(shape=[32,100])
+            hidden2 = paddle.static.nn.data_norm(input=x)
     """
     helper = LayerHelper('data_norm', **locals())
     dtype = helper.input_dtype()
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 9aee911e568d1b2cd7aac0cf45e44f2886612a5a..a72ef925bfaffb76b9293cc1fbc33a8dd4f8ac86 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -807,35 +807,28 @@ def meshgrid(*args, **kwargs):
     vector, and creates N-dimensional grids.
 
     Args:
-        *args(Variable|list of Variable) : tensors (tuple(list) of tensor): the shapes of input k tensors are (N1,),
+        *args(Tensor|list of Tensor) : tensors (tuple(list) of tensor): the shapes of input k tensors are (N1,),
             (N2,),..., (Nk,). Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
         **kwargs (optional): Currently, we only accept name in **kwargs
             The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
-        Variable: k tensors. The shape of each tensor is (N1, N2, ..., Nk)
+        Tensor: k tensors. The shape of each tensor is (N1, N2, ..., Nk)
 
     Examples:
         .. code-block:: python
 
             import paddle
-            import paddle.fluid as fluid
-            import numpy as np
 
-            x = fluid.data(name='x', shape=[100], dtype='int32')
-            y = fluid.data(name='y', shape=[200], dtype='int32')
+            x = paddle.randint(low=0, high=100, shape=[100])
+            y = paddle.randint(low=0, high=100, shape=[200])
+
+            grid_x, grid_y = paddle.meshgrid(x, y)
 
-            input_1 = np.random.randint(0, 100, [100, ]).astype('int32')
-            input_2 = np.random.randint(0, 100, [200, ]).astype('int32')
+            print(grid_x.shape)
+            print(grid_y.shape)
 
-            exe = fluid.Executor(place=fluid.CPUPlace())
-            grid_x, grid_y = paddle.tensor.meshgrid(x, y)
-            res_1, res_2 = exe.run(fluid.default_main_program(),
-                                   feed={'x': input_1,
-                                         'y': input_2},
-                                   fetch_list=[grid_x, grid_y])
-
             #the shape of res_1 is (100, 200)
             #the shape of res_2 is (100, 200)
 
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 15580b6618e6dc61d5e74216776417a02846a16a..fbafc2353ddaaddc87eeb779b6251c0e2994d70d 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -813,19 +813,17 @@ def bmm(x, y, name=None):
     if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.
 
     Args:
-        x (Variable): The input variable which is a Tensor or LoDTensor.
-        y (Variable): The input variable which is a Tensor or LoDTensor.
+        x (Tensor): The input Tensor.
+        y (Tensor): The input Tensor.
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.
 
     Returns:
-        Variable: The product Tensor (or LoDTensor) variable.
+        Tensor: The product Tensor.
 
     Examples:
         import paddle
 
-        paddle.disable_static()
-
         # In imperative mode:
         # size x: (2, 2, 3) and y: (2, 3, 2)
         x = paddle.to_tensor([[[1.0, 1.0, 1.0],
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index dc6a04a4723bd92dbe1c76fce5b3e52981136211..dd043d3a1bd52724d64a679a946847350e5f88ce 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -213,7 +213,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
             Out.shape = (3 * 100 * 100 * 4)
 
     Args:
-        x (Variable): A tensor of number of dimentions >= axis. A tensor with data type float32,
+        x (Tensor): A tensor of number of dimentions >= axis. A tensor with data type float32,
                       float64, int8, int32, int64.
         start_axis (int): the start axis to flatten
         stop_axis (int): the stop axis to flatten
@@ -221,12 +221,12 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
                      Generally, no setting is required. Default: None.
 
     Returns:
-        Variable: A tensor with the contents of the input tensor, with input \
+        Tensor: A tensor with the contents of the input tensor, with input \
                   axes flattened by indicated start axis and end axis. \
                   A Tensor with data type same as input x.
 
     Raises:
-        ValueError: If x is not a Variable.
+        ValueError: If x is not a Tensor.
         ValueError: If start_axis or stop_axis is illegal.
 
     Examples:
@@ -234,20 +234,16 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            paddle.disable_static()
 
             image_shape=(2, 3, 4, 4)
 
-            x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100.
-            x = x.astype('float32')
+            x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3])
+            img = paddle.reshape(x, image_shape) / 100
 
-            img = paddle.to_tensor(x)
             out = paddle.flatten(img, start_axis=1, stop_axis=2)
             # out shape is [2, 12, 4]
     """
     if not (isinstance(x, Variable)):
-        raise ValueError("The input x should be a Variable")
+        raise ValueError("The input x should be a Tensor")
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
@@ -297,20 +293,18 @@ def roll(x, shifts, axis=None, name=None):
     the tensor will be flattened before rolling and then restored to the original shape.
 
     Args:
-        x (Variable): The x tensor variable as input.
+        x (Tensor): The x tensor variable as input.
        shifts (int|list|tuple): The number of places by which the elements of the `x` tensor are shifted.
        axis (int|list|tuple|None): axis(axes) along which to roll.
 
     Returns:
-        Variable: A Tensor with same data type as `x`.
+        Tensor: A Tensor with same data type as `x`.
 
     Examples:
         .. code-block:: python
 
             import paddle
-            import paddle.fluid as fluid
 
-            paddle.disable_static()
             x = paddle.to_tensor([[1.0, 2.0, 3.0],
                                   [4.0, 5.0, 6.0],
                                   [7.0, 8.0, 9.0]])
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index ce32fb76f5cd4da30f95baa3b8928d1c879477ca..020c9680f0e479c2276596c5058ca4637169bd69 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -931,31 +931,24 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
     $Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares
     the LoD information with input $input$.
 
     Args:
-        input (Variable): The input Tensor/LoDTensor to be added to the final result.
-        x (Variable): The first input Tensor/LoDTensor for matrix multiplication.
-        y (Variable): The second input Tensor/LoDTensor for matrix multiplication.
+        input (Tensor): The input Tensor to be added to the final result.
+        x (Tensor): The first input Tensor for matrix multiplication.
+        y (Tensor): The second input Tensor for matrix multiplication.
         beta (float): Coefficient of $input$.
         alpha (float): Coefficient of $x*y$.
         name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
 
     Returns:
-        Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of addmm op.
+        Tensor: The output Tensor of addmm op.
 
     Examples:
         ..  code-block:: python
-
-            import numpy as np
+            import paddle
 
-            data_x = np.ones((2, 2)).astype(np.float32)
-            data_y = np.ones((2, 2)).astype(np.float32)
-            data_input = np.ones((2, 2)).astype(np.float32)
-
-            paddle.disable_static()
-
-            x = paddle.to_tensor(data_x)
-            y = paddle.to_tensor(data_y)
-            input = paddle.to_tensor(data_input)
+            x = paddle.ones([2,2])
+            y = paddle.ones([2,2])
+            input = paddle.ones([2,2])
 
             out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
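
For anyone sanity-checking the rewritten addmm example above: addmm evaluates beta * input + alpha * (x matmul y). The snippet below is a minimal sketch of that check, not part of the patch; it assumes a Paddle 2.x install where dynamic graph mode is the default.

    import paddle

    # Same inputs as the updated docstring example: all-ones 2 x 2 matrices.
    x = paddle.ones([2, 2])
    y = paddle.ones([2, 2])
    input = paddle.ones([2, 2])

    # addmm computes beta * input + alpha * (x matmul y).
    # Each element of (x matmul y) is 1*1 + 1*1 = 2, so every output element
    # is 0.5 * 1 + 5.0 * 2 = 10.5.
    out = paddle.tensor.addmm(input=input, x=x, y=y, beta=0.5, alpha=5.0)
    print(out.numpy())  # every entry is 10.5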