diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index f1a0b50e1d34cf759f2715c46cce4c42bacacd17..7cebcbbfcabe2119057d4ef00858c66149b8d3e3 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -445,7 +445,9 @@ def leaky_relu(x, negative_slope=0.01, name=None):
             import paddle.nn.functional as F
 
             x = paddle.to_tensor([-2., 0., 1.])
-            out = F.leaky_relu(x) # [-0.02, 0., 1.]
+            out = F.leaky_relu(x)
+            print(out)
+            # [-0.02, 0., 1.]
 
     """
     if in_dygraph_mode():
@@ -490,17 +492,17 @@ def prelu(x, weight, data_format="NCHW", name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            data = np.array([[[[-2.0,  3.0, -4.0,  5.0],
+            data = paddle.to_tensor([[[[-2.0,  3.0, -4.0,  5.0],
                                [ 3.0, -4.0,  5.0, -6.0],
                                [-7.0, -8.0,  8.0,  9.0]],
                               [[ 1.0, -2.0, -3.0,  4.0],
                                [-5.0,  6.0,  7.0, -8.0],
-                               [ 6.0,  7.0,  8.0,  9.0]]]], 'float32')
-            x = paddle.to_tensor(data)
-            w = paddle.to_tensor(np.array([0.25]).astype('float32'))
-            out = F.prelu(x, w)
+                               [ 6.0,  7.0,  8.0,  9.0]]]], dtype='float32')
+
+            w = paddle.to_tensor([0.25], dtype='float32')
+            out = F.prelu(data, w)
+            print(out)
             # [[[[-0.5 ,  3.  , -1.  ,  5.  ],
             #    [ 3.  , -1.  ,  5.  , -1.5 ],
             #    [-1.75, -2.  ,  8.  ,  9.  ]],
@@ -625,6 +627,7 @@ def rrelu(x, lower=1. / 8., upper=1. / 3., training=True, name=None):
                                     [ 6.0,  7.0,  8.0,  9.0]]]], dtype='float32')
 
             out = F.rrelu(input_tensor, 0.1, 0.3)
+            print(out)
             #[[[[-0.20000899  3.         -0.8810822   5.        ]
             #   [ 3.         -0.55175185  5.         -1.0776101 ]
             #   [-1.0680687  -1.9896201   8.          9.        ]]
@@ -699,10 +702,11 @@ def relu(x, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
-            out = F.relu(x) # [0., 0., 1.]
+            x = paddle.to_tensor([-2, 0, 1], dtype='float32')
+            out = F.relu(x)
+            print(out)
+            # [0., 0., 1.]
 
     """
     if in_dygraph_mode():
@@ -868,10 +872,11 @@ def relu6(x, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
-            out = F.relu6(x) # [0, 0.3, 6]
+            x = paddle.to_tensor([-1., 0.3, 6.5])
+            out = F.relu6(x)
+            print(out)
+            # [0, 0.3, 6]
     """
     threshold = 6.0
     if in_dygraph_mode():
@@ -921,10 +926,11 @@ def selu(x,
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]]))
-            out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
+            x = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
+            out = F.selu(x)
+            print(out)
+            # [[0, 1.050701], [2.101402, 3.152103]]
     """
     if scale <= 1.0:
         raise ValueError(
diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py
index c1b1d5fca4bf5a8b4f9724c8c933f52e4eaf808c..e4e47aa682833219c2c1373cdff38596dd3cb7cd 100644
--- a/python/paddle/nn/functional/vision.py
+++ b/python/paddle/nn/functional/vision.py
@@ -305,25 +305,27 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
     """
     This API implements pixel shuffle operation.
    See more details in :ref:`api_nn_vision_PixelShuffle` .
+
+
     Parameters:
         x(Tensor): 4-D tensor, the data type should be float32 or float64.
         upscale_factor(int): factor to increase spatial resolution.
-        data_format (str): The data format of the input and output data. An optional string from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
+        data_format (str, optional): The data format of the input and output data. An optional string from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
         name (str, optional): The default value is None. Normally there is no need for user to set this property.
+
     Returns:
         Out(tensor): Reshaped tensor according to the new dimension.
-    Raises:
-        ValueError: If the square of upscale_factor cannot divide the channels of input.
+
     Examples:
         .. code-block:: python
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-            x = np.random.randn(2, 9, 4, 4).astype(np.float32)
-            x_var = paddle.to_tensor(x)
-            out_var = F.pixel_shuffle(x_var, 3)
+
+            x = paddle.randn(shape=[2, 9, 4, 4])
+            out_var = F.pixel_shuffle(x, 3)
             out = out_var.numpy()
+            print(out.shape)
             # (2, 1, 12, 12)
     """
     if not isinstance(upscale_factor, int):
@@ -361,7 +363,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
     Parameters:
         x (Tensor): 4-D tensor, the data type should be float32 or float64.
         downscale_factor (int): Factor to decrease spatial resolution.
-        data_format (str): The data format of the input and output data. An optional string of NCHW or NHWC. The default is NCHW. When it is NCHW, the data is stored in the order of [batch_size, input_channels, input_height, input_width].
+        data_format (str, optional): The data format of the input and output data. An optional string of NCHW or NHWC. The default is NCHW. When it is NCHW, the data is stored in the order of [batch_size, input_channels, input_height, input_width].
         name (str, optional): Name for the operation (optional, default is None). Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
@@ -374,7 +376,8 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
             import paddle.nn.functional as F
             x = paddle.randn([2, 1, 12, 12])
             out = F.pixel_unshuffle(x, 3)
-            # out.shape = [2, 9, 4, 4]
+            print(out.shape)
+            # [2, 9, 4, 4]
     """
     if len(x.shape) != 4:
         raise ValueError(
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index 68cc0cedb8f27ef5839fa1883f6c21716956432a..dc80743de51c5e6c81f370bffa20dcd66c1ffa90 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -384,19 +384,18 @@ class PReLU(Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            paddle.set_default_dtype("float64")
 
-            data = np.array([[[[-2.0,  3.0, -4.0,  5.0],
-                               [ 3.0, -4.0,  5.0, -6.0],
-                               [-7.0, -8.0,  8.0,  9.0]],
-                              [[ 1.0, -2.0, -3.0,  4.0],
-                               [-5.0,  6.0,  7.0, -8.0],
-                               [ 6.0,  7.0,  8.0,  9.0]]]], 'float64')
-            x = paddle.to_tensor(data)
+            data = paddle.to_tensor([[[[-2.0,  3.0, -4.0,  5.0],
+                                       [ 3.0, -4.0,  5.0, -6.0],
+                                       [-7.0, -8.0,  8.0,  9.0]],
+                                      [[ 1.0, -2.0, -3.0,  4.0],
+                                       [-5.0,  6.0,  7.0, -8.0],
+                                       [ 6.0,  7.0,  8.0,  9.0]]]])
+
             m = paddle.nn.PReLU(1, 0.25)
-            out = m(x)
+            out = m(data)
+            print(out)
             # [[[[-0.5 ,  3.  , -1.  ,  5.  ],
             #    [ 3.  , -1.  ,  5.  , -1.5 ],
             #    [-1.75, -2.  ,  8.  ,  9.  ]],
@@ -496,7 +495,8 @@ class RReLU(Layer):
                                        [ 6.0,  7.0,  8.0,  9.0]]]], dtype='float32')
 
             rrelu_layer = paddle.nn.RReLU(0.1, 0.3)
-            output = rrelu_layer(input_tensor)
+            out = rrelu_layer(input_tensor)
+            print(out)
             #[[[[-0.20000899  3.         -0.88108218  5.        ]
             #   [ 3.         -0.55175185  5.         -1.07761011]
             #   [-1.06806871 -1.98962009  8.          9.        ]]
@@ -546,7 +546,9 @@ class ReLU(Layer):
             x = paddle.to_tensor([-2., 0., 1.])
             m = paddle.nn.ReLU()
-            out = m(x) # [0., 0., 1.]
+            out = m(x)
+            print(out)
+            # [0., 0., 1.]
 
""" def __init__(self, name=None): @@ -581,11 +583,12 @@ class ReLU6(Layer): .. code-block:: python import paddle - import numpy as np - x = paddle.to_tensor(np.array([-1, 0.3, 6.5])) + x = paddle.to_tensor([-1., 0.3, 6.5]) m = paddle.nn.ReLU6() - out = m(x) # [0, 0.3, 6] + out = m(x) + print(out) + # [0, 0.3, 6] """ def __init__(self, name=None): @@ -628,11 +631,12 @@ class SELU(Layer): .. code-block:: python import paddle - import numpy as np - x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]])) + x = paddle.to_tensor([[0.0, 1.0],[2.0, 3.0]]) m = paddle.nn.SELU() - out = m(x) # [[0, 1.050701],[2.101402, 3.152103]] + out = m(x) + print(out) + # [[0, 1.050701],[2.101402, 3.152103]] """ def __init__(self, diff --git a/python/paddle/nn/layer/vision.py b/python/paddle/nn/layer/vision.py index 80363a94ec26691d35195a5b0a088a7e794cd8bb..e338e89dadaf4079db2dc27c8df64666ee995f8c 100644 --- a/python/paddle/nn/layer/vision.py +++ b/python/paddle/nn/layer/vision.py @@ -25,9 +25,9 @@ class PixelShuffle(Layer): PixelShuffle Layer - This operator rearranges elements in a tensor of shape [N, C, H, W] - to a tensor of shape [N, C/upscale_factor**2, H*upscale_factor, W*upscale_factor], - or from shape [N, H, W, C] to [N, H*upscale_factor, W*upscale_factor, C/upscale_factor**2]. + Rearranges elements in a tensor of shape :math:`[N, C, H, W]` + to a tensor of shape :math:`[N, C/upscale_factor^2, H*upscale_factor, W \times upscale_factor]`, + or from shape :math:`[N, H, W, C]` to :math:`[N, H \times upscale_factor, W \times upscale_factor, C/upscale_factor^2]`. This is useful for implementing efficient sub-pixel convolution with a stride of 1/upscale_factor. Please refer to the paper: `Real-Time Single Image and Video Super-Resolution @@ -37,12 +37,12 @@ class PixelShuffle(Layer): Parameters: upscale_factor(int): factor to increase spatial resolution. - data_format (str): The data format of the input and output data. An optional string from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in the order of: [batch_size, input_channels, input_height, input_width]. + data_format (str, optional): The data format of the input and output data. An optional string from: "NCHW", "NHWC". The default is "NCHW". When it is "NCHW", the data is stored in the order of: [batch_size, input_channels, input_height, input_width]. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Shape: - - x: 4-D tensor with shape: (N, C, H, W) or (N, H, W, C). - - out: 4-D tensor with shape: (N, C/upscale_factor**2, H*upscale_factor, W*upscale_factor) or (N, H*upscale_factor, W*upscale_factor, C/upscale_factor^2). + - x: 4-D tensor with shape of :math:`(N, C, H, W)` or :math:`(N, H, W, C)`. + - out: 4-D tensor with shape of :math:`(N, C/upscale_factor^2, H \times upscale_factor, W \times upscale_factor)` or :math:`(N, H \times upscale_factor, W \times upscale_factor, C/upscale_factor^2)`. 
 
 
     Examples:
@@ -50,14 +50,12 @@ class PixelShuffle(Layer):
         .. code-block:: python
 
             import paddle
             import paddle.nn as nn
-            import numpy as np
 
-            x = np.random.randn(2, 9, 4, 4).astype(np.float32)
-            x_var = paddle.to_tensor(x)
+            x = paddle.randn(shape=[2, 9, 4, 4])
             pixel_shuffle = nn.PixelShuffle(3)
-            out_var = pixel_shuffle(x_var)
+            out_var = pixel_shuffle(x)
             out = out_var.numpy()
-            print(out.shape)
+            print(out.shape)
             # (2, 1, 12, 12)
     """
@@ -91,7 +89,7 @@
 class PixelUnshuffle(Layer):
     """
-    This operator rearranges elements in a tensor of shape :math:`[N, C, H, W]`
+    Rearranges elements in a tensor of shape :math:`[N, C, H, W]`
     to a tensor of shape :math:`[N, r^2C, H/r, W/r]`, or from shape
     :math:`[N, H, W, C]` to :math:`[N, H/r, W/r, r^2C]`, where :math:`r` is the
     downscale factor. This operation is the reversion of PixelShuffle operation.
 
@@ -101,7 +99,7 @@ class PixelUnshuffle(Layer):
 
     Parameters:
         downscale_factor (int): Factor to decrease spatial resolution.
-        data_format (str): The data format of the input and output data. An optional string of NCHW or NHWC. The default is NCHW. When it is NCHW, the data is stored in the order of [batch_size, input_channels, input_height, input_width].
+        data_format (str, optional): The data format of the input and output data. An optional string of NCHW or NHWC. The default is NCHW. When it is NCHW, the data is stored in the order of [batch_size, input_channels, input_height, input_width].
         name (str, optional): Name for the operation (optional, default is None). Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
 
     Shape:
@@ -117,7 +115,8 @@ class PixelUnshuffle(Layer):
 
             x = paddle.randn([2, 1, 12, 12])
             pixel_unshuffle = nn.PixelUnshuffle(3)
             out = pixel_unshuffle(x)
-            # out.shape = [2, 9, 4, 4]
+            print(out.shape)
+            # [2, 9, 4, 4]
 
     """
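
The updated pixel_shuffle / pixel_unshuffle examples can be sanity-checked end-to-end by composing the two ops, since one is the exact inverse of the other. A minimal sketch (the seed, shapes, and factor 3 below are arbitrary illustrative choices, not part of this patch):

    import paddle
    import paddle.nn.functional as F

    paddle.seed(2022)                      # arbitrary seed, only for reproducibility
    x = paddle.randn(shape=[2, 9, 4, 4])   # C=9 is divisible by 3**2

    # pixel_shuffle followed by pixel_unshuffle with the same factor only
    # rearranges elements, so it must restore the input bitwise.
    shuffled = F.pixel_shuffle(x, 3)       # shape becomes [2, 1, 12, 12]
    restored = F.pixel_unshuffle(shuffled, 3)

    assert shuffled.shape == [2, 1, 12, 12]
    assert bool(paddle.all(x == restored))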