From b347ca084e17f8646cdc80fe06c259bad201cf41 Mon Sep 17 00:00:00 2001 From: huangjun12 <2399845970@qq.com> Date: Thu, 10 Oct 2019 14:35:00 +0800 Subject: [PATCH] Fix document of APIs, test=develop, test=document_fix (#20314) --- paddle/fluid/API.spec | 10 +- .../fluid/layers/learning_rate_scheduler.py | 78 +++--- python/paddle/fluid/layers/nn.py | 225 +++++++++++------- 3 files changed, 192 insertions(+), 121 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index c236772b61..e11e9b7831 100755 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -201,11 +201,11 @@ paddle.fluid.layers.squeeze (ArgSpec(args=['input', 'axes', 'name'], varargs=Non paddle.fluid.layers.unsqueeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b9bd3129d36a70e7c4385df51ff71c62')) paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', '74498d37dd622ac472cb36887fce09ea')) paddle.fluid.layers.lod_append (ArgSpec(args=['x', 'level'], varargs=None, keywords=None, defaults=None), ('document', '37663c7c179e920838a250ea0e28d909')) -paddle.fluid.layers.lrn (ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', '73d297256da8954617996958d26ee93d')) +paddle.fluid.layers.lrn (ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', 'fa565b65fb98d3ca82361c79f41b06b2')) paddle.fluid.layers.pad (ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '36b6e58678956585e5b30aa3de123a60')) paddle.fluid.layers.pad_constant_like (ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '95aa1972983f30fe9b5a3713e523e20f')) paddle.fluid.layers.label_smooth 
(ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None)), ('document', '214f1dfbe95a628600bbe99e836319cf')) -paddle.fluid.layers.roi_pool (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', '49368d724023a66b41b0071be41c0ba5')) +paddle.fluid.layers.roi_pool (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', '6fc9bae94518bbf3e1a9e479f38f6537')) paddle.fluid.layers.roi_align (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)), ('document', '3885fd76e122ac0563fa8369bcab7363')) paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)), ('document', '7e8e4bf1f0f8612961ed113e8af8f0c5')) paddle.fluid.layers.image_resize (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1, 'NCHW')), ('document', 'd29d829607b5ff12924197a3ba296c89')) @@ -297,7 +297,7 @@ paddle.fluid.layers.py_func (ArgSpec(args=['func', 'x', 'out', 'backward_func', paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '42d5155374f69786300d90d751956998')) paddle.fluid.layers.prroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(1.0, 1, 1, None)), ('document', '454c7ea8c73313dd41513929d7526303')) paddle.fluid.layers.teacher_student_sigmoid_loss 
(ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0)), ('document', 'b0e07aa41caae04b07a8e8217cc96020')) -paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '11bb8e62cc9256958eff3991fe4834da')) +paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '9d93ee81f7a3e526d68bb280bc695d6c')) paddle.fluid.layers.kldiv_loss (ArgSpec(args=['x', 'target', 'reduction', 'name'], varargs=None, keywords=None, defaults=('mean', None)), ('document', '18bc95c62d3300456c3c7da5278b47bb')) paddle.fluid.layers.npair_loss (ArgSpec(args=['anchor', 'positive', 'labels', 'l2_reg'], varargs=None, keywords=None, defaults=(0.002,)), ('document', '3828c4bd81c25af0ab955f52d453c587')) paddle.fluid.layers.pixel_shuffle (ArgSpec(args=['x', 'upscale_factor'], varargs=None, keywords=None, defaults=None), ('document', '7e5cac851fd9bad344230e1044b6a565')) @@ -310,7 +310,7 @@ paddle.fluid.layers.unfold (ArgSpec(args=['x', 'kernel_sizes', 'strides', 'paddi paddle.fluid.layers.deformable_roi_pooling (ArgSpec(args=['input', 'rois', 'trans', 'no_trans', 'spatial_scale', 'group_size', 'pooled_height', 'pooled_width', 'part_size', 'sample_per_part', 'trans_std', 'position_sensitive', 'name'], varargs=None, keywords=None, defaults=(False, 1.0, [1, 1], 1, 1, None, 1, 0.1, False, None)), ('document', 'e0e7bf35da2287efb015546f1b8350df')) paddle.fluid.layers.filter_by_instag (ArgSpec(args=['ins', 'ins_tag', 'filter_tag', 'is_lod'], varargs=None, keywords=None, defaults=None), ('document', '7703a2088af8de4128b143ff1164ca4a')) paddle.fluid.layers.shard_index (ArgSpec(args=['input', 'index_num', 'nshards', 'shard_id', 'ignore_value'], varargs=None, keywords=None, defaults=(-1,)), ('document', '3c6b30e9cd57b38d4a5fa1ade887f779')) -paddle.fluid.layers.hard_swish (ArgSpec(args=['x', 
'threshold', 'scale', 'offset', 'name'], varargs=None, keywords=None, defaults=(6.0, 6.0, 3.0, None)), ('document', '6a5152a7015c62cb8278fc24cb456459')) +paddle.fluid.layers.hard_swish (ArgSpec(args=['x', 'threshold', 'scale', 'offset', 'name'], varargs=None, keywords=None, defaults=(6.0, 6.0, 3.0, None)), ('document', 'bd763b9ca99239d624c3cb4626e3627a')) paddle.fluid.layers.mse_loss (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', 'd9ede6469288636e1b3233b461a165c9')) paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '126ede8ce0e751244b1b54cd359c89d7')) paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '9d7806e31bdf727c1a23b8782a09b545')) @@ -447,7 +447,7 @@ paddle.fluid.layers.polynomial_decay (ArgSpec(args=['learning_rate', 'decay_step paddle.fluid.layers.piecewise_decay (ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None), ('document', 'd9f654117542c6b702963dda107a247f')) paddle.fluid.layers.noam_decay (ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None), ('document', 'fd57228fb76195e66bbcc8d8e42c494d')) paddle.fluid.layers.cosine_decay (ArgSpec(args=['learning_rate', 'step_each_epoch', 'epochs'], varargs=None, keywords=None, defaults=None), ('document', '1062e487dd3b50a6e58b5703b4f594c9')) -paddle.fluid.layers.linear_lr_warmup (ArgSpec(args=['learning_rate', 'warmup_steps', 'start_lr', 'end_lr'], varargs=None, keywords=None, defaults=None), ('document', 'dc7292c456847ba41cfd318e9f7f4363')) +paddle.fluid.layers.linear_lr_warmup (ArgSpec(args=['learning_rate', 'warmup_steps', 'start_lr', 'end_lr'], varargs=None, keywords=None, defaults=None), ('document', 
'bfb548a508c34998c969f19eb47ee473')) paddle.fluid.layers.Uniform ('paddle.fluid.layers.distributions.Uniform', ('document', '9b1a9ebdd8ae18bf562486611ed74e59')) paddle.fluid.layers.Uniform.__init__ (ArgSpec(args=['self', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.layers.Uniform.entropy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'cde9f1980a2be7939798b32ec8cd59e1')) diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 5be4ea7560..e80f5d0120 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -440,37 +440,59 @@ def cosine_decay(learning_rate, step_each_epoch, epochs): def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr): """ - Applies linear learning rate warmup before the normal learning rate - scheduling. - - .. code-block:: python - - if global_step < warmup_steps: - linear_step = end_lr - start_lr - lr = start_lr + linear_step * (global_step / warmup_steps) - + This operator use the linear learning rate warm up strategy to adjust the learning rate preliminarily before the normal learning rate scheduling. + For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks `_ + + When global_step < warmup_steps, learning rate is updated as: + + .. code-block:: text + + linear_step = end_lr - start_lr + lr = start_lr + linear_step * (global_step / warmup_steps) + + where start_lr is the initial learning rate, and end_lr is the final learning rate; + + When global_step >= warmup_steps, learning rate is updated as: + + .. code-block:: text + + lr = learning_rate + + where lr is the learning_rate after warm-up. + Args: - learning_rate (float | Variable): A float value or Variable. - warmup_steps (int): The warmup steps. 
- start_lr (float): The start learning rate of warmup. - end_lr (float): The end learning rate of warmup. - + learning_rate (Variable|float): Learning_rate after warm-up, it could be 1D-Tensor or single value with the data type of float32. + warmup_steps (int): Steps for warm up. + start_lr (float): Initial learning rate of warm up. + end_lr (float): Final learning rate of warm up. + Returns: - The decayed learning rate in warmup period. - + Variable: Warm-up learning rate with the same data type as learning_rate. + + Examples: - .. code-block:: python - - import paddle.fluid as fluid - boundaries = [100, 200] - lr_steps = [0.1, 0.01, 0.001] - warmup_steps = 50 - start_lr = 1. / 3. - end_lr = 0.1 - decayed_lr = fluid.layers.linear_lr_warmup( - fluid.layers.piecewise_decay(boundaries, lr_steps), - warmup_steps, start_lr, end_lr) - + + .. code-block:: python + + import paddle.fluid as fluid + + boundaries = [100, 200] + lr_steps = [0.1, 0.01, 0.001] + learning_rate = fluid.layers.piecewise_decay(boundaries, lr_steps) #case1, 1D-Tensor + #learning_rate = 0.1 #case2, single-value + warmup_steps = 50 + start_lr = 1. / 3. + end_lr = 0.1 + decayed_lr = fluid.layers.linear_lr_warmup(learning_rate, + warmup_steps, start_lr, end_lr) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + out, = exe.run(fetch_list=[decayed_lr.name]) + print(out) + # case1: [0.33333334] + # case2: [0.33333334] """ dtype = 'float32' if isinstance(learning_rate, Variable): diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 9538106414..ae15c6a412 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -8587,46 +8587,46 @@ def lod_append(x, level): def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): """ - Local Response Normalization Layer. This layer performs a type of - "lateral inhibition" by normalizing over local input regions. 
+ This operator implements the Local Response Normalization Layer. + This layer performs a type of "lateral inhibition" by normalizing over local input regions. + For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks `_ The formula is as follows: .. math:: - Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta} + Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta} In the above equation: - * :math:`n`: The number of channels to sum over. - * :math:`k`: The offset (avoid being divided by 0). - * :math:`alpha`: The scaling parameter. - * :math:`beta`: The exponent parameter. + - :math:`n` : The number of channels to sum over. + - :math:`k` : The offset (avoid being divided by 0). + - :math:`\\alpha` : The scaling parameter. + - :math:`\\beta` : The exponent parameter. - Refer to `ImageNet Classification with Deep Convolutional Neural Networks - `_ Args: - input (Variable): The input tensor of this layer, and the dimension of input tensor must be 4. - n (int, default 5): The number of channels to sum over. - k (float, default 1.0): An offset (usually positive to avoid dividing by 0). - alpha (float, default 1e-4): The scaling parameter. - beta (float, default 0.75): The exponent. - name (str, default None): A name for this operation. - - Raises: - ValueError: If rank of the input tensor is not 4. + input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W], where N is the batch size, C is the input channel, H is Height, W is width. The data type is float32. The rank of this tensor must be 4, otherwise it will raise ValueError. + n (int, optional): The number of channels to sum over. Default: 5 + k (float, optional): An offset, positive. Default: 1.0 + alpha (float, optional): The scaling parameter, positive. 
Default:1e-4 + beta (float, optional): The exponent, positive. Default:0.75 + name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: - A tensor variable storing the transformation result. + Variable: A tensor variable storing the transformation result with the same shape and data type as input. + Examples: - .. code-block:: python - import paddle.fluid as fluid - data = fluid.layers.data( - name="data", shape=[3, 112, 112], dtype="float32") - lrn = fluid.layers.lrn(input=data) + .. code-block:: python + + import paddle.fluid as fluid + data = fluid.data( + name="data", shape=[None, 3, 112, 112], dtype="float32") + lrn = fluid.layers.lrn(input=data) + print(lrn.shape) # [-1, 3, 112, 112] + print(lrn.dtype) # float32 """ helper = LayerHelper('lrn', **locals()) dtype = helper.input_dtype() @@ -8870,38 +8870,57 @@ def label_smooth(label, @templatedoc() def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): """ - ${comment} - + This operator implements the roi_pooling layer. + Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7). + + The operator has three steps: + + 1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height; + 2. Finding the largest value in each section; + 3. Copying these max values to the output buffer. + + For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn + Args: - input (Variable): ${x_comment} - rois (Variable): ROIs (Regions of Interest) to pool over.It should be - a 2-D LoDTensor of shape (num_rois, 4), the lod level - is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is - the top left coordinates, and (x2, y2) is the bottom - right coordinates. 
- pooled_height (integer): ${pooled_height_comment} Default: 1 - pooled_width (integer): ${pooled_width_comment} Default: 1 - spatial_scale (float): ${spatial_scale_comment} Default: 1.0 - + input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W], where N is the batch size, C is the input channel, H is Height, W is width. The data type is float32 or float64. + rois (Variable): ROIs (Regions of Interest) to pool over. 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates. + pooled_height (int, optional): The pooled output height, data type is int32. Default: 1 + pooled_width (int, optional): The pooled output width, data type is int32. Default: 1 + spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0 + + Returns: - Variable: ${out_comment}. - + Variable: The pooled feature, 4D-Tensor with the shape of [num_rois, C, pooled_height, pooled_width]. + + Examples: - .. code-block:: python - - import paddle.fluid as fluid - - x = fluid.layers.data( - name='x', shape=[8, 112, 112], dtype='float32') - rois = fluid.layers.data( - name='roi', shape=[4], lod_level=1, dtype='float32') - pool_out = fluid.layers.roi_pool( + + ..
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + DATATYPE='float32' + + place = fluid.CPUPlace() + #place = fluid.CUDAPlace(0) + + input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE) + roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place) + + x = fluid.data(name='input', shape=[None,1,4,4], dtype=DATATYPE) + rois = fluid.data(name='roi', shape=[None,4], dtype=DATATYPE) + + pool_out = fluid.layers.roi_pool( input=x, rois=rois, - pooled_height=7, - pooled_width=7, + pooled_height=1, + pooled_width=1, spatial_scale=1.0) - + + exe = fluid.Executor(place) + out, = exe.run(feed={'input':input_data ,'roi':roi_data}, fetch_list=[pool_out.name]) + print(out) #array([[[[11.]]], [[[16.]]]], dtype=float32) + print(np.array(out).shape) # (2, 1, 1, 1) """ helper = LayerHelper('roi_pool', **locals()) dtype = helper.input_dtype() @@ -14861,44 +14880,49 @@ def prroi_pool(input, def huber_loss(input, label, delta): """ - Huber loss is a loss function used in robust. - Huber loss can evaluate the fitness of input to label. - Different from MSE loss, Huber loss is more robust for outliers. + This operator computes the Huber loss between input and label. + Huber loss is commonly used in regression tasks. Compared to square_error_cost, Huber loss is more robust and less sensitive to outliers. + + When the absolute difference between input and label is greater than delta, the linear error is calculated: - When the difference between input and label is large than delta .. math:: + huber\_loss = delta * (label - input) - 0.5 * delta * delta - huber\_loss = delta * (label - input) - 0.5 * delta * delta + When the absolute difference between input and label is less than or equal to delta, the square error is calculated: - When the difference between input and label is less than delta .. 
math:: - - huber\_loss = 0.5 * (label - input) * (label - input) + huber\_loss = 0.5 * (label - input) * (label - input) Args: - input (Variable): This input is a probability computed by the previous operator. - The first dimension is batch size, and the last dimension is 1. - label (Variable): The groud truth whose first dimension is batch size - and last dimension is 1. - delta (float): The parameter of huber loss, which controls - the range of outliers + input (Variable): Predicted data, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32 or float64. + label (Variable): Ground truth label, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32 or float64. + delta (float): The threshold for Huber loss, which is used to control the balance between the linear error and square error. The data type should be float32. Returns: - huber\_loss (Variable): The huber loss with shape [batch_size, 1]. + Variable: The huber loss, a tensor with the same shape and data type as input. + Examples: - .. code-block:: python - import paddle.fluid as fluid + .. 
code-block:: python - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - predict = fluid.layers.fc(input=x, size=1) - label = fluid.layers.data( - name='label', shape=[1], dtype='float32') - loss = fluid.layers.huber_loss( - input=predict, label=label, delta=1.0) + import paddle.fluid as fluid + import numpy as np + + DATATYPE='float32' + input_data = np.array([[1.],[2.],[3.],[4.]]).astype(DATATYPE) + label_data = np.array([[3.],[3.],[4.],[4.]]).astype(DATATYPE) + x = fluid.data(name='input', shape=[None, 1], dtype=DATATYPE) + y = fluid.data(name='label', shape=[None, 1], dtype=DATATYPE) + loss = fluid.layers.huber_loss(input=x, label=y, delta=1.0) + + place = fluid.CPUPlace() + #place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + HuberLoss, = exe.run(feed={'input':input_data ,'label':label_data}, fetch_list=[loss.name]) + print(HuberLoss) #[[1.5], [0.5], [0.5], [0. ]], dtype=float32 """ helper = LayerHelper('huber_loss', **locals()) residual = helper.create_variable_for_type_inference( @@ -15922,25 +15946,50 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1): @templatedoc() def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None): """ - ${comment} - Args: - x(Varaible): Input of HardSwish operator. - threshold(float): The threshold parameter of HardSwish operator. Default:threshold=6.0 - scale(float): The scale parameter of HardSwish operator. Default:scale=6.0 - offset(float): The offset parameter of HardSwish operator. Default:offset=3.0 - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + This operator implements the hard_swish activation function. + Hard_swish is proposed in MobileNetV3, and performs better in computational stability and efficiency compared to swish function. + For more details please refer to: https://arxiv.org/pdf/1905.02244.pdf - Returns: - Variable: The output tensor with the same shape as input. 
+ The formula is as follows: - Examples: + .. math:: - .. code-block:: python + out = \\frac{x * (min(max(0, x+offset), threshold))}{scale} - import paddle.fluid as fluid - x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32") - y = fluid.layers.hard_swish(x) + In the above equation: + + ``threshold`` and ``scale`` should be positive, ``offset`` can be positive or negative. It is recommended to use default parameters. + + Args: + x (Variable): Input feature, multi-dimensional Tensor. The data type should be float32 or float64. + threshold (float, optional): The threshold in Relu function. Default: 6.0 + scale (float, optional): The scale factor. Default: 6.0 + offset (float, optional): The offset factor. Default: 3.0 + name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` + + Returns: + Variable: The output tensor with the same shape and data type as input. + + + Examples: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + DATATYPE='float32' + + x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE) + + x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE) + y = fluid.layers.hard_swish(x) + + place = fluid.CPUPlace() + #place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + out, = exe.run(feed={'x':x_data}, fetch_list=[y.name]) + print(out) # [[0.66666667, 1.66666667,3., 4.]] """ helper = LayerHelper('hard_swish', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) -- GitLab