diff --git a/doc/fluid/api/nn.rst b/doc/fluid/api/nn.rst
index b8575ed2df757532889dd0bb818106c971e9d564..472e1b9a80c19a410fefb050b05ff108cfa8bcdb 100644
--- a/doc/fluid/api/nn.rst
+++ b/doc/fluid/api/nn.rst
@@ -62,6 +62,7 @@ paddle.nn
    nn/grid_sampler.rst
    nn/GroupNorm.rst
    nn/hardshrink.rst
+   nn/hardtanh.rst
    nn/hard_sigmoid.rst
    nn/hard_swish.rst
    nn/hash.rst
@@ -104,12 +105,14 @@ paddle.nn
    nn/polynomial_decay.rst
    nn/Pool2D.rst
    nn/pool3d.rst
+   nn/prelu.rst
    nn/prior_box.rst
    nn/prroi_pool.rst
    nn/psroi_pool.rst
    nn/random_crop.rst
    nn/rank_loss.rst
    nn/ReLU.rst
+   nn/relu.rst
    nn/relu6.rst
    nn/resize_bilinear.rst
    nn/resize_nearest.rst
diff --git a/doc/fluid/api/nn/activation.rst b/doc/fluid/api/nn/activation.rst
index 917e1abd4f51f37da88f86daaac323449a82efa9..d073a6f71c408dc7fe97889a01b92197ed355eb3 100644
--- a/doc/fluid/api/nn/activation.rst
+++ b/doc/fluid/api/nn/activation.rst
@@ -8,5 +8,8 @@ activation
    activation/ELU.rst
    activation/GELU.rst
    activation/Hardshrink.rst
+   activation/Hardtanh.rst
+   activation/PReLU.rst
    activation/ReLU.rst
    activation/LogSigmoid.rst
+   activation/Softmax.rst
diff --git a/doc/fluid/api/nn/activation/ELU.rst b/doc/fluid/api/nn/activation/ELU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d98897b3706e5dc2796989c68c4b86adcdf5fa31
--- /dev/null
+++ b/doc/fluid/api/nn/activation/ELU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_ELU:
+
+ELU
+-------------------------------
+
+.. autoclass:: paddle.nn.ELU
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/GELU.rst b/doc/fluid/api/nn/activation/GELU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3aa80fa5b5b45af194ec0c71e4a537a4e9cb0bec
--- /dev/null
+++ b/doc/fluid/api/nn/activation/GELU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_GELU:
+
+GELU
+-------------------------------
+
+.. autoclass:: paddle.nn.GELU
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/Hardtanh.rst b/doc/fluid/api/nn/activation/Hardtanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5509d334ae5a21f5af8bbd7f53c0bce46453a120
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Hardtanh.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_Hardtanh:
+
+Hardtanh
+-------------------------------
+
+.. autoclass:: paddle.nn.Hardtanh
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/LogSigmoid.rst b/doc/fluid/api/nn/activation/LogSigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0407712d267bb29f215714ca77782ae6dce1eed9
--- /dev/null
+++ b/doc/fluid/api/nn/activation/LogSigmoid.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_LogSigmoid:
+
+LogSigmoid
+-------------------------------
+
+.. autoclass:: paddle.nn.LogSigmoid
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/PReLU.rst b/doc/fluid/api/nn/activation/PReLU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bb4c2c3f0055c805e516816036de1571989c5d2c
--- /dev/null
+++ b/doc/fluid/api/nn/activation/PReLU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_PReLU:
+
+PReLU
+-------------------------------
+
+.. autoclass:: paddle.nn.PReLU
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/ReLU.rst b/doc/fluid/api/nn/activation/ReLU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..32a742dac0a9c1cda06128c2d5985448d0c7ab47
--- /dev/null
+++ b/doc/fluid/api/nn/activation/ReLU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_ReLU:
+
+ReLU
+-------------------------------
+
+.. autoclass:: paddle.nn.ReLU
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/Softmax.rst b/doc/fluid/api/nn/activation/Softmax.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a39a3161092cba5a774a5bcb9817e42049d5e039
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Softmax.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_Softmax:
+
+Softmax
+-------------------------------
+
+.. autoclass:: paddle.nn.Softmax
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/elu.rst b/doc/fluid/api/nn/elu.rst
index a1a0181770a55e06714d9ad8e8965f4273fcdc64..cb526089915c31f2761ba0be40087af8233c3632 100644
--- a/doc/fluid/api/nn/elu.rst
+++ b/doc/fluid/api/nn/elu.rst
@@ -2,6 +2,8 @@
 elu
 -------------------------------
 
-:doc_source: paddle.fluid.layers.elu
+
+.. autofunction:: paddle.nn.functional.elu
+    :noindex:
 
 
diff --git a/doc/fluid/api/nn/gelu.rst b/doc/fluid/api/nn/gelu.rst
index 0426ae27dc147a29e05ea54f3edec6439e670027..f4b5d57f8a45154f80cd3ba8754b3b676c9ddb73 100644
--- a/doc/fluid/api/nn/gelu.rst
+++ b/doc/fluid/api/nn/gelu.rst
@@ -2,6 +2,8 @@
 gelu
 -------------------------------
 
-:doc_source: paddle.fluid.layers.gelu
+
+.. autofunction:: paddle.nn.functional.gelu
+    :noindex:
 
 
diff --git a/doc/fluid/api/nn/hardtanh.rst b/doc/fluid/api/nn/hardtanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d5c796a6638119b9cf0aaf0271b785cb8a54e27d
--- /dev/null
+++ b/doc/fluid/api/nn/hardtanh.rst
@@ -0,0 +1,7 @@
+.. _api_nn_hardtanh:
+
+hardtanh
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.hardtanh
+    :noindex:
diff --git a/doc/fluid/api/nn/logsigmoid.rst b/doc/fluid/api/nn/logsigmoid.rst
index d3fa9266b1db2b8646e9340f06a93310fa214031..7571afec3fda355b589a77ccd125a23b60434071 100644
--- a/doc/fluid/api/nn/logsigmoid.rst
+++ b/doc/fluid/api/nn/logsigmoid.rst
@@ -2,6 +2,8 @@
 logsigmoid
 -------------------------------
 
-:doc_source: paddle.fluid.layers.logsigmoid
+
+.. autofunction:: paddle.nn.functional.logsigmoid
+    :noindex:
 
 
diff --git a/doc/fluid/api/nn/prelu.rst b/doc/fluid/api/nn/prelu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9de04524eca18ea1b1ad3d71676bae0a5c64a273
--- /dev/null
+++ b/doc/fluid/api/nn/prelu.rst
@@ -0,0 +1,7 @@
+.. _api_nn_prelu:
+
+prelu
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.prelu
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/relu.rst b/doc/fluid/api/nn/relu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f4a73adad505bf8eeb58bee30fd0321cada4dcc9
--- /dev/null
+++ b/doc/fluid/api/nn/relu.rst
@@ -0,0 +1,7 @@
+.. _api_nn_relu:
+
+relu
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.relu
+    :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/softmax.rst b/doc/fluid/api/nn/softmax.rst
index bb18407af36005b23ab911390b8be880c9695101..f97889cdd33799cec1af362a0a22c47de6ba0feb 100644
--- a/doc/fluid/api/nn/softmax.rst
+++ b/doc/fluid/api/nn/softmax.rst
@@ -1,10 +1,7 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
-    !DO NOT EDIT THIS FILE MANUALLY!
-
 .. _api_nn_softmax:
 
 softmax
---------
+-------------------------------
 
 .. autofunction:: paddle.nn.functional.softmax
     :noindex:
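The stubs above expose each activation in both spellings: a class under ``paddle.nn`` and a function under ``paddle.nn.functional``. The two forms are interchangeable on tensors; a minimal sketch of the equivalence, assuming paddle 2.0's imperative mode as used in the examples later in this patch (illustrative only, not part of the patch):

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5], dtype='float32'))

    # class form: build a layer object, then call it
    out_layer = paddle.nn.ReLU()(x)
    # functional form: apply directly
    out_fn = F.relu(x)
    # both yield [0. , 0.3, 2.5]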
diff --git a/doc/fluid/api_cn/nn_cn.rst b/doc/fluid/api_cn/nn_cn.rst
index 31926805f9303f1669fe1cd9e67a1b6a0a4baae1..4999fe557ba004c185931af2d0a5b5928f68f846 100644
--- a/doc/fluid/api_cn/nn_cn.rst
+++ b/doc/fluid/api_cn/nn_cn.rst
@@ -75,6 +75,7 @@ paddle.nn
    nn_cn/grid_sampler_cn.rst
    nn_cn/GroupNorm_cn.rst
    nn_cn/hardshrink_cn.rst
+   nn_cn/hardtanh_cn.rst
    nn_cn/hard_sigmoid_cn.rst
    nn_cn/hard_swish_cn.rst
    nn_cn/hash_cn.rst
@@ -117,6 +118,7 @@ paddle.nn
    nn_cn/pool2d_cn.rst
    nn_cn/Pool2D_cn.rst
    nn_cn/pool3d_cn.rst
+   nn_cn/prelu_cn.rst
    nn_cn/prior_box_cn.rst
    nn_cn/prroi_pool_cn.rst
    nn_cn/psroi_pool_cn.rst
diff --git a/doc/fluid/api_cn/nn_cn/activation_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn.rst
index 4ba1ca09390a8c4d03da573eca7050eed9c19a21..14caa161565ccce2fac5d063fd45f7d68e1d3730 100644
--- a/doc/fluid/api_cn/nn_cn/activation_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/activation_cn.rst
@@ -11,8 +11,11 @@ activation
    activation_cn/ELU_cn.rst
    activation_cn/GELU_cn.rst
    activation_cn/Hardshrink_cn.rst
+   activation_cn/Hardtanh_cn.rst
+   activation_cn/PRelu_cn.rst
    activation_cn/ReLU_cn.rst
    activation_cn/LeakyReLU_cn.rst
+   activation_cn/Softmax_cn.rst
    activation_cn/LogSoftmax_cn.rst
    activation_cn/Sigmoid_cn.rst
    activation_cn/LogSigmoid_cn.rst
diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/ELU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/ELU_cn.rst
index 8a2bb7bd2f239c4c6db57b7b9991c91d7ef12100..f2cb3cfb242282ccca4ffa6b7355adb2a9e9e1ca 100644
--- a/doc/fluid/api_cn/nn_cn/activation_cn/ELU_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/activation_cn/ELU_cn.rst
@@ -6,7 +6,7 @@ ELU
 
 ELU激活层(ELU Activation Operator)
 
-根据 `Exponential Linear Units <https://arxiv.org/abs/1511.07289>` 对输入Tensor中每个元素应用以下计算。
+根据 `Exponential Linear Units <https://arxiv.org/abs/1511.07289>`_ 对输入Tensor中每个元素应用以下计算。
 
 .. math::
 
diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/GELU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/GELU_cn.rst
index bd04b40302626eae592a5126e990f53ca0fb1ecf..3493bf7f6ea266f1c392908fc933c25a664af549 100644
--- a/doc/fluid/api_cn/nn_cn/activation_cn/GELU_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/activation_cn/GELU_cn.rst
@@ -6,7 +6,7 @@ GELU
 
 GELU激活层(GELU Activation Operator)
 
-更多细节请参考 `Gaussian Error Linear Units <https://arxiv.org/abs/1606.08415>`。
+逐元素计算 GELU激活函数。更多细节请参考 `Gaussian Error Linear Units <https://arxiv.org/abs/1606.08415>`_ 。
 
 如果使用近似计算:
 
diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Hardtanh_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Hardtanh_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d3955f41d3d8a9177ec1d0cb42a8103fa39a83ea
--- /dev/null
+++ b/doc/fluid/api_cn/nn_cn/activation_cn/Hardtanh_cn.rst
@@ -0,0 +1,45 @@
+.. _cn_api_nn_Hardtanh:
+
+Hardtanh
+-------------------------------
+.. py:class:: paddle.nn.Hardtanh(min=-1.0, max=1.0, name=None)
+
+Hardtanh激活层(Hardtanh Activation Operator)。计算公式如下:
+
+.. math::
+
+    Hardtanh(x)=
+        \left\{
+        \begin{aligned}
+        &max, & & if \ x > max \\
+        &min, & & if \ x < min \\
+        &x, & & if \ others
+        \end{aligned}
+        \right.
+
+其中,:math:`x` 为输入的 Tensor
+
+参数
+::::::::::
+    - min (float, 可选) - Hardtanh激活计算公式中的min值。默认值为-1。
+    - max (float, 可选) - Hardtanh激活计算公式中的max值。默认值为1。
+    - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+形状
+::::::::::
+    - input: 任意形状的Tensor。
+    - output: 和input具有相同形状的Tensor。
+
+代码示例
+:::::::::
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
+    m = paddle.nn.Hardtanh()
+    out = m(x) # [-1., 0.3, 1.]
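Hardtanh, as defined above, is an elementwise clamp to [min, max], so the documented example values can be cross-checked in a couple of lines of NumPy (an illustrative sketch, not part of the patch):

.. code-block:: python

    import numpy as np

    def hardtanh_ref(x, t_min=-1.0, t_max=1.0):
        # the piecewise formula in the doc reduces to an elementwise clip
        return np.clip(x, t_min, t_max)

    print(hardtanh_ref(np.array([-1.5, 0.3, 2.5])))  # [-1.   0.3  1. ]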
diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/LogSigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/LogSigmoid_cn.rst
index 321eb1d9ef0907d8ba1b651bf273b6c8d7dea47c..754f59fce5b6ab07dd2c0c699d7f86bff9bd93bb 100644
--- a/doc/fluid/api_cn/nn_cn/activation_cn/LogSigmoid_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/activation_cn/LogSigmoid_cn.rst
@@ -4,11 +4,11 @@ LogSigmoid
 -------------------------------
 .. py:class:: paddle.nn.LogSigmoid(name=None)
 
-Logsigmoid激活层。计算公式如下:
+LogSigmoid激活层。计算公式如下:
 
 .. math::
 
-    Logsigmoid(x) = \log \frac{1}{1 + e^{-x}}
+    LogSigmoid(x) = \log \frac{1}{1 + e^{-x}}
 
 其中,:math:`x` 为输入的 Tensor
 
@@ -33,4 +33,4 @@
 
     x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0]))
     m = paddle.nn.LogSigmoid()
-    out = m(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
+    out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/PRelu_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/PRelu_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3d558d84a3c9c52dd362da38fbc7a23adc5d18db
--- /dev/null
+++ b/doc/fluid/api_cn/nn_cn/activation_cn/PRelu_cn.rst
@@ -0,0 +1,51 @@
+.. _cn_api_nn_PRelu:
+
+PReLU
+-------------------------------
+.. py:class:: paddle.nn.PReLU(num_parameters=1, init=0.25, weight_attr=None, name=None)
+
+PReLU激活层(PReLU Activation Operator)。计算公式如下:
+
+.. math::
+
+    PReLU(x) = max(0, x) + weight * min(0, x)
+
+其中,:math:`x` 为输入的 Tensor
+
+参数
+::::::::::
+    - num_parameters (int, 可选) - 可训练 ``weight`` 数量,支持2种输入:1 - 输入中的所有元素使用同一个 ``weight`` 值;输入的通道数 - 在同一个通道中的元素使用同一个 ``weight`` 值。默认为1。
+    - init (float, 可选) - ``weight`` 的初始值。默认为0.25。
+    - weight_attr (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。
+    - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+形状
+::::::::::
+    - input: 任意形状的Tensor。
+    - output: 和input具有相同形状的Tensor。
+
+代码示例
+:::::::::
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
+                       [ 3.0, -4.0, 5.0, -6.0],
+                       [-7.0, -8.0, 8.0, 9.0]],
+                      [[ 1.0, -2.0, -3.0, 4.0],
+                       [-5.0, 6.0, 7.0, -8.0],
+                       [ 6.0, 7.0, 8.0, 9.0]]]], 'float32')
+    x = paddle.to_tensor(data)
+    m = paddle.nn.PReLU(1, 0.25)
+    out = m(x)
+    # [[[[-0.5 , 3. , -1. , 5. ],
+    #   [ 3. , -1. , 5. , -1.5 ],
+    #   [-1.75, -2. , 8. , 9. ]],
+    #  [[ 1. , -0.5 , -0.75, 4. ],
+    #   [-1.25, 6. , 7. , -2. ],
+    #   [ 6. , 7. , 8. , 9. ]]]]
\ No newline at end of file
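The PReLU formula max(0, x) + weight * min(0, x) is easy to verify against the commented output above; a NumPy reference sketch (illustrative only, not part of the patch):

.. code-block:: python

    import numpy as np

    def prelu_ref(x, weight):
        # positive part passes through; negative part is scaled by weight
        return np.maximum(0, x) + weight * np.minimum(0, x)

    row = np.array([-2.0, 3.0, -4.0, 5.0])
    print(prelu_ref(row, 0.25))  # [-0.5  3.  -1.   5. ], the first row of the example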
diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Softmax_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Softmax_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ba6f60076ea114a210ac1d0a4b919a0b99a02a0f
--- /dev/null
+++ b/doc/fluid/api_cn/nn_cn/activation_cn/Softmax_cn.rst
@@ -0,0 +1,117 @@
+.. _cn_api_nn_Softmax:
+
+Softmax
+-------------------------------
+.. py:class:: paddle.nn.Softmax(axis=-1, name=None)
+
+Softmax激活层,OP的计算过程如下:
+
+步骤1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维;
+
+步骤2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 [0,1] 范围内,并且总和为1;
+
+步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。
+
+上述步骤2中softmax操作计算过程如下:
+
+    - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。
+
+    - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。
+
+对于二维矩阵中的第i行和第j列有:
+
+.. math::
+
+    Softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j \exp(x[i, j])}
+
+- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作)
+
+.. code-block:: text
+
+    # input
+
+    x.shape = [2, 3, 4]
+
+    x.data = [[[2.0, 3.0, 4.0, 5.0],
+               [3.0, 4.0, 5.0, 6.0],
+               [7.0, 8.0, 8.0, 9.0]],
+              [[1.0, 2.0, 3.0, 4.0],
+               [5.0, 6.0, 7.0, 8.0],
+               [6.0, 7.0, 8.0, 9.0]]]
+
+    axis = -1
+
+    # output
+
+    out.shape = [2, 3, 4]
+
+    out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                 [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                 [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
+                [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                 [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+                 [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
+
+- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作)
+
+.. code-block:: text
+
+    # input
+
+    x.shape = [2, 3, 4]
+
+    x.data = [[[2.0, 3.0, 4.0, 5.0],
+               [3.0, 4.0, 5.0, 6.0],
+               [7.0, 8.0, 8.0, 9.0]],
+              [[1.0, 2.0, 3.0, 4.0],
+               [5.0, 6.0, 7.0, 8.0],
+               [6.0, 7.0, 8.0, 9.0]]]
+
+    axis = 1
+
+    # output
+
+    out.shape = [2, 3, 4]
+
+    out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
+                 [0.01786798, 0.01786798, 0.04661262, 0.04661262],
+                 [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
+                [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
+                 [0.26762315, 0.26762315, 0.26762315, 0.26762315],
+                 [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
+
+参数
+::::::::::
+    - axis (int, 可选) - 指定对输入Tensor进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入Tensor的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。
+    - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+形状
+::::::::::
+    - input: 任意形状的Tensor。
+    - output: 和input具有相同形状的Tensor。
+
+代码示例
+::::::::::
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    x = np.array([[[2.0, 3.0, 4.0, 5.0],
+                   [3.0, 4.0, 5.0, 6.0],
+                   [7.0, 8.0, 8.0, 9.0]],
+                  [[1.0, 2.0, 3.0, 4.0],
+                   [5.0, 6.0, 7.0, 8.0],
+                   [6.0, 7.0, 8.0, 9.0]]], 'float32')
+    x = paddle.to_tensor(x)
+    m = paddle.nn.Softmax()
+    out = m(x)
+    # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+    #  [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+    #  [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
+    # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+    #  [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
+    #  [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
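Steps 1-3 above describe softmax via permuting ``axis`` to the last dimension and flattening to 2-D; numerically this is equivalent to a single exp/normalize along ``axis``. A NumPy reference sketch (illustrative, with the usual max-subtraction for stability, which the doc's formula omits):

.. code-block:: python

    import numpy as np

    def softmax_ref(x, axis=-1):
        # subtract the per-slice max for numerical stability; result is unchanged
        e = np.exp(x - np.max(x, axis=axis, keepdims=True))
        return e / np.sum(e, axis=axis, keepdims=True)

    print(softmax_ref(np.array([[2.0, 3.0, 4.0, 5.0]])))
    # [[0.0320586  0.08714432 0.23688282 0.64391426]], matching 示例1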
diff --git a/doc/fluid/api_cn/nn_cn/elu_cn.rst b/doc/fluid/api_cn/nn_cn/elu_cn.rst
index 2006e70251db3b67675083623f114e7f273ef47b..c0cf22a5123d75696dc8bdbd7fb78e2d33314c0c 100644
--- a/doc/fluid/api_cn/nn_cn/elu_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/elu_cn.rst
@@ -7,7 +7,7 @@ elu
 
 elu激活层(ELU Activation Operator)
 
-根据 `Exponential Linear Units <https://arxiv.org/abs/1511.07289>` 对输入Tensor中每个元素应用以下计算。
+根据 `Exponential Linear Units <https://arxiv.org/abs/1511.07289>`_ 对输入Tensor中每个元素应用以下计算。
 
 .. math::
 
diff --git a/doc/fluid/api_cn/nn_cn/hardtanh_cn.rst b/doc/fluid/api_cn/nn_cn/hardtanh_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fa18b323674f9f5f9fac51b41f32c804b2c1852b
--- /dev/null
+++ b/doc/fluid/api_cn/nn_cn/hardtanh_cn.rst
@@ -0,0 +1,45 @@
+.. _cn_api_nn_cn_hardtanh:
+
+hardtanh
+-------------------------------
+.. py:function:: paddle.nn.functional.hardtanh(x, min=-1.0, max=1.0, name=None)
+
+hardtanh激活层(Hardtanh Activation Operator)。计算公式如下:
+
+.. math::
+
+    hardtanh(x)=
+        \left\{
+        \begin{aligned}
+        &max, & & if \ x > max \\
+        &min, & & if \ x < min \\
+        &x, & & if \ others
+        \end{aligned}
+        \right.
+
+其中,:math:`x` 为输入的 Tensor
+
+参数
+::::::::::
+    - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。
+    - min (float, 可选) - hardtanh激活计算公式中的min值。默认值为-1。
+    - max (float, 可选) - hardtanh激活计算公式中的max值。默认值为1。
+    - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+返回
+::::::::::
+    ``Tensor`` ,数据类型和形状同 ``x`` 一致。
+
+代码示例
+:::::::::
+
+.. code-block:: python
+
+    import paddle
+    import paddle.nn.functional as F
+    import numpy as np
+
+    paddle.disable_static()
+
+    x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
+    out = F.hardtanh(x) # [-1., 0.3, 1.]
diff --git a/doc/fluid/api_cn/nn_cn/logsigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/logsigmoid_cn.rst
index 0bbb5f3ca510f293705777512dbdd024dc629efa..5296f5197e8051d67f87bd7b453a335c5c3b6117 100644
--- a/doc/fluid/api_cn/nn_cn/logsigmoid_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/logsigmoid_cn.rst
@@ -34,4 +34,4 @@ logsigmoid激活层。计算公式如下:
     paddle.disable_static()
 
     x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0]))
-    out = F.logsigmoid(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
+    out = F.logsigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
diff --git a/doc/fluid/api_cn/nn_cn/prelu_cn.rst b/doc/fluid/api_cn/nn_cn/prelu_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..caa1681a91964f84679793a7a795a812bc839a2a
--- /dev/null
+++ b/doc/fluid/api_cn/nn_cn/prelu_cn.rst
@@ -0,0 +1,50 @@
+.. _cn_api_nn_cn_prelu:
+
+prelu
+-------------------------------
+.. py:function:: paddle.nn.functional.prelu(x, weight, name=None)
+
+prelu激活层(PReLU Activation Operator)。计算公式如下:
+
+.. math::
+
+    prelu(x) = max(0, x) + weight * min(0, x)
+
+其中,:math:`x` 和 :math:`weight` 为输入的 Tensor
+
+参数
+::::::::::
+    - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。
+    - weight (Tensor) - 可训练参数,数据类型同 ``x`` 一致,形状支持2种:[1] 或者 [in],其中 ``in`` 为输入的通道数。
+    - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+返回
+::::::::::
+    ``Tensor`` ,数据类型和形状同 ``x`` 一致。
+
+代码示例
+:::::::::
+
+.. code-block:: python
+
+    import paddle
+    import paddle.nn.functional as F
+    import numpy as np
+
+    paddle.disable_static()
+
+    data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
+                       [ 3.0, -4.0, 5.0, -6.0],
+                       [-7.0, -8.0, 8.0, 9.0]],
+                      [[ 1.0, -2.0, -3.0, 4.0],
+                       [-5.0, 6.0, 7.0, -8.0],
+                       [ 6.0, 7.0, 8.0, 9.0]]]], 'float32')
+    x = paddle.to_tensor(data)
+    w = paddle.to_tensor(np.array([0.25]).astype('float32'))
+    out = F.prelu(x, w)
+    # [[[[-0.5 , 3. , -1. , 5. ],
+    #   [ 3. , -1. , 5. , -1.5 ],
+    #   [-1.75, -2. , 8. , 9. ]],
+    #  [[ 1. , -0.5 , -0.75, 4. ],
+    #   [-1.25, 6. , 7. , -2. ],
+    #   [ 6. , 7. , 8. , 9. ]]]]
\ No newline at end of file
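The functional prelu accepts a ``weight`` of shape [1] (one shared slope, as in the example above) or [in] (one slope per channel). A hedged sketch of the per-channel case; the input and slope values here are made up for illustration:

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    # NCHW input that is negative everywhere, with 2 channels
    x = paddle.to_tensor(np.full((1, 2, 3, 4), -1.0, dtype='float32'))
    # one slope per channel: weight shape [in] == [2]
    w = paddle.to_tensor(np.array([0.1, 0.5], dtype='float32'))
    out = F.prelu(x, w)
    # channel 0 becomes -0.1 everywhere, channel 1 becomes -0.5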
diff --git a/doc/fluid/api_cn/nn_cn/softmax_cn.rst b/doc/fluid/api_cn/nn_cn/softmax_cn.rst
index 5c2e0cc806c78a831b0a66e6fa89c4bc233a6ecb..74b1605f2b433c57bfae76b4629341450d451677 100644
--- a/doc/fluid/api_cn/nn_cn/softmax_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/softmax_cn.rst
@@ -2,8 +2,7 @@
 softmax
 -------------------------------
-
-.. py:function:: paddle.nn.functional.softmax(x, axis=-1, name=None)
+.. py:function:: paddle.nn.functional.softmax(x, axis=-1, dtype=None, name=None)
 
 该OP实现了softmax层。OP的计算过程如下:
 
 
@@ -24,8 +23,7 @@ softmax
 .. math::
-
-    Out[i,j] = \frac{exp(X[i,j])}{\sum_j exp(X[i,j])}
+    softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j \exp(x[i, j])}
 
 - 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作)
 
 .. code-block:: text
 
@@ -86,13 +84,14 @@ softmax
 
 参数
 ::::::::::
-    - x (Tensor) - 输入的多维 ``Tensor`` ,数据类型为:float32、float64。
+    - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。
     - axis (int, 可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入 ``x`` 的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。
+    - dtype (str|np.dtype|core.VarDesc.VarType, 可选) - 输入Tensor的数据类型。如果指定了 ``dtype`` ,则输入Tensor的数据类型会在计算前转换到 ``dtype`` 。``dtype`` 可以用来避免数据溢出。如果 ``dtype`` 为None,则输出Tensor的数据类型和 ``x`` 相同。默认值为None。
     - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
 
 返回
 ::::::::::
-    ``Tensor`` ,数据类型和形状同 ``x`` 一致。
+    ``Tensor`` ,形状和 ``x`` 相同,数据类型为 ``dtype`` 或者和 ``x`` 相同。
 
 代码示例
 ::::::::::
@@ -111,8 +110,11 @@ softmax
                   [[1.0, 2.0, 3.0, 4.0],
                    [5.0, 6.0, 7.0, 8.0],
                    [6.0, 7.0, 8.0, 9.0]]], 'float32')
-    x = paddle.to_variable(x)
-    out = F.softmax(x)
+    x = paddle.to_tensor(x)
+    out1 = F.softmax(x)
+    out2 = F.softmax(x, dtype='float64')
+    # out1's data type is float32; out2's data type is float64
+    # out1 and out2's value is as follows:
     # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
     #  [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
     #  [0.07232949, 0.19661193, 0.19661193, 0.53444665]],