diff --git a/doc/fluid/api_cn/nn_cn.rst b/doc/fluid/api_cn/nn_cn.rst
index 16f0da5958fabd448c95f5c4e7af3e64fb9b58c7..bdd40cd44e5e636184ed75c2c8d5cb40c4546310 100644
--- a/doc/fluid/api_cn/nn_cn.rst
+++ b/doc/fluid/api_cn/nn_cn.rst
@@ -13,7 +13,7 @@ paddle.nn
     nn_cn/diag_embed_cn.rst
     nn_cn/interpolate_cn.rst
     nn_cn/Linear_cn.rst
-    nn_cn/log_softmax_cn.rst
+    nn_cn/LogSoftmax_cn.rst
     nn_cn/ReLU_cn.rst
     nn_cn/Upsample_cn.rst
     nn_cn/activation_cn.rst
diff --git a/doc/fluid/api_cn/nn_cn/LogSoftmax_cn.rst b/doc/fluid/api_cn/nn_cn/LogSoftmax_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9114f8b41edb38282ee250a7955b230f80fc1121
--- /dev/null
+++ b/doc/fluid/api_cn/nn_cn/LogSoftmax_cn.rst
@@ -0,0 +1,42 @@
+.. _cn_api_nn_LogSoftmax:
+
+LogSoftmax
+-------------------------------
+.. py:class:: paddle.nn.LogSoftmax(axis=None)
+
+
+**LogSoftmax activation layer:**
+
+.. math::
+
+    \\Out[i, j] = log\left(\frac{e^{X[i, j]}}{\sum_j e^{X[i, j]}}\right) = X[i, j] - log\sum_j e^{X[i, j]}\\
+
+Parameters:
+    - **axis** (int, optional) - Index of the dimension along which LogSoftmax is computed. It must be in the range :math:`[-1, rank-1]`, where rank is the rank of the input variable. Default: None (equivalent to -1, i.e. LogSoftmax is applied along the last dimension).
+
+Returns: None
+
+**Code example**
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.nn as nn
+    import numpy as np
+
+    data = np.array([[[-2.0, 3.0, -4.0, 5.0],
+                      [3.0, -4.0, 5.0, -6.0],
+                      [-7.0, -8.0, 8.0, 9.0]],
+                     [[1.0, -2.0, -3.0, 4.0],
+                      [-5.0, 6.0, 7.0, -8.0],
+                      [6.0, 7.0, 8.0, 9.0]]]).astype('float32')
+    my_log_softmax = nn.LogSoftmax()
+    with fluid.dygraph.guard():
+        data = fluid.dygraph.to_variable(data)
+        res = my_log_softmax(data)
+        # [[[ -7.1278396   -2.1278396   -9.127839    -0.12783948]
+        #   [ -2.1270514   -9.127051    -0.12705144 -11.127051  ]
+        #   [-16.313261   -17.313261    -1.3132617   -0.31326184]]
+        #  [[ -3.0518122   -6.051812    -7.051812    -0.051812  ]
+        #   [-12.313267    -1.3132664   -0.3132665  -15.313267  ]
+        #   [ -3.4401896   -2.4401896   -1.4401896   -0.44018966]]]
diff --git a/doc/fluid/api_cn/nn_cn/ReLU_cn.rst b/doc/fluid/api_cn/nn_cn/ReLU_cn.rst
index 687436b737f3e9f6ee2f645af84b3091fa2ad146..5550e7d18fbf7b91c74015fa7c11306e59c4a824 100644
--- a/doc/fluid/api_cn/nn_cn/ReLU_cn.rst
+++ b/doc/fluid/api_cn/nn_cn/ReLU_cn.rst
@@ -1,3 +1,33 @@
+.. _cn_api_nn_ReLU:
+
 ReLU
 -------------------------------
-**Version upgraded; documentation is under development**
+.. py:class:: paddle.nn.ReLU(inplace=False)
+
+
+**ReLU (Rectified Linear Unit) activation layer:**
+
+.. math::
+
+    \\Out = max(X, 0)\\
+
+where :math:`X` is the input Tensor.
+
+Parameters:
+    - **inplace** (bool, optional) - If ``inplace`` is ``True``, the input and output of ``ReLU`` are the same variable; otherwise they are different variables. Default: ``False``. Note that if the input of ``ReLU`` is also an input of other OPs, ``inplace`` must be ``False``.
+
+Returns: None
+
+**Code example**
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.nn as nn
+    import numpy as np
+
+    data = np.array([-2, 0, 1]).astype('float32')
+    my_relu = nn.ReLU()
+    with fluid.dygraph.guard():
+        data = fluid.dygraph.to_variable(data)
+        res = my_relu(data)  # [0, 0, 1]
diff --git a/doc/fluid/api_cn/nn_cn/log_softmax_cn.rst b/doc/fluid/api_cn/nn_cn/log_softmax_cn.rst
deleted file mode 100644
index 45561655c9d5cc6c8c3b8fb537a09494c3b93792..0000000000000000000000000000000000000000
--- a/doc/fluid/api_cn/nn_cn/log_softmax_cn.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-log
--------------------------------
-**Version upgraded; documentation is under development**
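
As a sanity check on the LogSoftmax formula in the new LogSoftmax_cn.rst page, the commented output in its example can be reproduced with plain NumPy via the numerically stable form :math:`x - max(x) - log\sum e^{x - max(x)}`. The ``log_softmax_ref`` helper below is a hypothetical reference sketch, not part of the ``paddle.nn`` API.

.. code-block:: python

    import numpy as np

    def log_softmax_ref(x, axis=-1):
        # Hypothetical NumPy reference, not a Paddle API: subtract the
        # per-axis max before exponentiating so exp() cannot overflow.
        shifted = x - np.max(x, axis=axis, keepdims=True)
        return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))

    x = np.array([-2.0, 3.0, -4.0, 5.0], dtype='float32')
    print(log_softmax_ref(x))
    # ~[-7.1278 -2.1278 -9.1278 -0.1278], matching the first row of the
    # commented output in the LogSoftmax_cn.rst example.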
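
Likewise, the commented result in the ReLU_cn.rst example follows directly from :math:`Out = max(X, 0)` and admits a one-line NumPy check (again only an illustrative sketch, not the documented API):

.. code-block:: python

    import numpy as np

    data = np.array([-2, 0, 1]).astype('float32')
    # Element-wise max against zero is exactly the documented ReLU formula.
    print(np.maximum(data, 0))  # [0. 0. 1.], matching res = my_relu(data)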