Commit caf6914f authored by Luo Tao

add doc of sequence_softmax and parallelDo

Parent 9169b3b8
@@ -233,9 +233,8 @@ class BlockGuard(object):
class ParallelDo(object):
    """
    ParallelDo class is used to create a ParallelDo.

    It will soon be deprecated; please use ParallelExecutor instead.
    """

    def __init__(self, places, use_nccl=False, name=None):
...
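To make the data-parallel semantics behind ParallelDo concrete, here is a minimal, framework-free sketch (not part of the patch): split a mini-batch across places, run the same computation on each shard, and merge the results. The helper name `parallel_do_ref` and the splitting strategy are illustrative assumptions, not Paddle's implementation.

```python
import numpy as np

def parallel_do_ref(x, num_places, fn):
    # Split the mini-batch along dim 0, one shard per place (illustrative).
    shards = np.array_split(x, num_places, axis=0)
    # Run the same computation on every shard, mirroring how ParallelDo
    # executes one copy of the block per place.
    outputs = [fn(shard) for shard in shards]
    # Gather the per-place results back into a single batch.
    return np.concatenate(outputs, axis=0)

x = np.random.rand(8, 4).astype('float32')
y = parallel_do_ref(x, num_places=2, fn=lambda s: s * 2.0)
assert np.allclose(y, x * 2.0)
```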
@@ -1146,6 +1146,41 @@ def sequence_conv(input,
def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True):
"""
This function computes the softmax activation among all time-steps for each
sequence. The dimension of each time-step should be 1. Thus, the shape of
input Tensor can be either :math:`[N, 1]` or :math:`[N]`, where :math:`N`
is the sum of the length of all sequences.
For i-th sequence in a mini-batch:
.. math::
Out(X[lod[i]:lod[i+1]], :) = \\frac{\exp(X[lod[i]:lod[i+1], :])}{\sum(\exp(X[lod[i]:lod[i+1], :]))}
For example, for a mini-batch of 3 sequences with variable-length,
each containing 2, 3, 2 time-steps, the lod of which is [0, 2, 5, 7],
then softmax will be computed among :math:`X[0:2, :]`, :math:`X[2:5, :]`,
:math:`X[5:7, :]`, and :math:`N` turns out to be 7.
    Args:
        input (Variable): The input variable, which is a LoDTensor.
        param_attr (ParamAttr|None): Attributes of the layer's parameter.
        bias_attr (ParamAttr|None): Attributes of the layer's bias.
        use_cudnn (bool): Whether to use the cuDNN kernel; only valid when
            the cuDNN library is installed. Default: True.

    Returns:
        Variable: The output of sequence_softmax.

    Examples:

        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[7, 1],
                                  dtype='float32', lod_level=1)
            x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
    """
    helper = LayerHelper('sequence_softmax', **locals())
    dtype = helper.input_dtype()
    softmax_out = helper.create_tmp_variable(dtype)
...
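As a sanity check on the formula in the new docstring, here is a minimal NumPy reference sketch (not part of the patch) that applies softmax independently within each lod segment, using the lod [0, 2, 5, 7] from the example above; `sequence_softmax_ref` is a hypothetical helper name, not a Paddle API.

```python
import numpy as np

def sequence_softmax_ref(x, lod):
    # Apply softmax independently within each segment [lod[i], lod[i+1]).
    out = np.empty_like(x)
    for start, end in zip(lod[:-1], lod[1:]):
        seg = x[start:end]
        e = np.exp(seg - seg.max())  # subtract the max for numerical stability
        out[start:end] = e / e.sum()
    return out

x = np.random.rand(7).astype('float32')  # N = 7 time-steps, each of dim 1
lod = [0, 2, 5, 7]                       # 3 sequences of lengths 2, 3, 2
y = sequence_softmax_ref(x, lod)
# Each segment should sum to 1 after the per-sequence softmax.
assert np.allclose([y[0:2].sum(), y[2:5].sum(), y[5:7].sum()], 1.0)
```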