Commit 297a1698 authored by: W wanghaoshuang

Fix doc of warpctc, array_read, edit_distance and sequence_reshape.

Parent e0a8c584
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import contextlib
-from layer_function_generator import autodoc
+from layer_function_generator import autodoc, templatedoc
 from tensor import assign, fill_constant
 from .. import core
 from ..framework import Program, Variable, Operator
@@ -721,26 +721,22 @@ def lod_rank_table(x, level=0):
     return table
 
 
+@templatedoc()
 def max_sequence_len(rank_table):
-    """Max Sequence Len Operator. Given a LoDRankTable object, this layer
-    returns the max length of a batch of sequences. In fact, a LoDRankTable
-    object contains a list of tuples(<sequence index, sequence length>) and
-    the list is already sorted by sequence length in descending order, so the
-    operator just returns the sequence length of the first tuple element.
+    """
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
+    >>>                       lod_level=1)
+    >>> rank_table = layers.lod_rank_table(x=x, level=0)
+    >>> max_seq_len = layers.max_sequence_len(rank_table)
 
     Args:
-        rank_table (Variable): Input variable which is a LoDRankTable object.
+        rank_table(${rank_table_type}): ${rank_table_comment}.
 
     Returns:
-        Variable: The max length of sequence.
-
-    Examples:
-        .. code-block:: python
-
-            x = fluid.layers.data(name='x', shape=[10],
-                                  dtype='float32', lod_level=1)
-            rank_table = layers.lod_rank_table(x=x, level=0)
-            max_seq_len = layers.max_sequence_len(rank_table)
+        ${out_comment}.
     """
     helper = LayerHelper("max_seqence_len", **locals())
     res = helper.create_tmp_variable(dtype="int64")
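
For context, the behaviour that the removed long-form docstring described can be sketched in plain Python (illustrative only, not the Paddle operator; the tuple values below are made up):

    # A LoDRankTable holds (sequence index, sequence length) tuples that are
    # already sorted by sequence length in descending order, so the batch's
    # max sequence length is just the length stored in the first tuple.
    rank_items = [(2, 5), (0, 3), (1, 2)]  # hypothetical (index, length) tuples
    max_seq_len = rank_items[0][1]         # -> 5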
@@ -978,19 +974,38 @@ def equal(x, y, cond=None, **ignored):
 
 
 def array_read(array, i):
-    """This function performs the operation to read the data in as an
+    """
+    This function performs the operation to read the data in as an
     LOD_TENSOR_ARRAY.
+
+    .. code-block:: text
+
+        Given:
+
+        array = [0.6, 0.1, 0.3, 0.1]
+
+        And:
+
+        i = 2
+
+        Then:
+
+        output = 0.3
+
     Args:
-        array (Variable|list): The input tensor that will be written to an array.
-        i (Variable|list): The subscript index in tensor array, that points the
-                           place where data will be written to.
+        array (Variable|list): The input tensor that store data to be read.
+        i (Variable|list): The index of the data to be read from input array.
+
     Returns:
         Variable: The tensor type variable that has the data written to it.
+
     Examples:
-        .. code-block::python
-          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
-          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
-          arr = layers.array_read(tmp, i=i)
+        .. code-block:: python
+
+          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
+          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+          arr = layers.array_read(tmp, i=i)
     """
     helper = LayerHelper('array_read', **locals())
     if not isinstance(
...
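
A minimal usage sketch of the documented API, reusing the names from the docstring example (not part of this commit; fluid.layers.array_write is assumed to be the usual counterpart for filling the array):

    import paddle.fluid as fluid

    # Write one tensor into a LoDTensorArray at position i, then read it back,
    # mirroring the Given/Then example above (index 2 holds the value 0.3).
    x = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.3)
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
    arr = fluid.layers.array_write(x, i=i)   # assumed counterpart API
    out = fluid.layers.array_read(arr, i=i)  # reads the tensor stored at index 2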
This diff is collapsed.