Unverified commit 09c2b29b, authored by zhaoyuchen2018, committed by GitHub

[cherry-pick] Refine StaticRNN api-doc-en (#20093) (#20447)

test=develop
test=document_fix

* Refine StaticRNN api-doc-en

* refine static rnn doc

* refine api doc of fluid.data

* refine api doc from commends

* refine api.spec

* Refine grammar
Parent 439bfe8c
@@ -371,14 +371,14 @@ paddle.fluid.layers.DynamicRNN.output (ArgSpec(args=['self'], varargs='outputs',
 paddle.fluid.layers.DynamicRNN.static_input (ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', 'a766b9ea54014d1af913b0d39d7fb010'))
 paddle.fluid.layers.DynamicRNN.step_input (ArgSpec(args=['self', 'x', 'level'], varargs=None, keywords=None, defaults=(0,)), ('document', '70081f137db4d215b302e49b5db33353'))
 paddle.fluid.layers.DynamicRNN.update_memory (ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None), ('document', 'b416c1c4a742a86379887b092cf7f2f0'))
-paddle.fluid.layers.StaticRNN ('paddle.fluid.layers.control_flow.StaticRNN', ('document', 'f73671cb98696a1962bf5deaf49dc2e9'))
+paddle.fluid.layers.StaticRNN ('paddle.fluid.layers.control_flow.StaticRNN', ('document', '4b0befc7f1ca7f11a459939f09c24d9a'))
 paddle.fluid.layers.StaticRNN.__init__ (ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.layers.StaticRNN.memory (ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1)), ('document', 'f1b60dc4194d0bb714d6c6f5921b227f'))
-paddle.fluid.layers.StaticRNN.output (ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None), ('document', 'df6ceab6e6c9bd31e97914d7e7538137'))
-paddle.fluid.layers.StaticRNN.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6d3e0a5d9aa519a9773a36e1620ea9b7'))
-paddle.fluid.layers.StaticRNN.step_input (ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', '903387ec11f3d0bf46821d31a68cffa5'))
-paddle.fluid.layers.StaticRNN.step_output (ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None), ('document', '252890d4c3199a7623ab8667e13fd837'))
-paddle.fluid.layers.StaticRNN.update_memory (ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None), ('document', '7a0000520f179f35239956a5ba55119f'))
+paddle.fluid.layers.StaticRNN.memory (ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1)), ('document', 'dcacaa7f1953e89ef5c9e0f9f37c5470'))
+paddle.fluid.layers.StaticRNN.output (ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None), ('document', 'a4ff467887b0a60d1aa5547b7f4e212c'))
+paddle.fluid.layers.StaticRNN.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '28fe641c56bc57adff720a01e4310047'))
+paddle.fluid.layers.StaticRNN.step_input (ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', '5b71df1f6beee225ccfa9566f537059d'))
+paddle.fluid.layers.StaticRNN.step_output (ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None), ('document', '10a543d40e14408765c5294636646a6c'))
+paddle.fluid.layers.StaticRNN.update_memory (ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None), ('document', '2021b2399d9a2265e2c254973ed151cd'))
 paddle.fluid.layers.reorder_lod_tensor_by_rank (ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None), ('document', 'db67cfcdd20ff6380d125a7553d62121'))
 paddle.fluid.layers.Print (ArgSpec(args=['input', 'first_n', 'message', 'summarize', 'print_tensor_name', 'print_tensor_type', 'print_tensor_shape', 'print_tensor_lod', 'print_phase'], varargs=None, keywords=None, defaults=(-1, None, 20, True, True, True, True, 'both')), ('document', 'e57b87b4d1f9d4a6c7a3f4e6942dea10'))
 paddle.fluid.layers.is_empty (ArgSpec(args=['x', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a79576af16e8ce1c6ac61b902b04f10a'))
...
@@ -292,11 +292,13 @@ class StaticRNN(object):
     """
     StaticRNN class.

-    The StaticRNN can process a batch of sequence data. The length of each
-    sample sequence must be equal. The StaticRNN will have its own parameters
-    like inputs, outputs, memories. **Note that the first dimension of inputs
-    represents sequence length, and all the sequence length of inputs must be
-    the same. And the meaning of each axis of input and output are the same.**
+    The StaticRNN can process a batch of sequence data. The first dimension of each
+    input represents the sequence length, and the sequence lengths of all inputs
+    must be equal. StaticRNN will unfold the sequence into time steps; users need
+    to define how to process each time step inside the :code:`with` block.
+
+    Args:
+        name (str, optional): Please refer to :ref:`api_guide_Name`. Default: None.

     Examples:
         .. code-block:: python
@@ -305,34 +307,30 @@ class StaticRNN(object):
             import paddle.fluid.layers as layers

             vocab_size, hidden_size=10000, 200
-            x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64')
+            x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+            # create word sequence
             x_emb = layers.embedding(
                 input=x,
                 size=[vocab_size, hidden_size],
                 dtype='float32',
                 is_sparse=False)
+            # transform batch size to dim 1
             x_emb = layers.transpose(x_emb, perm=[1, 0, 2])

             rnn = fluid.layers.StaticRNN()
             with rnn.step():
+                # mark the created x_emb as input, each step processes a word
                 word = rnn.step_input(x_emb)
+                # create prev memory parameter, batch size comes from word
                 prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                 hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
-                rnn.update_memory(prev, hidden)  # set prev to hidden
+                # use hidden to update prev
+                rnn.update_memory(prev, hidden)
+                # mark hidden as output
                 rnn.step_output(hidden)
-            rnn.output(word)
+            # get StaticRNN final output
             result = rnn()

-    The StaticRNN will unfold sequence into time steps. Users need to define
-    how to process each time step during the :code:`with` step.
-    The :code:`memory` is used as a staging data cross time step. The initial
-    value of memory can be a variable that is filled with a constant value or
-    a specified variable.
-    The StaticRNN can mark multiple variables as its output. Use `rnn()` to
-    get the output sequence.
     """

     BEFORE_RNN_BLOCK = 0
     IN_RNN_BLOCK = 1
@@ -349,7 +347,8 @@ class StaticRNN(object):
     def step(self):
         """
-        The block for user to define operators in RNN.
+        Define operators in each step. step is used in a :code:`with` block; the
+        OPs in the :code:`with` block will be executed sequence_len times
+        (sequence_len is the length of the input).
         """
         return BlockGuardWithCompletion(self)
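
Of the methods refined by this commit, :code:`step` is the only one that does not gain an example. A minimal sketch of the semantics its new docstring describes — every OP created inside the :code:`with rnn.step()` block is unfolded and run once per time step — might look like the following; the input tensor :code:`seq` and its shape are assumptions for illustration, not part of the commit:

```python
import paddle.fluid as fluid

# assumed input: 10 time steps, dynamic batch size, feature width 32
seq = fluid.data(name="seq", shape=[10, None, 32], dtype='float32')

rnn = fluid.layers.StaticRNN()
with rnn.step():
    # everything created here is executed 10 times,
    # once per slice of `seq` along the first (time) dimension
    step = rnn.step_input(seq)
    rnn.step_output(step)

out = rnn()  # stacks the per-step outputs back to [10, batch, 32]
```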
@@ -366,48 +365,80 @@ class StaticRNN(object):
                ref_batch_dim_idx=1):
         """
         Create a memory variable for static rnn.

         If the :code:`init` is not None, :code:`memory` will be initialized by
         this Variable. If the :code:`init` is None, :code:`shape` and :code:`batch_ref`
-        must be set, and this function will initialize a :code:`init` Variable.
+        must be set, and this function will create a new variable from them to
+        initialize the memory.

         Args:
-            init(Variable|None): The initialized variable. If it is not set,
+            init(Variable, optional): Tensor used to initialize the memory. If it is not set,
                 :code:`shape` and :code:`batch_ref` must be provided.
                 Default: None.
-            shape(list|tuple): The shape of the boot memory. NOTE the shape
-                does not contain batch_size. Default: None.
-            batch_ref(Variable|None): The batch size reference Variable.
-                Default: None.
-            init_value(float): the init value of boot memory. Default: 0.0.
-            init_batch_dim_idx(int): the batch_size axis of the
-                :code:`init` Variable. Default: 0.
-            ref_batch_dim_idx(int): the batch_size axis of the
-                :code:`batch_ref` Variable. Default: 1.
+            shape(list|tuple): When :code:`init` is None, use this arg to set the memory
+                shape. NOTE: the shape does not contain batch_size. Default: None.
+            batch_ref(Variable, optional): When :code:`init` is None, the memory's batch size
+                is set to the size of batch_ref's ref_batch_dim_idx dimension. Default: None.
+            init_value(float, optional): When :code:`init` is None, the constant used to
+                initialize the memory's value. Default: 0.0.
+            init_batch_dim_idx(int, optional): the batch_size axis of the :code:`init` Variable. Default: 0.
+            ref_batch_dim_idx(int, optional): the batch_size axis of the :code:`batch_ref` Variable. Default: 1.

         Returns:
-            The memory variable.
+            Variable: The memory variable.

-        Examples:
+        Examples 1:
             .. code-block:: python

                 import paddle.fluid as fluid
                 import paddle.fluid.layers as layers

                 vocab_size, hidden_size=10000, 200
-                x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64')
+                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                # create word sequence
                 x_emb = layers.embedding(
                     input=x,
                     size=[vocab_size, hidden_size],
                     dtype='float32',
                     is_sparse=False)
+                # transform batch size to dim 1
                 x_emb = layers.transpose(x_emb, perm=[1, 0, 2])

                 rnn = fluid.layers.StaticRNN()
                 with rnn.step():
+                    # mark the created x_emb as input, each step processes a word
                     word = rnn.step_input(x_emb)
+                    # create prev memory parameter, batch size comes from word
                     prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
                     hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
+                    # use hidden to update prev
                     rnn.update_memory(prev, hidden)

+        Examples 2:
+            .. code-block:: python
+
+                import paddle.fluid as fluid
+                import paddle.fluid.layers as layers
+
+                vocab_size, hidden_size=10000, 200
+                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                # create word sequence
+                x_emb = layers.embedding(
+                    input=x,
+                    size=[vocab_size, hidden_size],
+                    dtype='float32',
+                    is_sparse=False)
+                # transform batch size to dim 1
+                x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
+                boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1)
+                rnn = fluid.layers.StaticRNN()
+                with rnn.step():
+                    # mark the created x_emb as input, each step processes a word
+                    word = rnn.step_input(x_emb)
+                    # init memory with boot_memory
+                    prev = rnn.memory(init=boot_memory)
+                    hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
+                    # update prev with hidden
+                    rnn.update_memory(prev, hidden)
         """
         self._assert_in_rnn_block_('memory')
         if init is None:
@@ -455,7 +486,35 @@ class StaticRNN(object):
                 should be [seq_len, ...].

         Returns:
-            The current time step in the input sequence.
+            Variable: The current time step data in the input sequence.

+        Examples:
+            .. code-block:: python
+
+                import paddle.fluid as fluid
+                import paddle.fluid.layers as layers
+
+                vocab_size, hidden_size=10000, 200
+                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                # create word sequence
+                x_emb = layers.embedding(
+                    input=x,
+                    size=[vocab_size, hidden_size],
+                    dtype='float32',
+                    is_sparse=False)
+                # transform batch size to dim 1
+                x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
+
+                rnn = fluid.layers.StaticRNN()
+                with rnn.step():
+                    # mark the created x_emb as input, each step processes a word
+                    word = rnn.step_input(x_emb)
+                    # create prev memory parameter, batch size comes from word
+                    prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
+                    hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
+                    # use hidden to update prev
+                    rnn.update_memory(prev, hidden)
         """
         self._assert_in_rnn_block_('step_input')
         if not isinstance(x, Variable):
@@ -479,6 +538,37 @@ class StaticRNN(object):
         Returns:
             None.

+        Examples:
+            .. code-block:: python
+
+                import paddle.fluid as fluid
+                import paddle.fluid.layers as layers
+
+                vocab_size, hidden_size=10000, 200
+                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                # create word sequence
+                x_emb = layers.embedding(
+                    input=x,
+                    size=[vocab_size, hidden_size],
+                    dtype='float32',
+                    is_sparse=False)
+                # transform batch size to dim 1
+                x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
+
+                rnn = fluid.layers.StaticRNN()
+                with rnn.step():
+                    # mark the created x_emb as input, each step processes a word
+                    word = rnn.step_input(x_emb)
+                    # create prev memory parameter, batch size comes from word
+                    prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
+                    hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
+                    # use hidden to update prev
+                    rnn.update_memory(prev, hidden)
+                    rnn.step_output(hidden)
+
+                result = rnn()
         """
         self._assert_in_rnn_block_('step_output')
         if not isinstance(o, Variable):
@@ -503,25 +593,57 @@ class StaticRNN(object):
         Mark the StaticRNN output variables.

         Args:
-            outputs: The output Variables.
+            outputs: The output Tensors; multiple variables can be marked as outputs.

         Returns:
             None

+        Examples:
+            .. code-block:: python
+
+                import paddle.fluid as fluid
+                import paddle.fluid.layers as layers
+
+                vocab_size, hidden_size=10000, 200
+                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                # create word sequence
+                x_emb = layers.embedding(
+                    input=x,
+                    size=[vocab_size, hidden_size],
+                    dtype='float32',
+                    is_sparse=False)
+                # transform batch size to dim 1
+                x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
+
+                rnn = fluid.layers.StaticRNN()
+                with rnn.step():
+                    # mark the created x_emb as input, each step processes a word
+                    word = rnn.step_input(x_emb)
+                    # create prev memory parameter, batch size comes from word
+                    prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
+                    hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
+                    # use hidden to update prev
+                    rnn.update_memory(prev, hidden)
+                    # mark each step's hidden and word as output
+                    rnn.output(hidden, word)
+
+                result = rnn()
         """
         for each in outputs:
             self.step_output(each)

     def update_memory(self, mem, var):
         """
-        Update the memory from ex_mem to new_mem. NOTE that the shape and data
-        type of :code:`ex_mem` and :code:`new_mem` must be same.
+        Update the memory from :code:`mem` to :code:`var`.

         Args:
             mem(Variable): the memory variable.
-            var(Variable): the plain variable generated in RNN block.
+            var(Variable): the plain variable generated in the RNN block, used to update
+                the memory. var and mem should have the same shape and data type.

         Returns:
             None
         """
         if not isinstance(mem, Variable) or not isinstance(var, Variable):
             raise TypeError("update memory should take variables")
...
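
Among the methods touched here, :code:`update_memory` is the only one whose refined docstring still lacks an example. A minimal sketch of the rule it states — the new value must match the memory's shape and dtype, and it becomes the memory's value at the next time step — could look like this; the running-sum setup and names such as :code:`seq` and :code:`acc` are assumptions for illustration, not part of the commit:

```python
import paddle.fluid as fluid

# assumed input: 10 time steps, dynamic batch size, feature width 32
seq = fluid.data(name="seq", shape=[10, None, 32], dtype='float32')

rnn = fluid.layers.StaticRNN()
with rnn.step():
    step = rnn.step_input(seq)
    # running sum kept across time steps, zero-initialized;
    # batch size is taken from `step`
    acc = rnn.memory(shape=[-1, 32], batch_ref=step, init_value=0.0)
    new_acc = fluid.layers.elementwise_add(acc, step)
    # `new_acc` has the same shape and dtype as `acc`;
    # it becomes the value of `acc` at the next time step
    rnn.update_memory(acc, new_acc)
    rnn.step_output(new_acc)

out = rnn()  # [10, batch, 32]: prefix sums over the time dimension
```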