From db65f497ffce0dcc8b71a06d8a303e1d75b864ca Mon Sep 17 00:00:00 2001
From: Cao Ying
Date: Sat, 13 Jan 2018 02:35:13 +0800
Subject: [PATCH] Update comments for two operators. (#7457)

* update code comments.
* update the comments.
* follow comments.
---
 .../reorder_lod_tensor_by_rank_op.cc          | 44 ++++++++++++++-----
 paddle/operators/shrink_rnn_memory_op.cc      | 22 +++++-----
 python/paddle/v2/fluid/layers/control_flow.py | 40 ++++++++++-------
 python/paddle/v2/fluid/layers/tensor.py       | 17 +++----
 4 files changed, 77 insertions(+), 46 deletions(-)

diff --git a/paddle/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/operators/reorder_lod_tensor_by_rank_op.cc
index a055cdf7e89..3c304479494 100644
--- a/paddle/operators/reorder_lod_tensor_by_rank_op.cc
+++ b/paddle/operators/reorder_lod_tensor_by_rank_op.cc
@@ -26,22 +26,44 @@ class ReorderLoDTensorByRankTableOpProtoMaker
   ReorderLoDTensorByRankTableOpProtoMaker(OpProto *proto,
                                           OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "(LoDTensor) the input lod tensor need to be reordered.");
+    AddInput("X",
+             "(LoDTensor), the input lod tensor to be reordered according to "
+             "Input(RankTable).");
     AddInput("RankTable",
-             "(LoDRankTable) the rank table that input need follow");
-    AddOutput("Out", "(LoDTensor) reordered lod tensor");
-    AddComment(R"DOC(ReorderLoDTensorByRankTable
+             "(LoDRankTable), the rank table according to which Input(X) is "
+             "reordered.");
+    AddOutput("Out", "(LoDTensor), the reordered lod tensor.");
+    AddComment(R"DOC(ReorderLoDTensorByRankTable operator.
 
-Reorder the input X by the rank of `RankTable`. If `RankTable` is ordered by
-index [3, 0, 2, 1]. Input X will reorder its sequence, the third sequence of
-X will be the first sequence of Output.
-
-NOTE: The RankTable does not need to be calculated by X.
+Input(X) is a batch of sequences. Input(RankTable) stores the new order of the
+input sequence batch. The reorder_lod_tensor_by_rank operator reorders
+Input(X) according to the information provided by Input(RankTable).
 
 For example:
-The X = [Seq0, Seq1, Seq2, Seq3]. The indices of RankTable are [3, 0, 2, 1].
-The Out = [Seq3, Seq0, Seq2, Seq1] with correct LoD information.
+
+If the indices stored in Input(RankTable) are [3, 0, 2, 1], Input(X) will be
+reordered so that the fourth sequence in Input(X) becomes the first one,
+followed by the original first, third, and second ones.
+
+That is:
+X = [Seq0, Seq1, Seq2, Seq3]. The indices in RankTable are [3, 0, 2, 1].
+Out = [Seq3, Seq0, Seq2, Seq1] with new LoD information.
+
+If the LoD information of Input(X) is empty, Input(X) is not sequence data.
+This is identical to a batch of sequences in which each sequence has a fixed
+length of 1. In this case, the reorder_lod_tensor_by_rank operator reorders
+each slice of Input(X) along the first axis according to Input(RankTable).
+
+That is:
+X = [Slice0, Slice1, Slice2, Slice3] and its LoD information is empty. The
+indices in RankTable are [3, 0, 2, 1].
+Out = [Slice3, Slice0, Slice2, Slice1] with no LoD information appended.
+
+NOTE: This operator sorts Input(X) according to a given LoDRankTable, which
+does not need to be calculated from Input(X). The LoDRankTable can be
+calculated from another, different sequence, and this operator then sorts
+Input(X) according to it.
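The reordering rule described in the DOC string above can be pictured with a
rough plain-Python sketch. This is an illustration only, not the operator's
C++ implementation; `reorder_by_rank` and its arguments are hypothetical names
used just for this sketch.

.. code-block:: python

    def reorder_by_rank(sequences, rank_indices):
        # rank_indices[i] names which input sequence becomes the i-th output.
        return [sequences[idx] for idx in rank_indices]

    # X = [Seq0, Seq1, Seq2, Seq3] with indices [3, 0, 2, 1] gives
    # Out = [Seq3, Seq0, Seq2, Seq1], matching the example in the DOC string.
    print(reorder_by_rank(["Seq0", "Seq1", "Seq2", "Seq3"], [3, 0, 2, 1]))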
+
 )DOC");
   }
 };

diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc
index 3f5b2a9b843..ade94b40bed 100644
--- a/paddle/operators/shrink_rnn_memory_op.cc
+++ b/paddle/operators/shrink_rnn_memory_op.cc
@@ -45,7 +45,7 @@ class ShrinkRNNMemoryOp : public ArrayOp {
                         rank_items.begin();
 
     auto *out_var = scope.FindVar(Output("Out"));
-    PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set");
+    PADDLE_ENFORCE(out_var != nullptr, "Output(Out) must be set.");
     auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
 
     size_t height = dst_num_rows;
@@ -76,15 +76,17 @@ class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker {
              "(LoDTensor) The step index. The RNN step memory 'X' will be "
              "shrinked to match the size of the input of the index'th step.");
     AddOutput("Out", "(LoDTensor) The shrinked RNN step memory.");
-    AddComment(
-        R"DOC(
-        In dynamic RNN, we are able to handle sequences of different lengths.
-        Because of the multiple lengths, the size of each step input can be
-        different, which may lead to a mismatching between the input of
-        the current step and the memory generated by the previous one. This
-        operator shrinks memory according to the size of the next step input,
-        to make sure that they can match each other.
-        )DOC");
+    AddComment(R"DOC(
+This operator shrinks the output batch of memory defined in dynamic RNN.
+
+Dynamic RNN is able to handle variable-length sequences, in which the sequences
+in a mini-batch are first sorted by length. After that, the longest sequence
+becomes the first one in the sorted batch, followed by the second longest, the
+third longest, and so on. Dynamic RNN then slices the batch input timestep by
+timestep from the sorted input. Once any sequence in the input batch reaches
+its end, the memory defined in dynamic RNN has to shrink its outputs to adapt
+to the input batch size of the next time step.
+)DOC");
   }
 };
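The shrinking behaviour described in the DOC string above can be illustrated
with a small, self-contained sketch. It only shows the idea, assuming a
mini-batch whose sequences are already sorted longest-first; the lengths below
are made up for the example.

.. code-block:: python

    # Sequence lengths of a mini-batch, already sorted longest-first.
    seq_lengths = [5, 3, 3, 1]

    for t in range(max(seq_lengths)):
        # Sequences that have not yet reached their end at time step t.
        active = sum(1 for length in seq_lengths if length > t)
        print("time step %d: memory is shrunk to batch size %d" % (t, active))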
diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py
index 0cf17f3083a..4b363ecbe78 100644
--- a/python/paddle/v2/fluid/layers/control_flow.py
+++ b/python/paddle/v2/fluid/layers/control_flow.py
@@ -742,11 +742,10 @@ def topk(input, k):
 
 
 def lod_tensor_to_array(x, table):
-    """This function performs the operation that converts an LOD_Tensor to
-    an array.
+    """Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY.
 
     Args:
-        x (Variable|list): The tensor that needs to be converted to an array.
+        x (Variable|list): The LOD tensor to be converted to a LOD tensor array.
         table (ParamAttr|list): The variable that stores the level of lod
                                 which is ordered by sequence length in
                                 descending order.
@@ -776,11 +775,10 @@ def lod_tensor_to_array(x, table):
 
 
 def array_to_lod_tensor(x, table):
-    """This function performs the operations that converts an array to
-    an LOD_Tensor.
+    """Convert a LoDTensorArray to a LoDTensor.
 
     Args:
-        x (Variable|list): The array that needs to be converted to a tensor.
+        x (Variable|list): The lod tensor array to be converted to a tensor.
         table (ParamAttr|list): The variable that stores the level of lod
                                 which is ordered by sequence length in
                                 descending order.
@@ -808,7 +806,8 @@ def array_to_lod_tensor(x, table):
 
 
 def increment(x, value=1.0, in_place=True):
-    """This function performs an operation that increments each value in the
+    """
+    This function performs an operation that increments each value in the
     input :math:`x` by an amount: :math:`value` as mentioned in the input
     parameter. This operation is performed in-place by default.
 
@@ -841,17 +840,24 @@ def increment(x, value=1.0, in_place=True):
 
 
 def array_write(x, i, array=None):
-    """This function performs the operation to write the data out as an
-    LOD_TENSOR_ARRAY.
+    """
+    This function writes the given input variable to the position of the
+    output LOD_TENSOR_ARRAY indicated by the array index. If the output
+    LOD_TENSOR_ARRAY is not given (None), a new one will be created and
+    returned.
 
     Args:
         x (Variable|list): The input tensor from which the data will be read.
-        i (Variable|list): The subscript index in tensor array, that points the
-                           place from which data will be read.
-        array (Variable|list): The data can be read into this variable if
-                               this is assigned.
+        i (Variable|list): The index of the output LOD_TENSOR_ARRAY, pointing to
+                           the position to which the input tensor will be
+                           written.
+        array (Variable|list): The output LOD_TENSOR_ARRAY to which the input
+                               tensor will be written. If this parameter is
+                               None, a new LOD_TENSOR_ARRAY will be created and
+                               returned.
+
     Returns:
-        Variable: The tensor type variable that has the data written to it.
+        Variable: The output LOD_TENSOR_ARRAY where the input tensor is written.
 
     Examples:
         .. code-block::python
@@ -1228,7 +1234,7 @@ class DynamicRNN(object):
         self._assert_in_rnn_block_("step_input")
         if not isinstance(x, Variable):
             raise TypeError(
-                "step_input() can only take a Variable as its input")
+                "step_input() can only take a Variable as its input.")
         parent_block = self._parent_block_()
         if self.lod_rank_table is None:
             self.lod_rank_table = parent_block.create_var(
@@ -1289,8 +1295,8 @@ class DynamicRNN(object):
 
     def __call__(self, *args, **kwargs):
         if self.status != DynamicRNN.AFTER_RNN:
-            raise ValueError(
-                "Dynamic RNN outputs can only be retrieved after rnn block")
+            raise ValueError("Outputs of the dynamic RNN can only be accessed "
+                             "outside the RNN block.")
         if len(self.outputs) == 1:
             return self.outputs[0]
         else:
diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py
index 57668a7983b..438df33afbe 100644
--- a/python/paddle/v2/fluid/layers/tensor.py
+++ b/python/paddle/v2/fluid/layers/tensor.py
@@ -176,25 +176,26 @@ def fill_constant(shape, dtype, value, out=None):
     """
     **fill_constant**
 
-    This function creates a tensor of specified *shape* and
-    *dtype*, and initializes this with a constant supplied in *value*.
+    This function creates a tensor with the specified `shape` and `dtype`, and
+    initializes it with a constant specified by `value`.
 
-    It also sets *stop_gradient* to True.
+    The attribute `stop_gradient` of the created tensor is set to True.
 
     Args:
-        shape(tuple|list|None): Shape of output tensor
-        dtype(np.dtype|core.DataType|str): Data type of output tensor
-        value(float): Constant value to initialize the output tensor
-        out(Variable): Output Variable to initialize
+        shape(tuple|list|None): Shape of the output tensor.
+        dtype(np.dtype|core.DataType|str): Data type of the output tensor.
+        value(float): The constant value used to initialize the output tensor.
+        out(Variable): The output tensor.
 
     Returns:
-        Variable: The tensor variable storing the output
+        Variable: The tensor variable storing the output.
 
     Examples:
         .. code-block:: python
 
          data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
     """
+
     helper = LayerHelper("fill_constant", **locals())
     if out is None:
         out = helper.create_tmp_variable(dtype=dtype)
-- 
GitLab
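As a closing illustration of the array_write semantics documented in the
control_flow.py hunk above: a plain-Python analogue of "write x at index i,
creating the array when none is given" can look like the sketch below. It is
an illustration of the documented behaviour only, not the fluid
implementation; `array_write_sketch` is a hypothetical name.

.. code-block:: python

    def array_write_sketch(x, i, array=None):
        # Create the array when none is given, grow it up to index i, write x.
        if array is None:
            array = []
        while len(array) <= i:
            array.append(None)
        array[i] = x
        return array

    arr = array_write_sketch("tensor_at_step_0", 0)
    arr = array_write_sketch("tensor_at_step_1", 1, arr)
    print(arr)  # ['tensor_at_step_0', 'tensor_at_step_1']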