From c322c7bb02849bf7fa89c552088298609275989b Mon Sep 17 00:00:00 2001
From: caoying03
Date: Wed, 20 Dec 2017 17:31:04 +0800
Subject: [PATCH] some small refines.

---
 paddle/operators/mul_op.cc          | 31 +++++++++++++++----------------
 python/paddle/v2/fluid/layers/nn.py | 25 ++++++++++++++-----------
 2 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index 25944e3d13c..cee1bb00986 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -81,18 +81,18 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
           "(int, default 1) "
           R"DOC(The mul_op can take tensors with more than two dimensions as
                 its inputs. If the input `X` is a tensor with more than two
-                dimensions, `X` will be flatten into a two-dimensional matrix
-                first. The flatten rule is: the first `num_col_dims` will be
-                flatten to form the first dimension of the matrix (height of the
-                matrix), and the rest `rank(X) - num_col_dims` dimensions are
-                flattened to form the second dimension of the matrix (width of the
-                matrix). As a result, height of the flattened matrix is equal to
-                the product of `X`'s first `x_num_col_dims` dimensions' sizes,
-                and width of the flattened matrix is equal to the product of `X`'s
-                last `rank(x) - num_col_dims` dimensions' size.
-                For example, suppose `X` is a 6-dimensional tensor with the shape
-                [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. Then, the flattened
-                matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30].
+                dimensions, `X` will be flattened into a two-dimensional matrix
+                first. The flattening rule is: the first `x_num_col_dims` dimensions
+                are flattened to form the first dimension of the final matrix (the
+                height of the matrix), and the rest `rank(X) - x_num_col_dims`
+                dimensions are flattened to form the second dimension of the final
+                matrix (the width of the matrix). As a result, the height of the
+                flattened matrix is equal to the product of `X`'s first
+                `x_num_col_dims` dimensions' sizes, and the width of the flattened
+                matrix is equal to the product of `X`'s last `rank(X) - x_num_col_dims`
+                dimensions' sizes. For example, suppose `X` is a 5-dimensional
+                tensor with the shape [2, 3, 4, 5, 6] and `x_num_col_dims` = 3. Then
+                the flattened matrix will have the shape [2 x 3 x 4, 5 x 6] = [24, 30].
       )DOC")
         .SetDefault(1)
         .EqualGreaterThan(1);
@@ -102,14 +102,13 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
           R"DOC(The mul_op can take tensors with more than two dimensions as
                 its inputs. If the input `Y` is a tensor with more than two
                 dimensions, `Y` will be flatten into a two-dimensional matrix
-                first. The attribute `y_num_col_dims` is used to flatten `Y` into
-                a two-dimensional matrix. See the comments of `x_num_col_dims` for
-                more details.
+                first. The attribute `y_num_col_dims` determines how `Y` is
+                flattened. See the comments of `x_num_col_dims` for more details.
       )DOC")
         .SetDefault(1)
         .EqualGreaterThan(1);
   AddComment(R"DOC(
-Mul Operator. 
+Mul Operator.
 
 This operator is used to perform matrix multiplication for input X and Y.
 
diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py
index 4d8ecb5ce2c..51da00f5658 100644
--- a/python/paddle/v2/fluid/layers/nn.py
+++ b/python/paddle/v2/fluid/layers/nn.py
@@ -55,24 +55,27 @@ def fc(input,
         act: Activation to be applied to the output of the fully connected
             layer.
         name: Name/alias of the fully connected layer.
 
-    The fully connected can take multiple tensor as inputs. It creates a
-    variable (one for each input tensor) called weights which represents a
-    fully connected weight matrix from each input unit to each output unit.
-    The fully connected layer multiplies each input tensor with its coresponding
-    weight to produce an output Tensor. If multiple input tensors are given,
-    the results of multiple multiplications will be sumed up. If bias_attr is
-    not None, a biases variable will be created and added to the output.
-    Finally, if activation is not None, it will be applied to the output as well.
-
-    This process canbe formulated as follows:
+    The fully connected layer can take multiple tensors as its inputs. It
+    creates a variable (one for each input tensor) called weights, which
+    represents a fully connected weight matrix from each input unit to each
+    output unit. The fully connected layer multiplies each input tensor with
+    its corresponding weight to produce an output Tensor. If multiple input
+    tensors are given, the results of the multiplications will be summed up.
+    If bias_attr is not None, a biases variable will be created and added to
+    the output. Finally, if activation is not None, it will be applied to the
+    output as well.
+
+    This process can be formulated as follows:
 
     .. math::
 
         Y = \sigma({\sum_{i=0}^{N-1}W_iX_i + b})
 
     where, :math:`N` is the number of input, :math:`X_i` is the input tensor,
-    :math`W` is the weights created by this layer, :math:`b` is the bias.
+    :math:`W_i` is the weight created by this layer, :math:`b` is the bias
+    created by this layer (if needed), and :math:`\sigma` is the activation function.
     """
+
     helper = LayerHelper("fc", **locals())
     dtype = helper.input_dtype()
-- 
GitLab
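
Editor's note (not part of the patch): the flattening rule described in the new
`x_num_col_dims` docstring above can be sketched in a few lines of numpy. The
helper name `flatten_to_2d` is made up for illustration only; it simply forms
the height/width products from the docstring and reshapes.

    import numpy as np

    def flatten_to_2d(x, num_col_dims):
        # Height: product of the first `num_col_dims` dimension sizes.
        # Width: product of the remaining `rank(x) - num_col_dims` dimension sizes.
        height = int(np.prod(x.shape[:num_col_dims]))
        width = int(np.prod(x.shape[num_col_dims:]))
        return x.reshape(height, width)

    x = np.zeros((2, 3, 4, 5, 6))
    print(flatten_to_2d(x, 3).shape)  # (24, 30) == (2 * 3 * 4, 5 * 6)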
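
Editor's note (not part of the patch): a hedged usage sketch of the fc layer
whose docstring is edited above, assuming the paddle.v2.fluid API of this era
(fluid.layers.data and fluid.layers.fc accepting a list of inputs); the tensor
names and sizes are illustrative only.

    import paddle.v2.fluid as fluid

    # Two input tensors: fc creates one weight variable per input and sums
    # the products before adding the bias and applying the activation,
    # i.e. out = act(a * W_a + b * W_b + bias).
    a = fluid.layers.data(name='a', shape=[32], dtype='float32')
    b = fluid.layers.data(name='b', shape=[64], dtype='float32')
    out = fluid.layers.fc(input=[a, b], size=128, act='relu')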