Commit bcae8729 authored by F fengjiayi

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into dev_add_doc

...@@ -112,7 +112,7 @@ $$out = \frac{1}{1 + e^{-x}}$$
__attribute__((unused)) constexpr char LogSigmoidDoc[] = R"DOC(
Logsigmoid Activation Operator

$$out = \\log \\frac{1}{1 + e^{-x}}$$

)DOC";
......
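For reference only, a minimal NumPy sketch (not part of the operator code) of the log-sigmoid formula documented above, written in a numerically stable form:

```python
import numpy as np

def log_sigmoid(x):
    # log(1 / (1 + exp(-x))), computed stably as
    # -(max(-x, 0) + log(1 + exp(-|x|)))
    return -(np.maximum(-x, 0.0) + np.log1p(np.exp(-np.abs(x))))

print(log_sigmoid(np.array([-2.0, 0.0, 2.0])))  # approx. [-2.127, -0.693, -0.127]
```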
...@@ -106,23 +106,36 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker {
              "and M represents the number of decoded boxes.");
    AddComment(R"DOC(

Bounding Box Coder.

Encode/Decode the target bounding box with the priorbox information.

The Encoding schema described below:

ox = (tx - px) / pw / pxv

oy = (ty - py) / ph / pyv

ow = log(abs(tw / pw)) / pwv

oh = log(abs(th / ph)) / phv

The Decoding schema described below:

ox = (pw * pxv * tx + px) - tw / 2

oy = (ph * pyv * ty + py) - th / 2

ow = exp(pwv * tw) * pw + tw / 2

oh = exp(phv * th) * ph + th / 2

where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, width
and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote the
priorbox's (anchor) center coordinates, width and height. `pxv`, `pyv`, `pwv`,
`phv` denote the variance of the priorbox and `ox`, `oy`, `ow`, `oh` denote the
encoded/decoded coordinates, width and height.
)DOC");
  }
};
......
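To illustrate the encoding schema above (illustration only, not the operator's code), here is a small NumPy sketch for a single target/prior box pair; the tuple layout and variable names simply mirror the doc:

```python
import numpy as np

def encode_box(target, prior, prior_var):
    """Encode one target box against one prior box per the doc above.

    target, prior: (center_x, center_y, width, height)
    prior_var:     per-coordinate variances (pxv, pyv, pwv, phv)
    """
    tx, ty, tw, th = target
    px, py, pw, ph = prior
    pxv, pyv, pwv, phv = prior_var
    ox = (tx - px) / pw / pxv
    oy = (ty - py) / ph / pyv
    ow = np.log(abs(tw / pw)) / pwv
    oh = np.log(abs(th / ph)) / phv
    return np.array([ox, oy, ow, oh])

# Example: target centered at (2, 2) with size 4x4, prior at (1, 1) with size 2x2.
print(encode_box((2., 2., 4., 4.), (1., 1., 2., 2.), (0.1, 0.1, 0.2, 0.2)))
```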
...@@ -36,11 +36,12 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
  void Apply() override {
    AddAttr<float>("mean",
                   "(float, default 0.0) "
                   "The mean (or center) of the gaussian distribution.")
        .SetDefault(.0f);
    AddAttr<float>("std",
                   "(float, default 1.0) "
                   "The standard deviation (std, or spread) of the "
                   "gaussian distribution.")
        .SetDefault(1.0f);
    AddAttr<int>("seed",
                 "(int, default 0) "
...@@ -55,9 +56,11 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
        .SetDefault(framework::proto::VarType::FP32);
    AddComment(R"DOC(

GaussianRandom Operator.

Used to initialize tensors with gaussian random generator.

The default mean of the distribution is 0.0 and the default standard
deviation (std) of the distribution is 1.0. Users can set mean and std
via input arguments.
)DOC");
  }
};
......
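For intuition only, a NumPy sketch of what the operator describes: filling a tensor from a Gaussian with the default mean 0.0 and std 1.0 (the seed handling here is an assumption, not the operator's exact behavior):

```python
import numpy as np

def gaussian_random(shape, mean=0.0, std=1.0, seed=0):
    # seed == 0 is treated as "no fixed seed", mirroring the attribute doc.
    rng = np.random.RandomState(seed if seed != 0 else None)
    return rng.normal(loc=mean, scale=std, size=shape).astype('float32')

t = gaussian_random((1024, 64), mean=0.0, std=1.0, seed=42)
print(t.mean(), t.std())  # close to 0.0 and 1.0 for a tensor this large
```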
...@@ -15,11 +15,13 @@
import framework
import numpy as np
import contextlib
from framework import convert_np_dtype_to_dtype_
from core import VarDesc

__all__ = [
    'Constant', 'Uniform', 'Normal', 'Xavier', 'Bilinear', 'force_init_on_cpu',
    'init_on_cpu', 'ConstantInitializer', 'UniformInitializer',
    'NormalInitializer', 'XavierInitializer', 'BilinearInitializer'
]

_force_init_on_cpu_ = False
...@@ -422,6 +424,101 @@ class MSRAInitializer(Initializer):
        return op

class BilinearInitializer(Initializer):
    """Implements the bilinear initializer.

    This initializer can be used in transposed convolution operator to
    act as upsampling. Users can upsample a feature map with shape of
    (B, C, H, W) by any integer factor. The usage is:

    >>> factor = 2
    >>> w_attr = ParamAttr(learning_rate=0., regularizer=L2Decay(0.),
    >>>                    initializer=Bilinear())
    >>> conv_up = fluid.layers.conv2d_transpose(
    >>>     input,
    >>>     num_filters=C,
    >>>     output_size=None,
    >>>     filter_size=2 * factor - factor % 2,
    >>>     padding=ceil((factor - 1) / 2.),
    >>>     stride=factor,
    >>>     groups=C,
    >>>     param_attr=w_attr,
    >>>     bias_attr=False)

    Where, `num_filters=C` and `groups=C` means this is channel-wise transposed
    convolution. The filter shape will be (C, 1, K, K) where K is `filter_size`.
    This initializer will set a (K, K) interpolation kernel for every channel
    of the filter identically. The resulting shape of the output feature map
    will be (B, C, factor * H, factor * W). Note that the learning rate and the
    weight decay are set to 0 in order to keep coefficient values of bilinear
    interpolation unchanged during training.
    """

    def __init__(self):
        """Constructor for BilinearInitializer.
        """
        super(BilinearInitializer, self).__init__()

    def __call__(self, var, block):
        """Add bilinear initialization ops for a variable

        Args:
            var (Variable): Variable that needs to be initialized.
            block (Block): The block in which initialization ops should
                           be added.

        Returns:
            the initialization op

        Raises:
            ValueError: If type of `var` and `block` is not right.
                        If the length of `var`'s shape is not 4, or
                        var.shape[2] != var.shape[3].
        """
        if not isinstance(var, framework.Variable):
            raise ValueError("var must be framework.Variable.")
        if not isinstance(block, framework.Block):
            raise ValueError("block must be framework.Block.")

        shape = var.shape
        if len(shape) != 4:
            raise ValueError("the length of shape must be 4.")
        if shape[2] != shape[3]:
            raise ValueError("shape[2] must be equal to shape[3].")

        weight = np.zeros(np.prod(var.shape), dtype='float32')
        size = shape[3]
        # factor
        f = np.ceil(size / 2.)
        # center
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % size
            y = (i / size) % size
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        weight = np.reshape(weight, shape)

        if var.dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in weight.flat]
        else:
            raise ValueError("Unsupported dtype %s" % var.dtype)
        if np.prod(shape) > 1024 * 1024:
            raise ValueError("The size of input is too big. ")
        op = block.append_op(
            type='assign_value',
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': list(shape),
                value_name: values
            })
        var.op = op
        return op
# We shorten the class name, since users will use the initializer with the package
# name. The sample code:
#
...@@ -436,3 +533,4 @@ Uniform = UniformInitializer
Normal = NormalInitializer
Xavier = XavierInitializer
MSRA = MSRAInitializer
Bilinear = BilinearInitializer
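As a standalone sanity check (plain NumPy, outside Paddle), the (K, K) kernel that `BilinearInitializer.__call__` writes into every channel can be reproduced like this:

```python
import numpy as np

def bilinear_kernel(size):
    """Return the (size, size) bilinear interpolation kernel built above."""
    f = np.ceil(size / 2.)              # upsampling factor
    c = (2 * f - 1 - f % 2) / (2. * f)  # kernel center
    x = np.arange(size)
    k1d = 1 - np.abs(x / f - c)
    return np.outer(k1d, k1d).astype('float32')

print(bilinear_kernel(4))  # largest weights near the center, tapering to the borders
```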
...@@ -378,16 +378,16 @@ def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
        Variable: A Reader Variable from which we can get random data.

    Examples:

        .. code-block:: python

            reader = fluid.layers.random_data_generator(
                                             low=0.0,
                                             high=1.0,
                                             shapes=[[3,224,224], [1]],
                                             lod_levels=[0, 0])

            # Via the reader, we can use 'read_file' layer to get data:
            image, label = fluid.layers.read_file(reader)
    """
    dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
    shape_concat = []
......
...@@ -364,8 +364,7 @@ def dynamic_lstm(input,
        cell_activation(str): The activation for cell output. Choices = ["sigmoid",
                              "tanh", "relu", "identity"], default "tanh".
        candidate_activation(str): The activation for candidate hidden state.
                              Choices = ["sigmoid", "tanh", "relu", "identity"],
                              default "tanh".
        dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
        name(str|None): A name for this layer(optional). If set None, the layer
...@@ -540,27 +539,31 @@ def dynamic_lstmp(input,
        cell_activation(str): The activation for cell output. Choices = ["sigmoid",
                              "tanh", "relu", "identity"], default "tanh".
        candidate_activation(str): The activation for candidate hidden state.
                              Choices = ["sigmoid", "tanh", "relu", "identity"],
                              default "tanh".
        proj_activation(str): The activation for projection output.
                              Choices = ["sigmoid", "tanh", "relu", "identity"],
                              default "tanh".
        dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.

    Returns:
        tuple: A tuple of two output variables: the projection of hidden state \
               and the cell state of LSTMP. The shape of the projection is \
               (T x P), that of the cell state is (T x D), and both have the \
               same LoD as the `input`.

    Examples:

        .. code-block:: python

            dict_dim, emb_dim = 128, 64
            data = fluid.layers.data(name='sequence', shape=[1],
                                     dtype='int32', lod_level=1)
            emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
            hidden_dim, proj_dim = 512, 256
            fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4,
                                     act=None, bias_attr=None)
            proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out,
                                                     size=hidden_dim * 4,
...@@ -626,10 +629,10 @@ def dynamic_gru(input,
                candidate_activation='tanh',
                h_0=None):
    """
    **Gated Recurrent Unit (GRU) Layer**

    Refer to `Empirical Evaluation of Gated Recurrent Neural Networks on
    Sequence Modeling <https://arxiv.org/abs/1412.3555>`_ .

    The formula is as follows:
...@@ -676,17 +679,25 @@
            Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid".
        candidate_activation(str): The activation for candidate hidden state.
            Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh".
        h_0 (Variable): The initial hidden state. If not set, it defaults to
            all zeros. This is a tensor with shape (N x D), where N is the
            total time steps of the input mini-batch feature and D is the
            hidden size.

    Returns:
        Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \
            and the sequence length is the same as that of the input.

    Examples:

        .. code-block:: python

            dict_dim, emb_dim = 128, 64
            data = fluid.layers.data(name='sequence', shape=[1],
                                     dtype='int32', lod_level=1)
            emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
            hidden_dim = 512
            x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
            hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
    """
...@@ -924,12 +935,12 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
    Drop or keep each element of `x` independently. Dropout is a regularization
    technique for reducing overfitting by preventing neuron co-adaptation during
    training. The dropout operator randomly sets (according to the given dropout
    probability) the outputs of some units to zero, while the others remain
    unchanged.

    Args:
        x (Variable): The input tensor variable.
        dropout_prob (float): Probability of setting units to zero.
        is_test (bool): A flag indicating whether it is in test phase or not.
        seed (int): A Python integer used to create random seeds. If this
...@@ -940,13 +951,14 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
                    will be named automatically.

    Returns:
        Variable: A tensor variable with the same shape as `x`.

    Examples:

        .. code-block:: python

            x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
            dropped = fluid.layers.dropout(x, dropout_prob=0.5)
    """
    helper = LayerHelper('dropout', **locals())
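A reference NumPy sketch of the dropout behavior described above (the test-time scaling convention here is an assumption; the fluid operator's exact inference behavior is governed by its own attributes):

```python
import numpy as np

def dropout_ref(x, dropout_prob, is_test=False, seed=None):
    """Zero each element with probability dropout_prob during training."""
    if is_test:
        # Assumed convention: scale outputs at test time instead of dropping.
        return x * (1.0 - dropout_prob)
    rng = np.random.RandomState(seed)
    mask = (rng.uniform(size=x.shape) >= dropout_prob).astype(x.dtype)
    return x * mask

x = np.ones((2, 4), dtype='float32')
print(dropout_ref(x, 0.5, seed=1))  # roughly half of the entries become 0
```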
...@@ -3012,26 +3024,27 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
    norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes

    .. math::

        y = \\frac{x}{ \\sqrt{\\sum {x^2} + epsilon }}

    For `x` with more dimensions, this layer independently normalizes each 1-D
    slice along dimension `axis`.

    Args:
        x(Variable|list): The input tensor to l2_normalize layer.
        axis(int): The axis on which to apply normalization. If `axis < 0`, \
            the dimension to normalize is rank(X) + axis. -1 is the
            last dimension.
        epsilon(float): The epsilon value is used to avoid division by zero, \
            the default value is 1e-10.
        name(str|None): A name for this layer(optional). If set None, the layer \
            will be named automatically.

    Returns:
        Variable: The output tensor variable, which has the same shape as `x`.

    Examples:

        .. code-block:: python

            data = fluid.layers.data(name="data",
......
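A NumPy sketch of the normalization formula above, applied independently to each 1-D slice along `axis` (illustration only):

```python
import numpy as np

def l2_normalize_ref(x, axis, epsilon=1e-12):
    """y = x / sqrt(sum(x^2) + epsilon) along the given axis."""
    norm = np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True) + epsilon)
    return x / norm

x = np.array([[3.0, 4.0], [6.0, 8.0]], dtype='float32')
print(l2_normalize_ref(x, axis=1))  # each row now has (nearly) unit L2 norm
```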
...@@ -513,11 +513,27 @@ def save_combine(x, file_path, overwrite=True):
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether or not to overwrite the given file when it
                         already exists. If it is set to 'False' and the file
                         exists, a runtime error will be thrown.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            out = fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
......
...@@ -364,5 +364,22 @@ class TestMSRAInitializer(unittest.TestCase):
        self.assertEqual(init_op.attr('seed'), 134)


class TestBilinearInitializer(unittest.TestCase):
    def test_bilinear_initializer(self):
        """Test the bilinear initializer with supplied arguments
        """
        program = framework.Program()
        block = program.global_block()
        block.create_parameter(
            dtype="float32",
            shape=[8, 1, 3, 3],
            lod_level=0,
            name="param",
            initializer=initializer.BilinearInitializer())
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')


if __name__ == '__main__':
    unittest.main()