Unverified commit 98ec9927 authored by H huzhiqiang, committed by GitHub

modify WeightNormParamAttr English doc test=develop (#20218)

* modify WeightNormParamAttr English doc test=develop
Parent 5fc2cfba
@@ -1103,7 +1103,7 @@ paddle.fluid.CUDAPinnedPlace ('paddle.fluid.core_avx.CUDAPinnedPlace', ('documen
 paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core_avx.CUDAPinnedPlace) -> None
 paddle.fluid.ParamAttr ('paddle.fluid.param_attr.ParamAttr', ('document', '7b5bfe856689036b8fffb71af1558e5c'))
 paddle.fluid.ParamAttr.__init__ (ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.WeightNormParamAttr ('paddle.fluid.param_attr.WeightNormParamAttr', ('document', 'b5ae1698ea72d5a9428000b916a67379'))
+paddle.fluid.WeightNormParamAttr ('paddle.fluid.param_attr.WeightNormParamAttr', ('document', 'ea029ec9e0dea75f136211c433154f25'))
 paddle.fluid.WeightNormParamAttr.__init__ (ArgSpec(args=['self', 'dim', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, None, 1.0, None, True, None, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.DataFeeder ('paddle.fluid.data_feeder.DataFeeder', ('document', 'd9e64be617bd5f49dbb08ac2bc8665e6'))
 paddle.fluid.DataFeeder.__init__ (ArgSpec(args=['self', 'feed_list', 'place', 'program'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
...
@@ -183,7 +183,7 @@ class ParamAttr(object):
 
 class WeightNormParamAttr(ParamAttr):
     """
-    Used for weight Norm. Weight Norm is a reparameterization of the weight vectors
+    Parameter of weight Norm. Weight Norm is a reparameterization of the weight vectors
     in a neural network that decouples the magnitude of those weight vectors from
     their direction. Weight Norm has been implemented as discussed in this
     paper: `Weight Normalization: A Simple Reparameterization to Accelerate
@@ -191,17 +191,27 @@ class WeightNormParamAttr(ParamAttr):
     <https://arxiv.org/pdf/1602.07868.pdf>`_.
 
     Args:
-        dim(int): Dimension over which to compute the norm. Default None.
-        name(str): The parameter's name. Default None.
-        initializer(Initializer): The method to initial this parameter. Default None.
-        learning_rate(float): The parameter's learning rate. The learning rate when
-            optimize is :math:`global\_lr * parameter\_lr * scheduler\_factor`.
+        dim(int): Dimension over which to compute the norm. Dim is a non-negative
+            number which is less than the rank of the weight Tensor. For example, dim can
+            be chosen from 0, 1, 2, 3 for a convolution whose weight shape is [cout, cin, kh, kw]
+            and rank is 4. Default None, meaning that all elements will be normalized.
+        name(str, optional): The parameter's name. Default None, meaning that the name would
+            be created automatically. Please refer to :ref:`api_guide_Name` for more details.
+        initializer(Initializer): The method to initialize this parameter, such as
+            ``initializer = fluid.initializer.ConstantInitializer(1.0)``. Default None,
+            meaning that the weight parameter is initialized by the Xavier initializer and
+            the bias parameter is initialized to 0.
+        learning_rate(float32): The parameter's learning rate. The effective learning rate
+            during optimization is :math:`global\_lr * parameter\_lr * scheduler\_factor`.
             Default 1.0.
-        regularizer(WeightDecayRegularizer): Regularization factor. Default None.
-        trainable(bool): Whether this parameter is trainable. Default True.
-        gradient_clip(BaseGradientClipAttr): The method to clip this parameter's
-            gradient. Default None.
-        do_model_average(bool): Whether this parameter should do model average.
+        regularizer(WeightDecayRegularizer): Regularization factor, such as
+            ``regularizer = fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1)``.
+            Default None, meaning that there is no regularization.
+        trainable(bool, optional): Whether this parameter is trainable. Default True.
+        gradient_clip: The method to clip this parameter's gradient, such as
+            ``gradient_clip = fluid.clip.GradientClipByNorm(clip_norm=2.0)``.
+            Default None, meaning that there is no gradient clipping.
+        do_model_average(bool, optional): Whether this parameter should do model average.
             Default False.
 
     Examples:
@@ -213,7 +223,13 @@ class WeightNormParamAttr(ParamAttr):
                                  size=1000,
                                  param_attr=fluid.WeightNormParamAttr(
                                           dim=None,
-                                          name='weight_norm_param'))
+                                          name='weight_norm_param',
+                                          initializer=fluid.initializer.ConstantInitializer(1.0),
+                                          learning_rate=1.0,
+                                          regularizer=fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1),
+                                          trainable=True,
+                                          gradient_clip=fluid.clip.GradientClipByNorm(clip_norm=2.0),
+                                          do_model_average=False))
 
     """
    # List to record the parameters reparameterized by weight normalization.
...
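For reference, the updated docstring example above is shown in the diff only as a fragment. A self-contained version is sketched below; the import and the fluid.layers.data input layer are added here for completeness (assuming the fluid 1.x API of this era) and are not part of the patch itself.

# Self-contained sketch of the updated docstring example (assumed fluid 1.x API).
# Only the WeightNormParamAttr arguments come from the patch; the import and
# input layer are added so the snippet runs on its own.
import paddle.fluid as fluid

data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
fc = fluid.layers.fc(input=data,
                     size=1000,
                     param_attr=fluid.WeightNormParamAttr(
                         dim=None,
                         name='weight_norm_param',
                         initializer=fluid.initializer.ConstantInitializer(1.0),
                         learning_rate=1.0,
                         regularizer=fluid.regularizer.L2DecayRegularizer(
                             regularization_coeff=0.1),
                         trainable=True,
                         gradient_clip=fluid.clip.GradientClipByNorm(clip_norm=2.0),
                         do_model_average=False))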
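The class description speaks of decoupling the magnitude of weight vectors from their direction. Concretely, the cited paper reparameterizes each weight tensor as w = g * v / ||v||, where v carries the direction and g the magnitude. The NumPy sketch below is purely illustrative: it is not fluid's implementation, and the per-axis handling of dim is an assumption based on the docstring's wording.

import numpy as np

# Illustrative only: the w = g * v / ||v|| reparameterization from the cited
# paper (Salimans & Kingma, 2016), not fluid's internal implementation.
def weight_norm(v, g, dim=None):
    if dim is None:
        # Matches the documented default: the norm is taken over all elements.
        norm = np.sqrt((v ** 2).sum())
    else:
        # Norm over every axis except `dim`, so g carries one magnitude per
        # slice along `dim`, e.g. one per output channel when dim=0.
        axes = tuple(i for i in range(v.ndim) if i != dim)
        norm = np.sqrt((v ** 2).sum(axis=axes, keepdims=True))
    return g * v / norm

# Convolution-style weight [cout, cin, kh, kw], normalized per output channel.
v = np.random.randn(64, 3, 3, 3)   # direction parameter
g = np.ones((64, 1, 1, 1))         # magnitude parameter
w = weight_norm(v, g, dim=0)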