From 6b612a2807407b87e865e8b698be40b7bae97d67 Mon Sep 17 00:00:00 2001
From: wopeizl
Date: Fri, 11 Oct 2019 14:59:26 +0800
Subject: [PATCH] optimize the english description for APIs test=develop
 test=document_fix (#20286)

* optimize the english description for APIs test=develop test=document_fix
---
 paddle/fluid/API.spec                | 14 ++---
 python/paddle/fluid/layers/tensor.py | 39 +++++++------
 python/paddle/fluid/optimizer.py     | 86 +++++++++++++++-------
 3 files changed, 72 insertions(+), 67 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 1bac87f444d..03104e2cc87 100755
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -315,8 +315,8 @@ paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lo
 paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', '2edf37d57862b24a7a26aa19a3573f73'))
 paddle.fluid.layers.load (ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,)), ('document', '309f9e5249463e1b207a7347b2a91134'))
 paddle.fluid.layers.create_tensor (ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False)), ('document', 'fdc2d964488e99fb0743887454c34e36'))
-paddle.fluid.layers.create_parameter (ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', '021272f30e0cdf7503586815378abfb8'))
-paddle.fluid.layers.create_global_var (ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None)), ('document', '47ea8b8c91879e50c9036e418b00ef4a'))
+paddle.fluid.layers.create_parameter (ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', '727aa63c061919bee38547fb126d9428'))
+paddle.fluid.layers.create_global_var (ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None)), ('document', 'fa7f74cfb940521cc9fdffabc83debbf'))
 paddle.fluid.layers.cast (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '1e44a534cf7d26ab230aa9f5e4e0525a'))
 paddle.fluid.layers.tensor_array_to_tensor (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '764c095ba4562ae740f979e970152d6e'))
 paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b3f30feb5dec8f110d7393ffeb30dbd9'))
@@ -924,7 +924,7 @@ paddle.fluid.nets.sequence_conv_pool (ArgSpec(args=['input', 'num_filters', 'fil
 paddle.fluid.nets.glu (ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,)), ('document', '3efe197c8e3e75f84a4c464d8b74e943'))
 paddle.fluid.nets.scaled_dot_product_attention (ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0)), ('document', 'b1a07a0000eb9103e3a143ca8c13de5b'))
 paddle.fluid.nets.img_conv_group (ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True)), ('document', 'a59c581d5969266427e841abe69f694a'))
-paddle.fluid.optimizer.SGDOptimizer ('paddle.fluid.optimizer.SGDOptimizer', ('document', 'c3c8dd3193d991adf8bda505560371d6'))
+paddle.fluid.optimizer.SGDOptimizer ('paddle.fluid.optimizer.SGDOptimizer', ('document', 'fc09d6e6c1083cec2dce51f6f9f4ecaf'))
 paddle.fluid.optimizer.SGDOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.SGDOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '80ea99c9af7ef5fac7e57fb302103610'))
 paddle.fluid.optimizer.SGDOptimizer.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae'))
@@ -933,7 +933,7 @@ paddle.fluid.optimizer.SGDOptimizer.get_opti_var_name_list (ArgSpec(args=['self'
 paddle.fluid.optimizer.SGDOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'grad_clip'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '8387af01322a6defc92c1832faccd304'))
 paddle.fluid.optimizer.SGDOptimizer.set_dict (ArgSpec(args=['self', 'state_dict'], varargs=None, keywords=None, defaults=None), ('document', '36aa497a2d29abaa4147987d71721d17'))
 paddle.fluid.optimizer.SGDOptimizer.state_dict (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'deca1537945d33940b350923fb16ddf8'))
-paddle.fluid.optimizer.MomentumOptimizer ('paddle.fluid.optimizer.MomentumOptimizer', ('document', 'a72bd02e5459e64596897d190413d449'))
+paddle.fluid.optimizer.MomentumOptimizer ('paddle.fluid.optimizer.MomentumOptimizer', ('document', '2bda0a60340fce6c8e594bb35b4e0fcd'))
 paddle.fluid.optimizer.MomentumOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.MomentumOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '80ea99c9af7ef5fac7e57fb302103610'))
 paddle.fluid.optimizer.MomentumOptimizer.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae'))
@@ -987,7 +987,7 @@ paddle.fluid.optimizer.DecayedAdagradOptimizer.get_opti_var_name_list (ArgSpec(a
 paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'grad_clip'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '8387af01322a6defc92c1832faccd304'))
 paddle.fluid.optimizer.DecayedAdagradOptimizer.set_dict (ArgSpec(args=['self', 'state_dict'], varargs=None, keywords=None, defaults=None), ('document', '36aa497a2d29abaa4147987d71721d17'))
 paddle.fluid.optimizer.DecayedAdagradOptimizer.state_dict (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'deca1537945d33940b350923fb16ddf8'))
-paddle.fluid.optimizer.FtrlOptimizer ('paddle.fluid.optimizer.FtrlOptimizer', ('document', 'cba8aae0a267b9a4d8833ae79a00fc55'))
+paddle.fluid.optimizer.FtrlOptimizer ('paddle.fluid.optimizer.FtrlOptimizer', ('document', 'a2573c97cd45c2be0d33243cd1aa4a9b'))
 paddle.fluid.optimizer.FtrlOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.FtrlOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '80ea99c9af7ef5fac7e57fb302103610'))
 paddle.fluid.optimizer.FtrlOptimizer.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae'))
@@ -996,7 +996,7 @@ paddle.fluid.optimizer.FtrlOptimizer.get_opti_var_name_list (ArgSpec(args=['self
 paddle.fluid.optimizer.FtrlOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'grad_clip'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '8387af01322a6defc92c1832faccd304'))
 paddle.fluid.optimizer.FtrlOptimizer.set_dict (ArgSpec(args=['self', 'state_dict'], varargs=None, keywords=None, defaults=None), ('document', '36aa497a2d29abaa4147987d71721d17'))
 paddle.fluid.optimizer.FtrlOptimizer.state_dict (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'deca1537945d33940b350923fb16ddf8'))
-paddle.fluid.optimizer.RMSPropOptimizer ('paddle.fluid.optimizer.RMSPropOptimizer', ('document', '5217bc4fc399010021d6b70541005780'))
+paddle.fluid.optimizer.RMSPropOptimizer ('paddle.fluid.optimizer.RMSPropOptimizer', ('document', '6aeb527f958d1d6962d4e56751f44dbd'))
 paddle.fluid.optimizer.RMSPropOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.RMSPropOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '80ea99c9af7ef5fac7e57fb302103610'))
 paddle.fluid.optimizer.RMSPropOptimizer.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae'))
@@ -1025,7 +1025,7 @@ paddle.fluid.optimizer.ModelAverage.minimize (ArgSpec(args=['self', 'loss', 'sta
 paddle.fluid.optimizer.ModelAverage.restore (ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None), ('document', '7917cbe4d3ed7954ae73360fbccc39f6'))
 paddle.fluid.optimizer.ModelAverage.set_dict (ArgSpec(args=['self', 'state_dict'], varargs=None, keywords=None, defaults=None), ('document', '36aa497a2d29abaa4147987d71721d17'))
 paddle.fluid.optimizer.ModelAverage.state_dict (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'deca1537945d33940b350923fb16ddf8'))
-paddle.fluid.optimizer.LarsMomentumOptimizer ('paddle.fluid.optimizer.LarsMomentumOptimizer', ('document', '030b9092a96a409b1bf5446bf45d0659'))
+paddle.fluid.optimizer.LarsMomentumOptimizer ('paddle.fluid.optimizer.LarsMomentumOptimizer', ('document', '107d591a9b03264bfc0c55f424f90574'))
 paddle.fluid.optimizer.LarsMomentumOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '80ea99c9af7ef5fac7e57fb302103610'))
 paddle.fluid.optimizer.LarsMomentumOptimizer.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae'))
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 3ace8b0139a..73add29bcd2 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -66,24 +66,26 @@ def create_parameter(shape,
                      is_bias=False,
                      default_initializer=None):
     """
-    Create a parameter. The parameter is a learnable variable, which can have
+    This function creates a parameter. The parameter is a learnable variable, which can have
     gradient, and can be optimized.
 
-    NOTE: this is a very low-level API. This API is useful when you create
-    operator by your self. instead of using layers.
+    NOTE: this is a very low-level API. This API is useful when you create an
+    operator by yourself, instead of using layers.
 
-    Args:
-        shape(list[int]): shape of the parameter
-        dtype(string): element type of the parameter
-        attr(ParamAttr): attributes of the parameter
-        is_bias(bool): This can affect which default initializer is chosen
-            when default_initializer is None. If is_bias,
+    Parameters:
+        shape (list of int): Shape of the parameter
+        dtype (str): Data type of the parameter
+        name (str, optional): For detailed information, please refer to
+            :ref:`api_guide_Name`. Usually, there is no need to set it; the default is None.
+        attr (ParamAttr, optional): Attributes of the parameter
+        is_bias (bool, optional): This can affect which default initializer is chosen
+            when default_initializer is None. If is_bias is True,
             initializer.Constant(0.0) will be used. Otherwise,
             Xavier() will be used.
-        default_initializer(Initializer): initializer for the parameter
+        default_initializer (Initializer, optional): Initializer for the parameter
 
     Returns:
-        the created parameter.
+        The created parameter.
 
     Examples:
         .. code-block:: python
@@ -106,23 +108,22 @@ def create_global_var(shape,
                       force_cpu=False,
                       name=None):
     """
-    Create a new tensor variable with value in the global block(block 0).
+    This function creates a new tensor variable with value in the global block (block 0).
 
-    Args:
-        shape(list[int]): shape of the variable
-        value(float): the value of the variable. The new created
+    Parameters:
+        shape (list of int): Shape of the variable
+        value (float): The value of the variable. The newly created
             variable will be filled with it.
-        dtype(string): data type of the variable
-        persistable(bool): if this variable is persistable.
+        dtype (str): Data type of the variable
+        persistable (bool, optional): Whether this variable is persistable.
             Default: False
-        force_cpu(bool): force this variable to be on CPU.
+        force_cpu (bool, optional): Whether to force this variable to be on CPU.
             Default: False
-        name(str|None): The name of the variable. If set to None the variable
-            name will be generated automatically.
-            Default: None
+        name (str, optional): For detailed information, please refer to
+            :ref:`api_guide_Name`. Usually, there is no need to set it; the default is None.
 
     Returns:
-        Variable: the created Variable
+        Variable: The created Variable
 
     Examples:
         .. code-block:: python
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 377cd2aa49c..c85433576c0 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -695,12 +695,13 @@ class SGDOptimizer(Optimizer):
 
         param\_out = param - learning\_rate * grad
 
-    Args:
-        learning_rate (float|Variable): the learning rate used to update parameters. \
-        Can be a float value or a Variable with one float value as data element.
-        regularization: A Regularizer, such as
-            fluid.regularizer.L2DecayRegularizer.
-        name: A optional name prefix.
+    Parameters:
+        learning_rate (float|Variable): The learning rate used to update parameters. \
+        Can be a float value or a Variable with one float value as data element.
+        regularization: A Regularizer, such as :ref:`api_fluid_regularizer_L2DecayRegularizer`. \
+        Optional, default is None.
+        name (str, optional): This parameter is used by developers to print debugging information. \
+        For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Examples:
         .. code-block:: python
@@ -778,14 +779,15 @@ class MomentumOptimizer(Optimizer):
 
         &\quad param = param - learning\_rate * velocity
 
-    Args:
-        learning_rate (float|Variable): the learning rate used to update parameters. \
-        Can be a float value or a Variable with one float value as data element.
-        momentum (float): momentum factor
-        use_nesterov (bool): enables Nesterov momentum
-        regularization: A Regularizer, such as
-            fluid.regularizer.L2DecayRegularizer.
-        name: A optional name prefix.
+    Parameters:
+        learning_rate (float|Variable): The learning rate used to update parameters. \
+        Can be a float value or a Variable with one float value as data element.
+        momentum (float): Momentum factor
+        use_nesterov (bool, optional): Enables Nesterov momentum, default is False.
+        regularization: A Regularizer, such as :ref:`api_fluid_regularizer_L2DecayRegularizer`. \
+        Optional, default is None.
+        name (str, optional): This parameter is used by developers to print debugging information. \
+        For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Examples:
         .. code-block:: python
@@ -1142,16 +1144,16 @@ class LarsMomentumOptimizer(Optimizer):
 
         & param = param - velocity
 
-    Args:
-        learning_rate (float|Variable): the learning rate used to update parameters. \
-        Can be a float value or a Variable with one float value as data element.
-        momentum (float): momentum factor
-        lars_coeff (float): defines how much we trust the layer to change its weights.
-        lars_weight_decay (float): weight decay coefficient for decaying using LARS.
-        regularization: A Regularizer, such as
-            fluid.regularizer.L2DecayRegularizer.
-        name: A optional name prefix.
-
+    Parameters:
+        learning_rate (float|Variable): The learning rate used to update parameters. \
+        Can be a float value or a Variable with one float value as data element.
+        momentum (float): Momentum factor
+        lars_coeff (float): Defines how much we trust the layer to change its weights.
+        lars_weight_decay (float): Weight decay coefficient for decaying using LARS.
+        regularization: A Regularizer, such as :ref:`api_fluid_regularizer_L2DecayRegularizer`. \
+        Optional, default is None.
+        name (str, optional): This parameter is used by developers to print debugging information. \
+        For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Examples:
         .. code-block:: python
@@ -2015,20 +2017,21 @@ class RMSPropOptimizer(Optimizer):
 
         from 1e-4 to 1e-8.
 
-    Args:
-        learning_rate(float): global learning rate.
-        rho(float): rho is :math: `\\rho` in equation, set 0.95 by default.
+    Parameters:
+        learning_rate(float): Global learning rate.
+        rho(float): rho is :math:`\\rho` in the equation, default is 0.95.
-        epsilon(float): :math: `\\epsilon` in equation is smoothing term to
-            avoid division by zero, set 1e-6 by default.
+        epsilon(float): :math:`\\epsilon` in the equation is a smoothing term to
+            avoid division by zero, default is 1e-6.
-        momentum(float): :math:`\\beta` in equation is the momentum term,
-            set 0.0 by default.
+        momentum(float): :math:`\\beta` in the equation is the momentum term,
+            default is 0.0.
         centered(bool): If True, gradients are normalized by the estimated variance of
             the gradient; if False, by the uncentered second moment. Setting this to
             True may help with training, but is slightly more expensive in terms of
             computation and memory. Defaults to False.
-        regularization: A Regularizer, such as
-            fluid.regularizer.L2DecayRegularizer.
-        name: A optional name prefix.
+        regularization: A Regularizer, such as :ref:`api_fluid_regularizer_L2DecayRegularizer`. \
+        Optional, default is None.
+        name (str, optional): This parameter is used by developers to print debugging information. \
+        For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Raises:
         ValueError: If learning_rate, rho, epsilon, momentum are None.
@@ -2180,14 +2183,15 @@ class FtrlOptimizer(Optimizer):
 
         &squared\_accum += grad^2
 
-    Args:
-        learning_rate (float|Variable): global learning rate.
-        l1 (float): L1 regularization strength.
-        l2 (float): L2 regularization strength.
-        lr_power (float): Learning Rate Power.
-        regularization: A Regularizer, such as
-            fluid.regularizer.L2DecayRegularizer.
-        name: A optional name prefix.
+    Parameters:
+        learning_rate (float|Variable): Global learning rate.
+        l1 (float): L1 regularization strength, default is 0.0.
+        l2 (float): L2 regularization strength, default is 0.0.
+        lr_power (float): Learning rate power, default is -0.5.
+        regularization: A Regularizer, such as :ref:`api_fluid_regularizer_L2DecayRegularizer`. \
+        Optional, default is None.
+        name (str, optional): This parameter is used by developers to print debugging information. \
+        For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Raises:
-        ValueError: If learning_rate, rho, epsilon, momentum are None.
+        ValueError: If learning_rate is None.
@@ -2220,7 +2224,7 @@ class FtrlOptimizer(Optimizer):
         for data in train_reader():
             exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
 
-    Notes:
+    NOTE:
         Currently, FtrlOptimizer doesn't support sparse parameter optimization.
     """
-- 
GitLab
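
As a quick cross-check of the `create_parameter` and `create_global_var` docstrings revised above, here is a minimal usage sketch. It is not part of the patch; it only assumes the fluid 1.x signatures exactly as listed in the API.spec ArgSpecs, and the shapes, values, and variable names are illustrative.

.. code-block:: python

    import paddle.fluid as fluid

    # create_parameter(shape, dtype, name=None, attr=None, is_bias=False,
    # default_initializer=None): a learnable variable, useful when building
    # an operator by hand instead of using prebuilt layers.
    w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')

    # create_global_var(shape, value, dtype, persistable=False,
    # force_cpu=False, name=None): a tensor in the global block (block 0),
    # filled with `value`; persistable=True keeps it across iterations.
    step = fluid.layers.create_global_var(
        shape=[1], value=0.0, dtype='float32', persistable=True)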
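
Likewise, an end-to-end sketch of the optimizer docstrings touched here, shown with SGDOptimizer; MomentumOptimizer, LarsMomentumOptimizer, RMSPropOptimizer, and FtrlOptimizer are constructed the same way with the extra arguments from their ArgSpecs. The tiny regression network and random data are illustrative only, not part of the patch.

.. code-block:: python

    import numpy
    import paddle.fluid as fluid

    # A one-feature linear regression, just to give the optimizer a loss.
    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(
        fluid.layers.square_error_cost(input=pred, label=y))

    # SGDOptimizer(learning_rate, regularization=None, name=None) per the
    # ArgSpec above; it applies param_out = param - learning_rate * grad.
    sgd = fluid.optimizer.SGDOptimizer(learning_rate=0.01)
    sgd.minimize(loss)
    # e.g. fluid.optimizer.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
    # or fluid.optimizer.RMSPropOptimizer(learning_rate=0.01, rho=0.95)
    # would drop in the same way.

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    feed = {'x': numpy.random.rand(4, 1).astype('float32'),
            'y': numpy.random.rand(4, 1).astype('float32')}
    loss_val, = exe.run(feed=feed, fetch_list=[loss])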