Unverified commit 1bbf08bc, authored by MRXLT, committed via GitHub

update optimizer doc for 2.0 (#2424)

* for new optimizer

* update en doc

* update en doc

* update en doc

* update cn doc

* fix cn doc

* update doc

* fix code style

* fix formula of adamw

* update doc

* update doc

* fix sample code

* fix sample code

* fix rmsprop sample code

* change learningratedecay to lrscheduler

* fix doc

* fix return

* fix sample code

* fix sample code

* fix sample code

* fix sample code
Parent c48c22dd
......@@ -21,24 +21,38 @@ import contextlib
import paddle.fluid as fluid
import paddle.tensor as tensor
import paddle.nn as nn
import paddle.complex as complex
import paddle.optimizer as optimizer
#import paddle.complex as complex
#import paddle.framework as framework
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--submodules', nargs="*")
parser.add_argument(
'--module_name', type=str, help='Generate the documentation of which module')
'--module_name',
type=str,
help='Generate the documentation of which module')
parser.add_argument(
'--module_prefix', type=str, help='Generate the prefix of module')
parser.add_argument(
'--output', type=str, help='Output file or output directory for output rst')
'--output',
type=str,
help='Output file or output directory for output rst')
parser.add_argument(
'--output_name', type=str, help='Output file or output directory for output rst')
'--output_name',
type=str,
help='Output file or output directory for output rst')
parser.add_argument(
'--output_dir', type=str, help='Output file or output directory for output rst')
'--output_dir',
type=str,
help='Output file or output directory for output rst')
parser.add_argument(
'--to_multiple_files', type=bool, default=False, help='Whether to separate to multiple files')
'--to_multiple_files',
type=bool,
default=False,
help='Whether to separate to multiple files')
return parser.parse_args()
......@@ -53,8 +67,9 @@ def parse_arg():
else:
pass
class DocGenerator(object):
def __init__(self, module_name=None, module_prefix=None):
def __init__(self, module_name=None, module_prefix=None):
self.module_name = module_name
self.module_prefix = module_prefix
self.stream = None
......@@ -62,7 +77,7 @@ class DocGenerator(object):
@contextlib.contextmanager
def guard(self, filename):
assert self.stream is None, "stream must be None"
self.stream = open(filename, 'w')
self.stream = open(filename, 'w')
yield
self.stream.close()
self.stream = None
......@@ -70,14 +85,15 @@ class DocGenerator(object):
def print_submodule(self, submodule_name):
submodule = getattr(self.module, submodule_name)
if submodule is None:
raise ValueError("Cannot find submodule {0}".format(submodule_name))
raise ValueError(
"Cannot find submodule {0}".format(submodule_name))
self.print_section(submodule_name)
for item in sorted(submodule.__all__,key=str.lower):
for item in sorted(submodule.__all__, key=str.lower):
self.print_item(item)
def print_current_module(self):
for item in sorted(self.module.__all__,key=str.lower):
for item in sorted(self.module.__all__, key=str.lower):
self.print_item(item)
def print_section(self, name):
......@@ -91,7 +107,7 @@ class DocGenerator(object):
self.print_method(name)
else:
self.stream.close()
path = os.getcwd()+"/"+output_name+"/"+name+".rst"
path = os.getcwd() + "/" + output_name + "/" + name + ".rst"
if name != "PipeReader":
os.remove(path)
......@@ -149,7 +165,9 @@ class DocGenerator(object):
self.stream.write(".. _api_{0}_{1}:\n\n".format("_".join(
self.module_prefix.split(".")), name))
def generate_doc(module_name, module_prefix, output, output_name, to_multiple_files, output_dir):
def generate_doc(module_name, module_prefix, output, output_name,
to_multiple_files, output_dir):
if module_name == "":
module_name = None
......@@ -176,13 +194,14 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi
else:
gen.module_prefix = output_name + "." + module_prefix
dirname = output if to_multiple_files else os.path.dirname(output)
dirname = output if to_multiple_files else os.path.dirname(output)
if output_dir != None:
dirname = output_dir + "/" + dirname
output = output_dir + "/" + output
if len(dirname) > 0 and (not os.path.exists(dirname) or not os.path.isdir(dirname)):
if len(dirname) > 0 and (not os.path.exists(dirname) or
not os.path.isdir(dirname)):
os.makedirs(dirname)
if not to_multiple_files:
......@@ -191,7 +210,7 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi
prefix_len = len(gen.module_prefix)
assert gen.module_prefix == gen.module_name[0:prefix_len], \
"module_prefix must be prefix of module_name"
diff_name = gen.module_name[prefix_len+1:]
diff_name = gen.module_name[prefix_len + 1:]
if diff_name != "":
header_name = diff_name
else:
......@@ -203,7 +222,7 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi
gen._print_header_(header_name, dot='=', is_title=True)
gen.print_current_module()
else:
apis = sorted(gen.module.__all__,key=str.lower)
apis = sorted(gen.module.__all__, key=str.lower)
for api in apis:
header_name = api
with gen.guard(os.path.join(output, api + '.rst')):
......@@ -213,7 +232,8 @@ def generate_doc(module_name, module_prefix, output, output_name, to_multiple_fi
def main():
args = parse_arg()
generate_doc(args.module_name, args.module_prefix, args.output, args.output_name, args.to_multiple_files, args.output_dir)
generate_doc(args.module_name, args.module_prefix, args.output,
args.output_name, args.to_multiple_files, args.output_dir)
if __name__ == '__main__':
......
......@@ -11,8 +11,7 @@ paddle.optimizer
optimizer/AdagradOptimizer.rst
optimizer/Adam.rst
optimizer/Adamax.rst
optimizer/AdamaxOptimizer.rst
optimizer/AdamOptimizer.rst
optimizer/AdamW.rst
optimizer/DecayedAdagrad.rst
optimizer/DecayedAdagradOptimizer.rst
optimizer/DGCMomentumOptimizer.rst
......@@ -29,9 +28,10 @@ paddle.optimizer
optimizer/Momentum.rst
optimizer/MomentumOptimizer.rst
optimizer/RecomputeOptimizer.rst
optimizer/RMSPropOptimizer.rst
optimizer/RMSProp.rst
optimizer/SGD.rst
optimizer/SGDOptimizer.rst
optimizer/Optimizer.rst
optimizer/NoamLR.rst
optimizer/PiecewiseLR.rst
optimizer/NaturalExpLR.rst
......@@ -43,4 +43,4 @@ paddle.optimizer
optimizer/StepLR.rst
optimizer/LambdaLR.rst
optimizer/ReduceLROnPlateau.rst
optimizer/CosineAnnealingLR.rst
\ No newline at end of file
optimizer/CosineAnnealingLR.rst
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_Adadelta:
.. _api_optimizer_Adadelta:
Adadelta
--------
.. autoclass:: paddle.fluid.optimizer.Adadelta
.. autoclass:: paddle.optimizer.Adadelta
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_AdadeltaOptimizer:
.. _api_optimizer_AdadeltaOptimizer:
AdadeltaOptimizer
-----------------
.. autoclass:: paddle.fluid.optimizer.AdadeltaOptimizer
.. autoclass:: paddle.optimizer.AdadeltaOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_Adagrad:
.. _api_optimizer_Adagrad:
Adagrad
-------
.. autoclass:: paddle.fluid.optimizer.Adagrad
.. autoclass:: paddle.optimizer.Adagrad
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_AdagradOptimizer:
.. _api_optimizer_AdagradOptimizer:
AdagradOptimizer
----------------
.. autoclass:: paddle.fluid.optimizer.AdagradOptimizer
.. autoclass:: paddle.optimizer.AdagradOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_Adam:
.. _api_optimizer_Adam:
Adam
----
.. autoclass:: paddle.fluid.optimizer.Adam
.. autoclass:: paddle.optimizer.Adam
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_AdamOptimizer:
.. _api_optimizer_AdamW:
AdamOptimizer
-------------
AdamW
-----
.. autoclass:: paddle.fluid.optimizer.AdamOptimizer
.. autoclass:: paddle.optimizer.AdamW
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_Adamax:
.. _api_optimizer_Adamax:
Adamax
------
.. autoclass:: paddle.fluid.optimizer.Adamax
.. autoclass:: paddle.optimizer.Adamax
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_DGCMomentumOptimizer:
.. _api_optimizer_DGCMomentumOptimizer:
DGCMomentumOptimizer
--------------------
.. autoclass:: paddle.fluid.optimizer.DGCMomentumOptimizer
.. autoclass:: paddle.optimizer.DGCMomentumOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_DecayedAdagrad:
.. _api_optimizer_DecayedAdagrad:
DecayedAdagrad
--------------
.. autoclass:: paddle.fluid.optimizer.DecayedAdagrad
.. autoclass:: paddle.optimizer.DecayedAdagrad
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_DecayedAdagradOptimizer:
.. _api_optimizer_DecayedAdagradOptimizer:
DecayedAdagradOptimizer
-----------------------
.. autoclass:: paddle.fluid.optimizer.DecayedAdagradOptimizer
.. autoclass:: paddle.optimizer.DecayedAdagradOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_Dpsgd:
.. _api_optimizer_Dpsgd:
Dpsgd
-----
.. autoclass:: paddle.fluid.optimizer.Dpsgd
.. autoclass:: paddle.optimizer.Dpsgd
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_DpsgdOptimizer:
.. _api_optimizer_DpsgdOptimizer:
DpsgdOptimizer
--------------
.. autoclass:: paddle.fluid.optimizer.DpsgdOptimizer
.. autoclass:: paddle.optimizer.DpsgdOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_ExponentialMovingAverage:
.. _api_optimizer_ExponentialMovingAverage:
ExponentialMovingAverage
------------------------
.. autoclass:: paddle.fluid.optimizer.ExponentialMovingAverage
.. autoclass:: paddle.optimizer.ExponentialMovingAverage
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_Ftrl:
.. _api_optimizer_Ftrl:
Ftrl
----
.. autoclass:: paddle.fluid.optimizer.Ftrl
.. autoclass:: paddle.optimizer.Ftrl
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_FtrlOptimizer:
.. _api_optimizer_FtrlOptimizer:
FtrlOptimizer
-------------
.. autoclass:: paddle.fluid.optimizer.FtrlOptimizer
.. autoclass:: paddle.optimizer.FtrlOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_LambOptimizer:
.. _api_optimizer_LambOptimizer:
LambOptimizer
-------------
.. autoclass:: paddle.fluid.optimizer.LambOptimizer
.. autoclass:: paddle.optimizer.LambOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_LarsMomentum:
.. _api_optimizer_LarsMomentum:
LarsMomentum
------------
.. autoclass:: paddle.fluid.optimizer.LarsMomentum
.. autoclass:: paddle.optimizer.LarsMomentum
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_LarsMomentumOptimizer:
.. _api_optimizer_LarsMomentumOptimizer:
LarsMomentumOptimizer
---------------------
.. autoclass:: paddle.fluid.optimizer.LarsMomentumOptimizer
.. autoclass:: paddle.optimizer.LarsMomentumOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_LookaheadOptimizer:
.. _api_optimizer_LookaheadOptimizer:
LookaheadOptimizer
------------------
.. autoclass:: paddle.fluid.optimizer.LookaheadOptimizer
.. autoclass:: paddle.optimizer.LookaheadOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_ModelAverage:
.. _api_optimizer_ModelAverage:
ModelAverage
------------
.. autoclass:: paddle.fluid.optimizer.ModelAverage
.. autoclass:: paddle.optimizer.ModelAverage
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_Momentum:
.. _api_optimizer_Momentum:
Momentum
--------
.. autoclass:: paddle.fluid.optimizer.Momentum
.. autoclass:: paddle.optimizer.Momentum
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_MomentumOptimizer:
.. _api_optimizer_MomentumOptimizer:
MomentumOptimizer
-----------------
.. autoclass:: paddle.fluid.optimizer.MomentumOptimizer
.. autoclass:: paddle.optimizer.MomentumOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_AdamaxOptimizer:
.. _api_optimizer_Optimizer:
AdamaxOptimizer
---------------
Optimizer
---------
.. autoclass:: paddle.fluid.optimizer.AdamaxOptimizer
.. autoclass:: paddle.optimizer.Optimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_RMSPropOptimizer:
.. _api_optimizer_RMSProp:
RMSPropOptimizer
----------------
RMSProp
-------
.. autoclass:: paddle.fluid.optimizer.RMSPropOptimizer
.. autoclass:: paddle.optimizer.RMSProp
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_RecomputeOptimizer:
.. _api_optimizer_RecomputeOptimizer:
RecomputeOptimizer
------------------
.. autoclass:: paddle.fluid.optimizer.RecomputeOptimizer
.. autoclass:: paddle.optimizer.RecomputeOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_SGD:
.. _api_optimizer_SGD:
SGD
---
.. autoclass:: paddle.fluid.optimizer.SGD
.. autoclass:: paddle.optimizer.SGD
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
.. _api_fluid_optimizer_SGDOptimizer:
.. _api_optimizer_SGDOptimizer:
SGDOptimizer
------------
.. autoclass:: paddle.fluid.optimizer.SGDOptimizer
.. autoclass:: paddle.optimizer.SGDOptimizer
:members:
:inherited-members:
:exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
......@@ -14,8 +14,7 @@ paddle.optimizer
optimizer_cn/AdagradOptimizer_cn.rst
optimizer_cn/Adam_cn.rst
optimizer_cn/Adamax_cn.rst
optimizer_cn/AdamaxOptimizer_cn.rst
optimizer_cn/AdamOptimizer_cn.rst
optimizer_cn/AdamW_cn.rst
optimizer_cn/DecayedAdagrad_cn.rst
optimizer_cn/DecayedAdagradOptimizer_cn.rst
optimizer_cn/DGCMomentumOptimizer_cn.rst
......@@ -32,9 +31,10 @@ paddle.optimizer
optimizer_cn/Momentum_cn.rst
optimizer_cn/MomentumOptimizer_cn.rst
optimizer_cn/RecomputeOptimizer_cn.rst
optimizer_cn/RMSPropOptimizer_cn.rst
optimizer_cn/RMSProp_cn.rst
optimizer_cn/SGD_cn.rst
optimizer_cn/SGDOptimizer_cn.rst
optimizer_cn/Optimizer_cn.rst
optimizer_cn/lr_scheduler_cn/CosineAnnealingLR_cn.rst
optimizer_cn/lr_scheduler_cn/ExponentialLR_cn.rst
optimizer_cn/lr_scheduler_cn/InverseTimeLR_cn.rst
......
.. _cn_api_fluid_optimizer_AdamOptimizer:
AdamOptimizer
-------------------------------
.. py:class:: paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameter_list=None, regularization=None, grad_clip=None, name=None, lazy_mode=False)
The Adam optimizer is implemented after Section 2 of the `Adam paper <https://arxiv.org/abs/1412.6980>`_. It uses estimates of the first and second moments of the gradients to adaptively adjust the learning rate of each parameter.
The parameter update formulas are as follows:
.. math::
    t = t + 1
.. math::
    moment\_1\_out = \beta_1 * moment\_1 + (1 - \beta_1) * grad
.. math::
    moment\_2\_out = \beta_2 * moment\_2 + (1 - \beta_2) * grad * grad
.. math::
    learning\_rate = learning\_rate * \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}
.. math::
    param\_out = param - learning\_rate * \frac{moment\_1}{\sqrt{moment\_2} + \epsilon}
Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_
Parameters:
    - **learning_rate** (float|Variable, optional) - The learning rate used to update the parameters. It can be a float value or a float Variable. The default value is 0.001.
    - **parameter_list** (list, optional) - The parameters to be optimized. This argument is required in dygraph mode. In static graph mode the default value is None, in which case all parameters are optimized.
    - **beta1** (float|Variable, optional) - The exponential decay rate of the first-moment estimates. It can be a float or a Variable with shape [1] and dtype float32. The default value is 0.9.
    - **beta2** (float|Variable, optional) - The exponential decay rate of the second-moment estimates. It can be a float or a Variable with shape [1] and dtype float32. The default value is 0.999.
    - **epsilon** (float, optional) - A small float value added for numerical stability. The default value is 1e-08.
    - **regularization** (WeightDecayRegularizer, optional) - The regularization strategy. Two strategies are supported: :ref:`cn_api_fluid_regularizer_L1Decay` and :ref:`cn_api_fluid_regularizer_L2Decay`. If a parameter already has a regularizer set in :ref:`cn_api_fluid_ParamAttr`, the regularization set here is ignored for that parameter; otherwise the setting here takes effect. The default value is None, which means no regularization.
    - **grad_clip** (GradientClipBase, optional) – The gradient clipping strategy. Three strategies are supported: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`, :ref:`cn_api_fluid_clip_GradientClipByNorm` and :ref:`cn_api_fluid_clip_GradientClipByValue`. The default value is None, in which case no gradient clipping is performed.
    - **name** (str, optional) - This parameter is used by developers when printing debugging information. For details, see :ref:`api_guide_Name`. The default value is None.
    - **lazy_mode** (bool, optional) - When set to True, only the elements that currently have a gradient are updated. The original Adam algorithm keeps two moving-average accumulators that are updated at every step; in both dense and sparse mode every element of the two moving averages is updated, which can be slow when the parameters are very large. Lazy mode updates only the elements that currently have a gradient, so it is faster, but it deviates from the original algorithm and may produce different results. The default value is False.
**Code Example**
.. code-block:: python
import paddle
import paddle.fluid as fluid
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
adam_optimizer = fluid.optimizer.AdamOptimizer(0.01)
adam_optimizer.minimize(avg_cost)
fetch_list = [avg_cost]
train_reader = paddle.batch(
paddle.dataset.uci_housing.train(), batch_size=1)
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for data in train_reader():
exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
.. code-block:: python
# Adam with beta1/beta2 as Variable
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
# define beta decay variable
def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate):
global_step = lr_scheduler._decay_step_counter()
beta1 = fluid.layers.create_global_var(
shape=[1],
value=float(beta1_init),
dtype='float32',
# set persistable for save checkpoints and resume
persistable=True,
name="beta1")
beta2 = fluid.layers.create_global_var(
shape=[1],
value=float(beta2_init),
dtype='float32',
# set persistable for save checkpoints and resume
persistable=True,
name="beta2")
div_res = global_step / decay_steps
decayed_beta1 = beta1_init * (decay_rate**div_res)
decayed_beta2 = beta2_init * (decay_rate**div_res)
fluid.layers.assign(decayed_beta1, beta1)
fluid.layers.assign(decayed_beta2, beta2)
return beta1, beta2
beta1, beta2 = get_decayed_betas(0.9, 0.99, 1e5, 0.9)
adam_optimizer = fluid.optimizer.AdamOptimizer(
learning_rate=0.01,
beta1=beta1,
beta2=beta2)
adam_optimizer.minimize(avg_cost)
fetch_list = [avg_cost]
train_reader = paddle.batch(
paddle.dataset.uci_housing.train(), batch_size=1)
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for data in train_reader():
exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None)
Adds backward computation to the network and, using the gradients obtained from the backward pass, updates the Parameters in parameter_list so as to minimize the network loss ``loss``.
Parameters:
    - **loss** (Variable) – The loss Variable to be minimized.
    - **startup_program** (Program, optional) – The :ref:`cn_api_fluid_Program` used to initialize the parameters in parameter_list. The default value is None, in which case :ref:`cn_api_fluid_default_startup_program` is used.
    - **parameter_list** (list, optional) – A list of Parameters or Parameter.name strings to be updated. The default value is None, in which case all Parameters are updated.
    - **no_grad_set** (set, optional) – A set of Parameters or Parameter.name strings that do not need to be updated. The default value is None.
Returns: tuple(optimize_ops, params_grads), where optimize_ops is the list of parameter-update operators and params_grads is a list of (param, param_grad) pairs of parameters and their gradients. This return value can be added to the ``fetch_list`` argument of ``Executor.run()``; if it is, ``use_prune`` is overridden to True and the program is pruned according to ``feed`` and ``fetch_list``. See the ``Executor`` documentation for details.
Return type: tuple
**Code Example**
.. code-block:: python
import numpy
import paddle.fluid as fluid
x = fluid.layers.data(name='X', shape=[13], dtype='float32')
y = fluid.layers.data(name='Y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
loss = fluid.layers.mean(cost)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.2)
adam.minimize(loss)
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
x = numpy.random.random(size=(10, 13)).astype('float32')
y = numpy.random.random(size=(10, 1)).astype('float32')
exe.run(fluid.default_startup_program())
outs = exe.run(program=fluid.default_main_program(),
feed={'X': x, 'Y': y},
fetch_list=[loss.name])
.. py:method:: clear_gradients()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Clears the gradients of the parameters to be optimized.
**Code Example**
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
value = np.arange(26).reshape(2, 13).astype("float32")
a = fluid.dygraph.to_variable(value)
linear = fluid.Linear(13, 5, dtype="float32")
optimizer = fluid.optimizer.Adam(learning_rate=0.02,
parameter_list=linear.parameters())
out = linear(a)
out.backward()
optimizer.minimize(out)
optimizer.clear_gradients()
.. py:method:: set_lr(value)
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Manually sets the learning rate of the current ``optimizer``. This API cannot be used to set the learning rate when LearningRateDecay is used, since the two would conflict.
Parameters:
    value (float|Variable) - The learning rate value to set.
Returns: None.
**Code Example**
.. code-block:: python
import paddle.fluid as fluid
with fluid.dygraph.guard():
linear = fluid.dygraph.nn.Linear(10, 10)
adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())
# set the learning rate manually with a python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
print("current lr is {}".format(adam.current_step_lr()))
# Prints:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
# set the learning rate with a framework Variable
lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32')
adam.set_lr(lr_var)
print("current lr is {}".format(adam.current_step_lr()))
# Prints:
# current lr is 0.7
.. py:method:: current_step_lr()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Gets the learning rate of the current step. When LearningRateDecay is not used, the return value is the same on every call; otherwise the learning rate of the current step is returned.
Returns: the learning rate of the current step.
Return type: float
**Code Example**
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# example1: LearningRateDecay is not used, return value is all the same
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding([10, 10])
adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters())
lr = adam.current_step_lr()
print(lr) # 0.001
# example2: PiecewiseDecay is used, return the step learning rate
with fluid.dygraph.guard():
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = fluid.dygraph.nn.Linear(10, 10)
inp = fluid.dygraph.to_variable(inp)
out = linear(inp)
loss = fluid.layers.reduce_mean(out)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0),
parameter_list=linear.parameters())
# first step: learning rate is 0.2
np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True
# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
adam.minimize(loss)
lr = adam.current_step_lr()
np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
.. _cn_api_paddle_optimizer_AdamW:
AdamW
-------------------------------
.. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.01, apply_decay_param_fun=None, grad_clip=None, name=None, lazy_mode=False)
The AdamW optimizer comes from `DECOUPLED WEIGHT DECAY REGULARIZATION <https://arxiv.org/pdf/1711.05101.pdf>`_ and addresses the problem that L2 regularization is ineffective in the Adam optimizer.
The parameter update formulas are as follows:
.. math::
    t = t + 1
.. math::
    moment\_1\_out = \beta_1 * moment\_1 + (1 - \beta_1) * grad
.. math::
    moment\_2\_out = \beta_2 * moment\_2 + (1 - \beta_2) * grad * grad
.. math::
    learning\_rate = learning\_rate * \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}
.. math::
    param\_out = param - learning\_rate * (\frac{moment\_1}{\sqrt{moment\_2} + \epsilon} + \lambda * param)
Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_
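To make the decoupled weight decay concrete, the following is a minimal NumPy sketch of one AdamW update for a single parameter, written directly from the formulas above (an illustration only, not the Paddle implementation; the function name and signature are made up for this example):
.. code-block:: python
    import numpy as np
    def adamw_step(param, grad, m1, m2, t, lr=0.001,
                   beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.01):
        """One hypothetical AdamW update following the formulas above."""
        t = t + 1
        m1 = beta1 * m1 + (1 - beta1) * grad            # first-moment estimate
        m2 = beta2 * m2 + (1 - beta2) * grad * grad     # second-moment estimate
        lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
        # The decay term (lambda * param) is applied to the parameter directly,
        # instead of being folded into the gradient as plain L2 regularization.
        param = param - lr_t * (m1 / (np.sqrt(m2) + eps) + weight_decay * param)
        return param, m1, m2, t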
Parameters:
    - **learning_rate** (float|_LRScheduler) - The learning rate used to update the parameters. It can be a float value or an _LRScheduler instance. The default value is 0.001.
    - **beta1** (float|Tensor, optional) - The exponential decay rate of the first-moment estimates. It can be a float or a Tensor with shape [1] and dtype float32. The default value is 0.9.
    - **beta2** (float|Tensor, optional) - The exponential decay rate of the second-moment estimates. It can be a float or a Tensor with shape [1] and dtype float32. The default value is 0.999.
    - **epsilon** (float, optional) - A small float value added for numerical stability. The default value is 1e-08.
    - **parameters** (list, optional) - The parameters to be optimized. This argument is required in dygraph mode. In static graph mode the default value is None, in which case all parameters are optimized.
    - **weight_decay** (float|Tensor, optional) - The weight decay coefficient. It can be a float or a Tensor with shape [1] and dtype float32. The default value is 0.01.
    - **apply_decay_param_fun** (function|None, optional) - If a function is passed, weight decay is applied only to the Tensors for which apply_decay_param_fun(Tensor)==True. Use it only when you want to restrict which parameters are decayed; see the sketch after this list. The default value is None.
    - **grad_clip** (GradientClipBase, optional) – The gradient clipping strategy. Three strategies are supported: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`, :ref:`cn_api_fluid_clip_GradientClipByNorm` and :ref:`cn_api_fluid_clip_GradientClipByValue`. The default value is None, in which case no gradient clipping is performed.
    - **name** (str, optional) - This parameter is used by developers when printing debugging information. For details, see :ref:`api_guide_Name`. The default value is None.
    - **lazy_mode** (bool, optional) - When set to True, only the elements that currently have a gradient are updated. The original Adam algorithm keeps two moving-average accumulators that are updated at every step; in both dense and sparse mode every element of the two moving averages is updated, which can be slow when the parameters are very large. Lazy mode updates only the elements that currently have a gradient, so it is faster, but it deviates from the original algorithm and may produce different results. The default value is False.
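A minimal dygraph sketch of ``apply_decay_param_fun``, assuming (as in current Paddle releases) that the callback receives each parameter's name and returns True for the parameters that should be decayed; the selection rule used here is purely illustrative:
.. code-block:: python
    import numpy as np
    import paddle
    paddle.disable_static()
    linear = paddle.nn.Linear(10, 10)
    # Illustrative rule: apply weight decay only to multi-dimensional
    # parameters (the weight matrices), not to the 1-D bias.
    decay_param_names = [p.name for p in linear.parameters() if len(p.shape) > 1]
    adamw = paddle.optimizer.AdamW(
        learning_rate=0.01,
        weight_decay=0.01,
        parameters=linear.parameters(),
        apply_decay_param_fun=lambda name: name in decay_param_names)
    inp = paddle.to_tensor(np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32"))
    loss = paddle.mean(linear(inp))
    loss.backward()
    adamw.step()
    adamw.clear_grad()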
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.AdamW(weight_decay=0.01, learning_rate=0.1,
parameters=linear.parameters())
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: step()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Executes one optimizer step and updates the parameters.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.AdamW(learning_rate = 0.01,
weight_decay = 0.01,
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None)
Adds backward computation to the network and, using the gradients obtained from the backward pass, updates the Parameters in ``parameters`` so as to minimize the network loss ``loss``.
Parameters:
    - **loss** (Tensor) – The loss Tensor to be minimized.
    - **startup_program** (Program, optional) – The :ref:`cn_api_fluid_Program` used to initialize the parameters in ``parameters``. The default value is None, in which case :ref:`cn_api_fluid_default_startup_program` is used.
    - **parameters** (list, optional) – A list of Parameters or Parameter.name strings to be updated. The default value is None, in which case all Parameters are updated.
    - **no_grad_set** (set, optional) – A set of Parameters or Parameter.name strings that do not need to be updated. The default value is None.
Returns: tuple(optimize_ops, params_grads), where optimize_ops is the list of parameter-update operators and params_grads is a list of (param, param_grad) pairs of parameters and their gradients. In static graph mode this return value can be added to the ``fetch_list`` argument of ``Executor.run()``; if it is, ``use_prune`` is overridden to True and the program is pruned according to ``feed`` and ``fetch_list``. See the ``Executor`` documentation for details.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.AdamW(learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=0.01)
out.backward()
adam.minimize(loss)
adam.clear_grad()
.. py:method:: clear_grad()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Clears the gradients of the parameters to be optimized.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
optimizer = paddle.optimizer.AdamW(weight_decay=0.01,
learning_rate=0.02,
parameters=linear.parameters())
out = linear(a)
out.backward()
optimizer.step()
optimizer.clear_grad()
.. py:method:: set_lr(value)
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Manually sets the learning rate of the current ``optimizer``. This API cannot be used to set the learning rate when an _LRScheduler is used, since the two would conflict.
Parameters:
    value (float) - The learning rate value to set.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
paddle.disable_static()
linear = paddle.nn.Linear(10, 10)
adam = paddle.optimizer.AdamW(weight_decay=0.01,
learning_rate=0.1, parameters=linear.parameters())
# set learning rate manually by python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
lr = adam.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
.. py:method:: get_lr()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Gets the learning rate of the current step. When no _LRScheduler is used, the return value is the same on every call; otherwise the learning rate of the current step is returned.
Returns: float, the learning rate of the current step.
**Code Example**
.. code-block:: python
import numpy as np
import paddle
# example1: _LRScheduler is not used, return value is all the same
paddle.disable_static()
emb = paddle.nn.Embedding(10, 10, sparse=False)
adam = paddle.optimizer.AdamW(learning_rate=0.001, parameters = emb.parameters(),weight_decay=0.01)
lr = adam.get_lr()
print(lr) # 0.001
# example2: PiecewiseLR is used, return the step learning rate
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.reduce_mean(out)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0)
adam = paddle.optimizer.AdamW(scheduler,
parameters=linear.parameters(),
weight_decay=0.01)
# first step: learning rate is 0.2
np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True
# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
adam.step()
lr = adam.get_lr()
scheduler.step()
np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
.. _cn_api_fluid_optimizer_Adam:
.. _cn_api_paddle_optimizer_Adam:
Adam
-------------------------------
.. py:attribute:: paddle.fluid.optimizer.Adam
.. py:class:: paddle.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None, lazy_mode=False)
Alias of ``AdamOptimizer``.
The Adam optimizer is implemented after Section 2 of the `Adam paper <https://arxiv.org/abs/1412.6980>`_. It uses estimates of the first and second moments of the gradients to adaptively adjust the learning rate of each parameter.
The parameter update formulas are as follows:
.. math::
    t = t + 1
.. math::
    moment\_1\_out = \beta_1 * moment\_1 + (1 - \beta_1) * grad
.. math::
    moment\_2\_out = \beta_2 * moment\_2 + (1 - \beta_2) * grad * grad
.. math::
    learning\_rate = learning\_rate * \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}
.. math::
    param\_out = param - learning\_rate * \frac{moment\_1}{\sqrt{moment\_2} + \epsilon}
Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_
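As a reference for the formulas above, here is a minimal NumPy sketch of one Adam update for a single parameter (an illustration only, not the Paddle implementation; the function name and signature are made up for this example):
.. code-block:: python
    import numpy as np
    def adam_step(param, grad, m1, m2, t, lr=0.001,
                  beta1=0.9, beta2=0.999, eps=1e-8):
        """One hypothetical Adam update following the formulas above."""
        t = t + 1
        m1 = beta1 * m1 + (1 - beta1) * grad            # first-moment estimate
        m2 = beta2 * m2 + (1 - beta2) * grad * grad     # second-moment estimate
        lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)  # bias-corrected step size
        param = param - lr_t * m1 / (np.sqrt(m2) + eps)
        return param, m1, m2, t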
Parameters:
    - **learning_rate** (float|_LRScheduler) - The learning rate used to update the parameters. It can be a float value or an _LRScheduler instance. The default value is 0.001.
    - **beta1** (float|Tensor, optional) - The exponential decay rate of the first-moment estimates. It can be a float or a Tensor with shape [1] and dtype float32. The default value is 0.9.
    - **beta2** (float|Tensor, optional) - The exponential decay rate of the second-moment estimates. It can be a float or a Tensor with shape [1] and dtype float32. The default value is 0.999.
    - **epsilon** (float, optional) - A small float value added for numerical stability. The default value is 1e-08.
    - **parameters** (list, optional) - The parameters to be optimized. This argument is required in dygraph mode. In static graph mode the default value is None, in which case all parameters are optimized.
    - **weight_decay** (float|WeightDecayRegularizer, optional) - The regularization strategy. It can be a float L2 regularization coefficient or a regularization strategy: :ref:`cn_api_fluid_regularizer_L1Decay` or :ref:`cn_api_fluid_regularizer_L2Decay`. If a parameter already has a regularizer set in :ref:`cn_api_fluid_ParamAttr`, the regularization set here is ignored for that parameter; otherwise the setting here takes effect. The default value is None, which means no regularization.
    - **grad_clip** (GradientClipBase, optional) – The gradient clipping strategy. Three strategies are supported: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`, :ref:`cn_api_fluid_clip_GradientClipByNorm` and :ref:`cn_api_fluid_clip_GradientClipByValue`. The default value is None, in which case no gradient clipping is performed.
    - **name** (str, optional) - This parameter is used by developers when printing debugging information. For details, see :ref:`api_guide_Name`. The default value is None.
    - **lazy_mode** (bool, optional) - When set to True, only the elements that currently have a gradient are updated. The original Adam algorithm keeps two moving-average accumulators that are updated at every step; in both dense and sparse mode every element of the two moving averages is updated, which can be slow when the parameters are very large. Lazy mode updates only the elements that currently have a gradient, so it is faster, but it deviates from the original algorithm and may produce different results. The default value is False.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters())
out.backward()
adam.step()
adam.clear_grad()
.. code-block:: python
# Adam with beta1/beta2 as Tensor and weight_decay as float
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters(),
beta1=beta1,
beta2=beta2,
weight_decay=0.01)
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: step()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Executes one optimizer step and updates the parameters.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None)
Adds backward computation to the network and, using the gradients obtained from the backward pass, updates the Parameters in ``parameters`` so as to minimize the network loss ``loss``.
Parameters:
    - **loss** (Tensor) – The loss Tensor to be minimized.
    - **startup_program** (Program, optional) – The :ref:`cn_api_fluid_Program` used to initialize the parameters in ``parameters``. The default value is None, in which case :ref:`cn_api_fluid_default_startup_program` is used.
    - **parameters** (list, optional) – A list of Parameters or Parameter.name strings to be updated. The default value is None, in which case all Parameters are updated.
    - **no_grad_set** (set, optional) – A set of Parameters or Parameter.name strings that do not need to be updated. The default value is None.
Returns: tuple(optimize_ops, params_grads), where optimize_ops is the list of parameter-update operators and params_grads is a list of (param, param_grad) pairs of parameters and their gradients. In static graph mode this return value can be added to the ``fetch_list`` argument of ``Executor.run()``; if it is, ``use_prune`` is overridden to True and the program is pruned according to ``feed`` and ``fetch_list``. See the ``Executor`` documentation for details.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=0.01)
out.backward()
adam.minimize(loss)
adam.clear_grad()
.. py:method:: clear_grad()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Clears the gradients of the parameters to be optimized.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
optimizer = paddle.optimizer.Adam(learning_rate=0.02,
parameters=linear.parameters())
out = linear(a)
out.backward()
optimizer.step()
optimizer.clear_grad()
.. py:method:: set_lr(value)
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Manually sets the learning rate of the current ``optimizer``. This API cannot be used to set the learning rate when an _LRScheduler is used, since the two would conflict.
Parameters:
    value (float) - The learning rate value to set.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
paddle.disable_static()
linear = paddle.nn.Linear(10, 10)
adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())
# set learning rate manually by python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
lr = adam.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
.. py:method:: get_lr()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Gets the learning rate of the current step. When no _LRScheduler is used, the return value is the same on every call; otherwise the learning rate of the current step is returned.
Returns: float, the learning rate of the current step.
**Code Example**
.. code-block:: python
import numpy as np
import paddle
# example1: _LRScheduler is not used, return value is all the same
paddle.disable_static()
emb = paddle.nn.Embedding(10, 10, sparse=False)
adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters())
lr = adam.get_lr()
print(lr) # 0.001
# example2: PiecewiseLR is used, return the step learning rate
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.reduce_mean(out)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0)
adam = paddle.optimizer.Adam(scheduler,
parameters=linear.parameters())
# first step: learning rate is 0.2
np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True
# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
adam.step()
lr = adam.get_lr()
scheduler.step()
np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
.. _cn_api_fluid_optimizer_AdamaxOptimizer:
AdamaxOptimizer
-------------------------------
.. py:class:: paddle.fluid.optimizer.AdamaxOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameter_list=None, regularization=None, grad_clip=None, name=None)
The Adamax optimizer is implemented after the Adamax section (Section 7) of the `Adam paper <https://arxiv.org/abs/1412.6980>`_. The Adamax algorithm is a variant of `Adam <https://arxiv.org/abs/1412.6980>`_ based on the infinity norm, which makes the learning-rate update simpler and more stable.
The parameter update formulas are as follows:
.. math::
    t = t + 1
.. math::
    moment\_out = \beta_1 * moment + (1 - \beta_1) * grad
.. math::
    inf\_norm\_out = \max(\beta_2 * inf\_norm + \epsilon, \left|grad\right|)
.. math::
    learning\_rate = \frac{learning\_rate}{1 - \beta_1^t}
.. math::
    param\_out = param - learning\_rate * \frac{moment\_out}{inf\_norm\_out}
Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_
The ``epsilon`` parameter does not appear in the paper. It is added here to keep numerical stability and avoid division by zero.
Parameters:
    - **learning_rate** (float|Variable, optional) - The learning rate used to update the parameters. It can be a float value or a float Variable. The default value is 0.001.
    - **beta1** (float, optional) - The exponential decay rate of the first-moment estimates. The default value is 0.9.
    - **beta2** (float, optional) - The exponential decay rate of the second-moment estimates. The default value is 0.999.
    - **epsilon** (float, optional) - A small float value added for numerical stability. The default value is 1e-08.
    - **parameter_list** (list, optional) - The parameters to be optimized. This argument is required in dygraph mode. In static graph mode the default value is None, in which case all parameters are optimized.
    - **regularization** (WeightDecayRegularizer, optional) - The regularization strategy. Two strategies are supported: :ref:`cn_api_fluid_regularizer_L1Decay` and :ref:`cn_api_fluid_regularizer_L2Decay`. If a parameter already has a regularizer set in :ref:`cn_api_fluid_ParamAttr`, the regularization set here is ignored for that parameter; otherwise the setting here takes effect. The default value is None, which means no regularization.
    - **grad_clip** (GradientClipBase, optional) – The gradient clipping strategy. Three strategies are supported: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`, :ref:`cn_api_fluid_clip_GradientClipByNorm` and :ref:`cn_api_fluid_clip_GradientClipByValue`. The default value is None, in which case no gradient clipping is performed.
    - **name** (str, optional) - This parameter is used by developers when printing debugging information. For details, see :ref:`api_guide_Name`. The default value is None.
.. note::
    ``AdamaxOptimizer`` currently does not support Sparse Parameter Optimization.
**Code Example**:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# First create the Executor.
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2)
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(startup_program)
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(program=train_program,
feed={'X': x},
fetch_list=[loss.name])
.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None)
Adds backward computation to the network and, using the gradients obtained from the backward pass, updates the Parameters in parameter_list so as to minimize the network loss ``loss``.
Parameters:
    - **loss** (Variable) – The loss Variable to be minimized.
    - **startup_program** (Program, optional) – The :ref:`cn_api_fluid_Program` used to initialize the parameters in parameter_list. The default value is None, in which case :ref:`cn_api_fluid_default_startup_program` is used.
    - **parameter_list** (list, optional) – A list of Parameters or Parameter.name strings to be updated. The default value is None, in which case all Parameters are updated.
    - **no_grad_set** (set, optional) – A set of Parameters or Parameter.name strings that do not need to be updated. The default value is None.
Returns: tuple(optimize_ops, params_grads), where optimize_ops is the list of parameter-update operators and params_grads is a list of (param, param_grad) pairs of parameters and their gradients. This return value can be added to the ``fetch_list`` argument of ``Executor.run()``; if it is, ``use_prune`` is overridden to True and the program is pruned according to ``feed`` and ``fetch_list``. See the ``Executor`` documentation for details.
**Code Example**:
.. code-block:: python
import numpy
import paddle.fluid as fluid
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adamax(learning_rate=0.2)
adam.minimize(loss)
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
x = numpy.random.random(size=(10, 1)).astype('float32')
exe.run(fluid.default_startup_program())
outs = exe.run(program=fluid.default_main_program(),
feed={'X': x},
fetch_list=[loss.name])
.. py:method:: clear_gradients()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Clears the gradients of the parameters to be optimized.
**Code Example**
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
value = np.arange(26).reshape(2, 13).astype("float32")
a = fluid.dygraph.to_variable(value)
linear = fluid.Linear(13, 5, dtype="float32")
optimizer = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2,
parameter_list=linear.parameters())
out = linear(a)
out.backward()
optimizer.minimize(out)
optimizer.clear_gradients()
.. py:method:: set_lr(value)
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Manually sets the learning rate of the current ``optimizer``. This API cannot be used to set the learning rate when LearningRateDecay is used, since the two would conflict.
Parameters:
    value (float|Variable) - The learning rate value to set.
Returns: None.
**Code Example**
.. code-block:: python
import paddle.fluid as fluid
with fluid.dygraph.guard():
linear = fluid.dygraph.nn.Linear(10, 10)
adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())
# set the learning rate manually with a python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
print("current lr is {}".format(adam.current_step_lr()))
# Prints:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
# set the learning rate with a framework Variable
lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32')
adam.set_lr(lr_var)
print("current lr is {}".format(adam.current_step_lr()))
# Prints:
# current lr is 0.7
.. py:method:: current_step_lr()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Gets the learning rate of the current step. When LearningRateDecay is not used, the return value is the same on every call; otherwise the learning rate of the current step is returned.
Returns: the learning rate of the current step.
Return type: float
**Code Example**
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# example1: LearningRateDecay is not used, return value is all the same
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding([10, 10])
adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters())
lr = adam.current_step_lr()
print(lr) # 0.001
# example2: PiecewiseDecay is used, return the step learning rate
with fluid.dygraph.guard():
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = fluid.dygraph.nn.Linear(10, 10)
inp = fluid.dygraph.to_variable(inp)
out = linear(inp)
loss = fluid.layers.reduce_mean(out)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0),
parameter_list=linear.parameters())
# first step: learning rate is 0.2
np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True
# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
adam.minimize(loss)
lr = adam.current_step_lr()
np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
.. _cn_api_fluid_optimizer_Adamax:
.. _cn_api_paddle_optimizer_Adamax:
Adamax
-------------------------------
.. py:attribute:: paddle.fluid.optimizer.Adamax
.. py:class:: paddle.optimizer.Adamax(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None)
Alias of ``AdamaxOptimizer``.
The Adamax optimizer is implemented after the Adamax section (Section 7) of the `Adam paper <https://arxiv.org/abs/1412.6980>`_. The Adamax algorithm is a variant of `Adam <https://arxiv.org/abs/1412.6980>`_ based on the infinity norm, which makes the learning-rate update simpler and more stable.
The parameter update formulas are as follows:
.. math::
    t = t + 1
.. math::
    moment\_out = \beta_1 * moment + (1 - \beta_1) * grad
.. math::
    inf\_norm\_out = \max(\beta_2 * inf\_norm + \epsilon, \left|grad\right|)
.. math::
    learning\_rate = \frac{learning\_rate}{1 - \beta_1^t}
.. math::
    param\_out = param - learning\_rate * \frac{moment\_out}{inf\_norm\_out}
Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_
The ``epsilon`` parameter does not appear in the paper. It is added here to keep numerical stability and avoid division by zero.
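As a reference for the formulas above, here is a minimal NumPy sketch of one Adamax update for a single parameter (an illustration only, not the Paddle implementation; the function name and signature are made up for this example):
.. code-block:: python
    import numpy as np
    def adamax_step(param, grad, moment, inf_norm, t, lr=0.001,
                    beta1=0.9, beta2=0.999, eps=1e-8):
        """One hypothetical Adamax update following the formulas above."""
        t = t + 1
        moment = beta1 * moment + (1 - beta1) * grad
        # exponentially weighted infinity norm of past gradients
        inf_norm = np.maximum(beta2 * inf_norm + eps, np.abs(grad))
        lr_t = lr / (1 - beta1 ** t)
        param = param - lr_t * moment / inf_norm
        return param, moment, inf_norm, t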
Parameters:
    - **learning_rate** (float|_LRScheduler) - The learning rate used to update the parameters. It can be a float value or an _LRScheduler instance. The default value is 0.001.
    - **beta1** (float, optional) - The exponential decay rate of the first-moment estimates. The default value is 0.9.
    - **beta2** (float, optional) - The exponential decay rate of the second-moment estimates. The default value is 0.999.
    - **epsilon** (float, optional) - A small float value added for numerical stability. The default value is 1e-08.
    - **parameters** (list, optional) - The parameters to be optimized. This argument is required in dygraph mode. In static graph mode the default value is None, in which case all parameters are optimized.
    - **weight_decay** (float|WeightDecayRegularizer, optional) - The regularization strategy. It can be a float L2 regularization coefficient or a regularization strategy: :ref:`cn_api_fluid_regularizer_L1Decay` or :ref:`cn_api_fluid_regularizer_L2Decay`. If a parameter already has a regularizer set in :ref:`cn_api_fluid_ParamAttr`, the regularization set here is ignored for that parameter; otherwise the setting here takes effect. The default value is None, which means no regularization.
    - **grad_clip** (GradientClipBase, optional) – The gradient clipping strategy. Three strategies are supported: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`, :ref:`cn_api_fluid_clip_GradientClipByNorm` and :ref:`cn_api_fluid_clip_GradientClipByValue`. The default value is None, in which case no gradient clipping is performed.
    - **name** (str, optional) - This parameter is used by developers when printing debugging information. For details, see :ref:`api_guide_Name`. The default value is None.
.. note::
    ``Adamax`` currently does not support Sparse Parameter Optimization.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adamax(learning_rate=0.1,
parameters=linear.parameters())
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: step()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Executes one optimizer step and updates the parameters.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adamax(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None)
Adds backward computation to the network and, using the gradients obtained from the backward pass, updates the Parameters in ``parameters`` so as to minimize the network loss ``loss``.
Parameters:
    - **loss** (Tensor) – The loss Tensor to be minimized.
    - **startup_program** (Program, optional) – The :ref:`cn_api_fluid_Program` used to initialize the parameters in ``parameters``. The default value is None, in which case :ref:`cn_api_fluid_default_startup_program` is used.
    - **parameters** (list, optional) – A list of Parameters or Parameter.name strings to be updated. The default value is None, in which case all Parameters are updated.
    - **no_grad_set** (set, optional) – A set of Parameters or Parameter.name strings that do not need to be updated. The default value is None.
Returns: tuple(optimize_ops, params_grads), where optimize_ops is the list of parameter-update operators and params_grads is a list of (param, param_grad) pairs of parameters and their gradients. In static graph mode this return value can be added to the ``fetch_list`` argument of ``Executor.run()``; if it is, ``use_prune`` is overridden to True and the program is pruned according to ``feed`` and ``fetch_list``. See the ``Executor`` documentation for details.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.Adamax(learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=0.01)
out.backward()
adam.minimize(loss)
adam.clear_grad()
.. py:method:: clear_grad()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Clears the gradients of the parameters to be optimized.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
optimizer = paddle.optimizer.Adamax(learning_rate=0.02,
parameters=linear.parameters())
out = linear(a)
out.backward()
optimizer.step()
optimizer.clear_grad()
.. py:method:: set_lr(value)
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Manually sets the learning rate of the current ``optimizer``. This API cannot be used to set the learning rate when an _LRScheduler is used, since the two would conflict.
Parameters:
    value (float) - The learning rate value to set.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
paddle.disable_static()
linear = paddle.nn.Linear(10, 10)
adam = paddle.optimizer.Adamax(0.1, parameters=linear.parameters())
# set learning rate manually by python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
lr = adam.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
.. py:method:: get_lr()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Gets the learning rate of the current step. When no _LRScheduler is used, the return value is the same on every call; otherwise the learning rate of the current step is returned.
Returns: float, the learning rate of the current step.
**Code Example**
.. code-block:: python
import numpy as np
import paddle
# example1: _LRScheduler is not used, return value is all the same
paddle.disable_static()
emb = paddle.nn.Embedding(10, 10, sparse=False)
adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters())
lr = adam.get_lr()
print(lr) # 0.001
# example2: PiecewiseLR is used, return the step learning rate
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.reduce_mean(out)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0)
adam = paddle.optimizer.Adamax(scheduler,
parameters=linear.parameters())
# first step: learning rate is 0.2
np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True
# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
adam.step()
lr = adam.get_lr()
scheduler.step()
np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
.. _cn_api_paddle_optimizer_Optimizer:
Optimizer
-------------------------------
.. py:class:: paddle.optimizer.Optimizer(learning_rate=0.001, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None)
Base class for optimizers.
Parameters:
    - **learning_rate** (float|_LRScheduler) - The learning rate used to update the parameters. It can be a float value or an _LRScheduler instance. The default value is 0.001.
    - **parameters** (list, optional) - The parameters to be optimized. This argument is required in dygraph mode. In static graph mode the default value is None, in which case all parameters are optimized.
    - **weight_decay** (float|WeightDecayRegularizer, optional) - The regularization strategy. It can be a float L2 regularization coefficient or a regularization strategy: :ref:`cn_api_fluid_regularizer_L1Decay` or :ref:`cn_api_fluid_regularizer_L2Decay`. If a parameter already has a regularizer set in :ref:`cn_api_fluid_ParamAttr`, the regularization set here is ignored for that parameter; otherwise the setting here takes effect. The default value is None, which means no regularization.
    - **grad_clip** (GradientClipBase, optional) – The gradient clipping strategy. Three strategies are supported: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`, :ref:`cn_api_fluid_clip_GradientClipByNorm` and :ref:`cn_api_fluid_clip_GradientClipByValue`. The default value is None, in which case no gradient clipping is performed.
    - **name** (str, optional) - This parameter is used by developers when printing debugging information. For details, see :ref:`api_guide_Name`. The default value is None.
**Code Example**
.. code-block:: python
# take the subclass Adam as an example
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters())
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: step()
**Note:**
**1. This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode.**
Executes one optimizer step and updates the parameters.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
adam.clear_grad()
.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None)
Adds backward computation to the network and, using the gradients obtained from the backward pass, updates the Parameters in ``parameters`` so as to minimize the network loss ``loss``.
Parameters:
    - **loss** (Tensor) – The loss Tensor to be minimized.
    - **startup_program** (Program, optional) – The :ref:`cn_api_fluid_Program` used to initialize the parameters in ``parameters``. The default value is None, in which case :ref:`cn_api_fluid_default_startup_program` is used.
    - **parameters** (list, optional) – A list of Parameters or Parameter.name strings to be updated. The default value is None, in which case all Parameters are updated.
    - **no_grad_set** (set, optional) – A set of Parameters or Parameter.name strings that do not need to be updated. The default value is None.
Returns: tuple(optimize_ops, params_grads), where optimize_ops is the list of parameter-update operators and params_grads is a list of (param, param_grad) pairs of parameters and their gradients. In static graph mode this return value can be added to the ``fetch_list`` argument of ``Executor.run()``; if it is, ``use_prune`` is overridden to True and the program is pruned according to ``feed`` and ``fetch_list``. See the ``Executor`` documentation for details.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=0.01)
out.backward()
adam.minimize(loss)
adam.clear_grad()
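The return value described above is mainly useful in static graph mode. Below is a minimal static-graph sketch, assuming the ``paddle.static`` API of this release (``paddle.enable_static``, ``paddle.static.data``, ``paddle.static.nn.fc``, ``paddle.static.Executor``); it is an illustration, not the canonical usage.
.. code-block:: python
# A hedged static-graph sketch of minimize(); assumes the paddle.static API
# of this release. minimize() returns (optimize_ops, params_grads).
import numpy as np
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[None, 10], dtype='float32')
pred = paddle.static.nn.fc(x, 1)
loss = paddle.mean(pred)
adam = paddle.optimizer.Adam(learning_rate=0.1)
optimize_ops, params_grads = adam.minimize(loss)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
loss_val, = exe.run(feed={'x': np.random.rand(4, 10).astype('float32')},
fetch_list=[loss])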
.. py:method:: clear_grad()
**Note:**
**1. This API only takes effect in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode**
Clear the gradients of the parameters to be optimized.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
optimizer = paddle.optimizer.Adam(learning_rate=0.02,
parameters=linear.parameters())
out = linear(a)
out.backward()
optimizer.step()
optimizer.clear_grad()
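Because gradients accumulate across ``backward()`` calls in dygraph mode, ``clear_grad()`` is normally called once per iteration. A minimal training-loop sketch (with randomly generated data, purely for illustration):
.. code-block:: python
# A hedged sketch of where clear_grad() fits in a training loop.
# Gradients accumulate across backward() calls, so clear them after each update.
import numpy as np
import paddle
paddle.disable_static()
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01,
parameters=linear.parameters())
for step in range(3):
    x = paddle.to_tensor(np.random.rand(2, 13).astype("float32"))
    loss = paddle.mean(linear(x))
    loss.backward()
    adam.step()
    adam.clear_grad()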
.. py:method:: set_lr(value)
**Note:**
**1. This API only takes effect in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode**
Manually set the learning rate of the current ``optimizer``. When an _LRScheduler is used, this API cannot be used to set the learning rate manually, because that would conflict with the scheduler.
Parameters:
value (float) - the learning rate value to set.
Returns: None
**Code Example**
.. code-block:: python
import paddle
paddle.disable_static()
linear = paddle.nn.Linear(10, 10)
adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())
# set learning rate manually by python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
lr = adam.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
.. py:method:: get_lr()
**Note:**
**1. This API only takes effect in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode**
Get the learning rate of the current step. When no _LRScheduler is used, every call returns the same value; otherwise the learning rate of the current step is returned.
Returns: float, the learning rate of the current step.
**Code Example**
.. code-block:: python
import numpy as np
import paddle
# example1: _LRScheduler is not used, return value is all the same
paddle.disable_static()
emb = paddle.nn.Embedding(10, 10, sparse=False)
adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters())
lr = adam.get_lr()
print(lr) # 0.001
# example2: PiecewiseLR is used, return the step learning rate
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0)
adam = paddle.optimizer.Adam(scheduler,
parameters=linear.parameters())
# first step: learning rate is 0.2
np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True
# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
adam.step()
lr = adam.get_lr()
scheduler.step()
np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
.. _cn_api_paddle_optimizer_RMSProp:
RMSProp
-------------------------------
.. py:class:: paddle.optimizer.RMSProp(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameters=None, weight_decay=None, grad_clip=None, name=None)
Parameters:
- **learning_rate** (float|_LRScheduler) - the learning rate used to update the parameters. It can be a float value or an _LRScheduler instance.
- **rho** (float, optional) - :math:`\rho` in the update equation. The default value is 0.95.
- **epsilon** (float, optional) - :math:`\epsilon` in the update equation, a smoothing term that avoids division by zero. The default value is 1e-6.
- **momentum** (float, optional) - :math:`\beta` in the update equation, the momentum term. The default value is 0.0.
- **centered** (bool, optional) - if True, gradients are normalized by their estimated variance; if False, by the uncentered second moment. Setting this to True may help training, but costs extra computation and memory. The default value is False.
- **parameters** (list, optional) - the parameters to be optimized. This argument is required in dygraph mode; in static graph mode the default value is None, in which case all parameters are optimized.
- **weight_decay** (float|WeightDecayRegularizer, optional) - the regularization strategy. It can be a float L2 regularization coefficient or a regularizer: :ref:`cn_api_fluid_regularizer_L1Decay` or :ref:`cn_api_fluid_regularizer_L2Decay` . If a parameter already has a regularizer set in :ref:`cn_api_fluid_ParamAttr` , the regularization here is ignored for that parameter; otherwise the setting here takes effect. The default value is None, meaning no regularization.
- **grad_clip** (GradientClipBase, optional) - the gradient clipping strategy. Three clipping strategies are supported: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` , :ref:`cn_api_fluid_clip_GradientClipByNorm` and :ref:`cn_api_fluid_clip_GradientClipByValue` . The default value is None, meaning no gradient clipping.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
rmsprop = paddle.optimizer.RMSProp(learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=0.01)
out.backward()
rmsprop.step()
rmsprop.clear_grad()
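The ``rho``, ``epsilon``, ``momentum`` and ``centered`` arguments can be combined; the sketch below (hyper-parameter values chosen only for illustration) shows a centered RMSProp with a momentum term:
.. code-block:: python
# A hedged sketch of centered RMSProp with momentum; the hyper-parameter
# values are illustrative only.
import numpy as np
import paddle
paddle.disable_static()
linear = paddle.nn.Linear(10, 1)
inp = paddle.to_tensor(np.random.uniform(-0.1, 0.1, [4, 10]).astype("float32"))
loss = paddle.mean(linear(inp))
rmsprop = paddle.optimizer.RMSProp(learning_rate=0.01,
rho=0.9,
epsilon=1e-6,
momentum=0.9,
centered=True,
parameters=linear.parameters())
loss.backward()
rmsprop.step()
rmsprop.clear_grad()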
.. py:method:: step()
**Note:**
**1. This API only takes effect in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode**
Execute one step of the optimizer and update the parameters.
Returns: None.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
rmsprop = paddle.optimizer.RMSProp(learning_rate=0.01,
parameters=linear.parameters())
out = linear(a)
out.backward()
rmsprop.step()
rmsprop.clear_grad()
.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None)
Add the backward pass to the network and, using the gradients computed by it, update the Parameters in ``parameters`` to minimize the network loss ``loss``.
Parameters:
- **loss** (Tensor) - the loss Tensor to minimize.
- **startup_program** (Program, optional) - the :ref:`cn_api_fluid_Program` used to initialize the parameters in ``parameters``. The default value is None, in which case :ref:`cn_api_fluid_default_startup_program` is used.
- **parameters** (list, optional) - a list of Parameters or Parameter.name to update. The default value is None, in which case all Parameters are updated.
- **no_grad_set** (set, optional) - a set of Parameters or Parameter.name that should not be updated. The default value is None.
Returns: tuple(optimize_ops, params_grads), where optimize_ops is the list of optimization OPs added for the parameters, and params_grads is a list of (param, param_grad) pairs of parameters and their gradients. In static graph mode the returned value can be added to the ``fetch_list`` of ``Executor.run()``; if it is, ``use_prune`` is overridden to True and the program is pruned according to ``feed`` and ``fetch_list``. See the ``Executor`` documentation for details.
Return type: tuple
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
rmsprop = paddle.optimizer.RMSProp(learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=0.01)
out.backward()
rmsprop.minimize(loss)
rmsprop.clear_grad()
.. py:method:: clear_grad()
**Note:**
**1. This API only takes effect in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode**
Clear the gradients of the parameters to be optimized.
**Code Example**
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
optimizer = paddle.optimizer.RMSProp(learning_rate=0.02,
parameters=linear.parameters())
out = linear(a)
out.backward()
optimizer.step()
optimizer.clear_grad()
.. py:method:: set_lr(value)
**Note:**
**1. This API only takes effect in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode**
Manually set the learning rate of the current ``optimizer``. When an _LRScheduler is used, this API cannot be used to set the learning rate manually, because that would conflict with the scheduler.
Parameters:
value (float) - the learning rate value to set.
Returns: None
**Code Example**
.. code-block:: python
import paddle
paddle.disable_static()
linear = paddle.nn.Linear(10, 10)
rmsprop = paddle.optimizer.RMSProp(0.1, parameters=linear.parameters())
# set learning rate manually by python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
rmsprop.set_lr(lr_list[i])
lr = rmsprop.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
.. py:method:: get_lr()
**Note:**
**1. This API only takes effect in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode**
Get the learning rate of the current step. When no _LRScheduler is used, every call returns the same value; otherwise the learning rate of the current step is returned.
Returns: float, the learning rate of the current step.
Return type: float
**Code Example**
.. code-block:: python
import numpy as np
import paddle
# example1: _LRScheduler is not used, return value is all the same
paddle.disable_static()
emb = paddle.nn.Embedding(10, 10, sparse=False)
rmsprop = paddle.optimizer.RMSProp(0.001, parameters=emb.parameters())
lr = rmsprop.get_lr()
print(lr) # 0.001
# example2: PiecewiseLR is used, return the step learning rate
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0)
rmsprop = paddle.optimizer.RMSProp(scheduler,
parameters=linear.parameters())
# first step: learning rate is 0.2
np.allclose(rmsprop.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True
# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
rmsprop.step()
lr = rmsprop.get_lr()
scheduler.step()
np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True