Unverified · Commit 0fd50551 · Authored by: LoneRanger · Committed by: GitHub

fix the example code (#55053)

Parent ff7e6ec5
@@ -580,17 +580,18 @@ class Optimizer:
                 bd = [2, 4, 6, 8]
                 value = [0.2, 0.4, 0.6, 0.8, 1.0]
-                adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0),
-                                            parameter_list=linear.parameters())
+                adam = paddle.optimizer.Adam(paddle.optimizer.lr.PiecewiseDecay(bd, value),
+                                             parameters=linear.parameters())
                 # first step: learning rate is 0.2
-                np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True
+                np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True
                 # learning rate for different steps
                 ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
                 for i in range(12):
                     adam.minimize(loss)
-                    lr = adam.current_step_lr()
+                    adam.step()
+                    lr = adam.get_lr()
                     np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
         """
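For context, the diff migrates the docstring example from the legacy `fluid` API (`fluid.dygraph.PiecewiseDecay`, `parameter_list`, `current_step_lr()`) to the Paddle 2.x API (`paddle.optimizer.lr.PiecewiseDecay`, `parameters`, `get_lr()`). Below is a minimal runnable sketch of the same flow, assuming Paddle 2.x; the `Linear` layer, random input, mean loss, and the explicit `scheduler.step()` / `clear_grad()` calls are stand-ins added for illustration and do not appear in the diff itself.

```python
# Minimal runnable sketch of the updated example (assumes Paddle 2.x).
# The Linear layer, random input, and mean loss are illustrative stand-ins.
import numpy as np
import paddle

linear = paddle.nn.Linear(10, 10)
bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, values=value)
adam = paddle.optimizer.Adam(learning_rate=scheduler,
                             parameters=linear.parameters())

# first step: learning rate is 0.2
assert np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0)

# learning rate for different steps
ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
for i in range(12):
    loss = paddle.mean(linear(paddle.rand([3, 10])))
    loss.backward()
    adam.step()        # update parameters with the current learning rate
    adam.clear_grad()
    assert np.allclose(adam.get_lr(), ret[i], rtol=1e-06, atol=0.0)
    scheduler.step()   # advance the piecewise schedule by one step
```

With `boundaries=[2, 4, 6, 8]` and `values=[0.2, 0.4, 0.6, 0.8, 1.0]`, the schedule holds `values[k]` until the step count reaches `boundaries[k]`, so each rate is returned for two consecutive steps, matching the `ret` list in the docstring.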