From 361a53930f9162bb79af4f0d985350b44e84c762 Mon Sep 17 00:00:00 2001
From: Zhou Wei <52485244+zhouwei25@users.noreply.github.com>
Date: Mon, 16 Nov 2020 22:26:00 +0800
Subject: [PATCH] fix doc of save/load (#28645)

---
 python/paddle/framework/io.py | 2 --
 python/paddle/optimizer/lr.py | 6 ++++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py
index 945c8160b4..d794fce5e3 100644
--- a/python/paddle/framework/io.py
+++ b/python/paddle/framework/io.py
@@ -228,7 +228,6 @@ def save(obj, path):
             emb = paddle.nn.Embedding(10, 10)
             layer_state_dict = emb.state_dict()
             paddle.save(layer_state_dict, "emb.pdparams")
-
             scheduler = paddle.optimizer.lr.NoamDecay(
                 d_model=0.01, warmup_steps=100, verbose=True)
             adam = paddle.optimizer.Adam(
@@ -320,7 +319,6 @@ def load(path, **configs):
             emb = paddle.nn.Embedding(10, 10)
             layer_state_dict = emb.state_dict()
             paddle.save(layer_state_dict, "emb.pdparams")
-
             scheduler = paddle.optimizer.lr.NoamDecay(
                 d_model=0.01, warmup_steps=100, verbose=True)
             adam = paddle.optimizer.Adam(
diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py
index e4fb54c229..2d5dc5d998 100644
--- a/python/paddle/optimizer/lr.py
+++ b/python/paddle/optimizer/lr.py
@@ -1434,7 +1434,8 @@ class CosineAnnealingDecay(LRScheduler):
                     loss.backward()
                     sgd.step()
                     sgd.clear_gradients()
-                    scheduler.step()
+                    scheduler.step()    # If you update learning rate each step
+                # scheduler.step()        # If you update learning rate each epoch

             # train on static graph mode
             paddle.enable_static()
@@ -1460,7 +1461,8 @@ class CosineAnnealingDecay(LRScheduler):
                             'y': np.random.randn(3, 4, 5).astype('float32')
                         },
                         fetch_list=loss.name)
-                    scheduler.step()
+                    scheduler.step()    # If you update learning rate each step
+                # scheduler.step()        # If you update learning rate each epoch
     """

     def __init__(self,
-- 
GitLab
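
Note: the hunks above touch docstring examples only. For context, here is a minimal runnable sketch assembled from those same docstring fragments, assuming Paddle >= 2.0 in dynamic graph mode; the model, loop sizes, and the "linear.pdparams"/"sgd.pdopt" file names are illustrative choices, not mandated by the patch:

    # Sketch of the usage the edited docstrings describe (assumes Paddle >= 2.0,
    # dynamic graph mode).
    import paddle

    linear = paddle.nn.Linear(10, 10)
    scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
        learning_rate=0.5, T_max=10, verbose=True)
    sgd = paddle.optimizer.SGD(
        learning_rate=scheduler, parameters=linear.parameters())

    for epoch in range(20):
        for batch_id in range(2):
            x = paddle.uniform([10, 10])
            out = linear(x)
            loss = paddle.mean(out)
            loss.backward()
            sgd.step()
            sgd.clear_gradients()
            scheduler.step()    # If you update learning rate each step
        # scheduler.step()      # If you update learning rate each epoch

    # Persist and restore layer and optimizer state, as in the save/load
    # docstrings this patch edits (file names here are hypothetical).
    paddle.save(linear.state_dict(), "linear.pdparams")
    paddle.save(sgd.state_dict(), "sgd.pdopt")

    layer_state_dict = paddle.load("linear.pdparams")
    opt_state_dict = paddle.load("sgd.pdopt")
    linear.set_state_dict(layer_state_dict)
    sgd.set_state_dict(opt_state_dict)

The point the added comments make: scheduler.step() is deliberately outside the optimizer; calling it once per batch or once per epoch are both valid, and the choice changes how fast the learning rate anneals.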