Unverified commit 361a5393, authored by Zhou Wei, committed by GitHub

fix doc of save/load (#28645)

Parent d1e84f3e
...
@@ -228,7 +228,6 @@ def save(obj, path):
             emb = paddle.nn.Embedding(10, 10)
             layer_state_dict = emb.state_dict()
             paddle.save(layer_state_dict, "emb.pdparams")
-
             scheduler = paddle.optimizer.lr.NoamDecay(
                 d_model=0.01, warmup_steps=100, verbose=True)
             adam = paddle.optimizer.Adam(
...
@@ -320,7 +319,6 @@ def load(path, **configs):
             emb = paddle.nn.Embedding(10, 10)
             layer_state_dict = emb.state_dict()
             paddle.save(layer_state_dict, "emb.pdparams")
-
             scheduler = paddle.optimizer.lr.NoamDecay(
                 d_model=0.01, warmup_steps=100, verbose=True)
             adam = paddle.optimizer.Adam(
...
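For context, the docstring examples these two hunks trim demonstrate a save/load round trip for a layer and its optimizer. A minimal runnable sketch of the same pattern, assuming Paddle 2.0 dynamic-graph mode (the "adam.pdopt" path follows the convention used elsewhere in these docstrings and is illustrative):

    import paddle

    # Toy layer + optimizer mirroring the docstring example in this diff.
    emb = paddle.nn.Embedding(10, 10)
    scheduler = paddle.optimizer.lr.NoamDecay(
        d_model=0.01, warmup_steps=100, verbose=True)
    adam = paddle.optimizer.Adam(
        learning_rate=scheduler, parameters=emb.parameters())

    # paddle.save writes one picklable object per call; the layer and
    # optimizer state dicts go to separate files by convention.
    paddle.save(emb.state_dict(), "emb.pdparams")
    paddle.save(adam.state_dict(), "adam.pdopt")

    # paddle.load reads the objects back; set_state_dict restores them.
    layer_state_dict = paddle.load("emb.pdparams")
    opt_state_dict = paddle.load("adam.pdopt")
    emb.set_state_dict(layer_state_dict)
    adam.set_state_dict(opt_state_dict)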
...
@@ -1434,7 +1434,8 @@ class CosineAnnealingDecay(LRScheduler):
                 loss.backward()
                 sgd.step()
                 sgd.clear_gradients()
-                scheduler.step()
+                scheduler.step()    # If you update learning rate each step
+                # scheduler.step()  # If you update learning rate each epoch

             # train on static graph mode
             paddle.enable_static()
...
@@ -1460,7 +1461,8 @@ class CosineAnnealingDecay(LRScheduler):
                         'y': np.random.randn(3, 4, 5).astype('float32')
                     },
                     fetch_list=loss.name)
-                scheduler.step()
+                scheduler.step()    # If you update learning rate each step
+                # scheduler.step()  # If you update learning rate each epoch
     """

     def __init__(self,
...
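The two comment lines added by these hunks distinguish stepping the scheduler once per iteration from once per epoch. A minimal dynamic-graph sketch of that pattern, assuming Paddle 2.0 (the shapes and loop counts are illustrative):

    import paddle

    # Toy setup mirroring the CosineAnnealingDecay docstring example.
    linear = paddle.nn.Linear(10, 10)
    scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
        learning_rate=0.5, T_max=10, verbose=True)
    sgd = paddle.optimizer.SGD(learning_rate=scheduler,
                               parameters=linear.parameters())

    for epoch in range(5):
        for batch_id in range(3):
            x = paddle.uniform([4, 10])
            out = linear(x)
            loss = paddle.mean(out)
            loss.backward()
            sgd.step()
            sgd.clear_gradients()
            scheduler.step()    # If you update learning rate each step
        # scheduler.step()      # If you update learning rate each epoch

Calling scheduler.step() inside the inner loop advances the learning rate every iteration; moving it to the outer loop (the commented line) advances it once per epoch, which changes how fast the cosine schedule decays relative to the data.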