Unverified commit 4bf115b4 — authored by Aurelius84, committed via GitHub

Fix AdamOptimizer and Scale sample code bugs (#21478)

* fix adam sample code bug test=document_fix

* fix sample code bug in scale test=document_fix
Parent: b39c0116
......@@ -10199,7 +10199,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
scale = fluid.layers.data(name="scale", shape=[1], dtype='float32'
scale = fluid.layers.data(name="scale", shape=[1], dtype='float32',
append_batch_size=False)
output = fluid.layers.scale(inputs, scale = scale, bias = 1.0)
......
......@@ -1549,7 +1549,7 @@ class AdamOptimizer(Optimizer):
avg_cost = fluid.layers.mean(cost)
# define beta decay variable
def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate)
def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate):
global_step = lr_scheduler._decay_step_counter()
beta1 = fluid.layers.create_global_var(
......@@ -1578,7 +1578,7 @@ class AdamOptimizer(Optimizer):
beta1, beta2 = get_decayed_betas(0.9, 0.99, 1e5, 0.9)
adam_optimizer = fluid.optimizer.AdamOptimizer(
learning_rate=0.01,
beta1=beta1
beta1=beta1,
beta2=beta2)
adam_optimizer.minimize(avg_cost)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册