Unverified commit 19639e31, authored by fengjiayi, committed by GitHub

Merge pull request #12254 from JiayiFeng/fix_lr_decay

Fix learning rate scheduler performance issue
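This patch touches the Fluid learning-rate scheduler layer (`noam_decay`, `exponential_decay`, `natural_exp_decay`, `inverse_time_decay`, `polynomial_decay`, `piecewise_decay`, `append_LARS`) and its unit test (`TestLearningRateDecay`). The scheduler functions drop their `with init_on_cpu():` wrappers, which appear to have been the source of the performance issue by pinning the per-step decay computation to the CPU; `piecewise_decay` instead pins only its boundary constants with `force_cpu=True`, and the test now builds explicit main/startup programs under `fluid.program_guard` instead of relying on the process-wide defaults.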
```diff
@@ -62,10 +62,10 @@ def noam_decay(d_model, warmup_steps):
         The decayed learning rate.
     """
     global_step = _decay_step_counter(1)
-    with init_on_cpu():
-        a = global_step**-0.5
-        b = (warmup_steps**-1.5) * global_step
-        lr_value = (d_model**-0.5) * ops.elementwise_min(a, b)
+    a = global_step**-0.5
+    b = (warmup_steps**-1.5) * global_step
+    lr_value = (d_model**-0.5) * ops.elementwise_min(a, b)
 
     return lr_value
```
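For reference, a minimal NumPy sketch of the schedule that `noam_decay` computes (an illustration, not part of the patch); `step` stands for the value produced by `_decay_step_counter(1)`:

```python
import numpy as np

def noam_decay_ref(d_model, warmup_steps, step):
    # Noam schedule as written in the diff:
    # d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
    a = step ** -0.5
    b = (warmup_steps ** -1.5) * step
    return (d_model ** -0.5) * np.minimum(a, b)

# e.g. noam_decay_ref(d_model=512, warmup_steps=4000, step=1.0)
```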
```diff
@@ -108,12 +108,10 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        # update learning_rate
-        div_res = global_step / decay_steps
-        if staircase:
-            div_res = ops.floor(div_res)
-        decayed_lr = learning_rate * (decay_rate**div_res)
+    div_res = global_step / decay_steps
+    if staircase:
+        div_res = ops.floor(div_res)
+    decayed_lr = learning_rate * (decay_rate**div_res)
 
     return decayed_lr
```
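A plain-Python reference of `exponential_decay` as it reads after the change (illustration only):

```python
import math

def exponential_decay_ref(learning_rate, decay_steps, decay_rate, step, staircase=False):
    # decayed_lr = learning_rate * decay_rate ** (step / decay_steps), optionally floored
    div_res = step / decay_steps
    if staircase:
        div_res = math.floor(div_res)
    return learning_rate * decay_rate ** div_res
```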
```diff
@@ -138,11 +136,10 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        div_res = global_step / decay_steps
-        if staircase:
-            div_res = ops.floor(div_res)
-        decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
+    div_res = global_step / decay_steps
+    if staircase:
+        div_res = ops.floor(div_res)
+    decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
 
     return decayed_lr
```
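Likewise for `natural_exp_decay`, which decays by `exp(-decay_rate * div_res)` (illustration only):

```python
import math

def natural_exp_decay_ref(learning_rate, decay_steps, decay_rate, step, staircase=False):
    div_res = step / decay_steps
    if staircase:
        div_res = math.floor(div_res)
    return learning_rate * math.exp(-1 * decay_rate * div_res)
```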
```diff
@@ -184,12 +181,11 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        div_res = global_step / decay_steps
-        if staircase:
-            div_res = ops.floor(div_res)
-
-        decayed_lr = learning_rate / (1 + decay_rate * div_res)
+    div_res = global_step / decay_steps
+    if staircase:
+        div_res = ops.floor(div_res)
+
+    decayed_lr = learning_rate / (1 + decay_rate * div_res)
 
     return decayed_lr
```
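And for `inverse_time_decay`, which divides by `1 + decay_rate * div_res` (illustration only):

```python
import math

def inverse_time_decay_ref(learning_rate, decay_steps, decay_rate, step, staircase=False):
    div_res = step / decay_steps
    if staircase:
        div_res = math.floor(div_res)
    return learning_rate / (1 + decay_rate * div_res)
```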
```diff
@@ -224,25 +220,22 @@ def polynomial_decay(learning_rate,
     """
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        if cycle:
-            div_res = ops.ceil(global_step / decay_steps)
-            zero_var = tensor.fill_constant(
-                shape=[1], dtype='float32', value=0.0)
-            one_var = tensor.fill_constant(
-                shape=[1], dtype='float32', value=1.0)
-
-            with control_flow.Switch() as switch:
-                with switch.case(global_step == zero_var):
-                    tensor.assign(input=one_var, output=div_res)
-            decay_steps = decay_steps * div_res
-        else:
-            decay_steps_var = tensor.fill_constant(
-                shape=[1], dtype='float32', value=float(decay_steps))
-            global_step = ops.elementwise_min(x=global_step, y=decay_steps_var)
-
-        decayed_lr = (learning_rate - end_learning_rate) * \
-            ((1 - global_step / decay_steps) ** power) + end_learning_rate
+    if cycle:
+        div_res = ops.ceil(global_step / decay_steps)
+        zero_var = tensor.fill_constant(shape=[1], dtype='float32', value=0.0)
+        one_var = tensor.fill_constant(shape=[1], dtype='float32', value=1.0)
+
+        with control_flow.Switch() as switch:
+            with switch.case(global_step == zero_var):
+                tensor.assign(input=one_var, output=div_res)
+        decay_steps = decay_steps * div_res
+    else:
+        decay_steps_var = tensor.fill_constant(
+            shape=[1], dtype='float32', value=float(decay_steps))
+        global_step = ops.elementwise_min(x=global_step, y=decay_steps_var)
+
+    decayed_lr = (learning_rate - end_learning_rate) * \
+        ((1 - global_step / decay_steps) ** power) + end_learning_rate
 
     return decayed_lr
```
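A reference of `polynomial_decay`, mirroring the `Switch` above: with `cycle=True` the decay interval is stretched to `ceil(step / decay_steps)` periods (treated as 1 at step 0), otherwise the step is clamped to `decay_steps` (illustration only):

```python
import math

def polynomial_decay_ref(learning_rate, decay_steps, end_learning_rate, power, step, cycle=False):
    if cycle:
        # the Switch assigns div_res = 1 when global_step == 0
        div_res = math.ceil(step / decay_steps) if step > 0 else 1.0
        decay_steps = decay_steps * div_res
    else:
        step = min(step, decay_steps)
    return (learning_rate - end_learning_rate) * \
        (1 - step / decay_steps) ** power + end_learning_rate
```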
```diff
@@ -277,28 +270,28 @@ def piecewise_decay(boundaries, values):
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        lr = tensor.create_global_var(
-            shape=[1],
-            value=0.0,
-            dtype='float32',
-            persistable=True,
-            name="learning_rate")
-
-        with control_flow.Switch() as switch:
-            for i in range(len(boundaries)):
-                boundary_val = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(boundaries[i]))
-                value_var = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(values[i]))
-                with switch.case(global_step < boundary_val):
-                    tensor.assign(value_var, lr)
-
-            last_value_var = tensor.fill_constant(
-                shape=[1],
-                dtype='float32',
-                value=float(values[len(values) - 1]))
-            with switch.default():
-                tensor.assign(last_value_var, lr)
+    lr = tensor.create_global_var(
+        shape=[1],
+        value=0.0,
+        dtype='float32',
+        persistable=True,
+        name="learning_rate")
+
+    with control_flow.Switch() as switch:
+        for i in range(len(boundaries)):
+            boundary_val = tensor.fill_constant(
+                shape=[1],
+                dtype='float32',
+                value=float(boundaries[i]),
+                force_cpu=True)
+            value_var = tensor.fill_constant(
+                shape=[1], dtype='float32', value=float(values[i]))
+            with switch.case(global_step < boundary_val):
+                tensor.assign(value_var, lr)
+
+        last_value_var = tensor.fill_constant(
+            shape=[1], dtype='float32', value=float(values[len(values) - 1]))
+        with switch.default():
+            tensor.assign(last_value_var, lr)
 
     return lr
```
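The `force_cpu=True` added to the boundary constants presumably keeps the `switch.case` comparison on the CPU now that the surrounding `init_on_cpu()` block is gone. The schedule itself reduces to: use `values[i]` while the step is still below `boundaries[i]`, else the last value (illustration only):

```python
def piecewise_decay_ref(boundaries, values, step):
    # values[i] applies while step < boundaries[i]; values[-1] afterwards
    for i, boundary in enumerate(boundaries):
        if step < boundary:
            return values[i]
    return values[-1]

# e.g. piecewise_decay_ref([100, 200], [1.0, 0.5, 0.1], step=150) -> 0.5
```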
```diff
@@ -333,9 +326,9 @@ def append_LARS(params_grads, learning_rate, weight_decay):
         grad_norm = ops.sqrt(nn.reduce_sum(input=ops.square(grad)))
         if type(param_lr) == float and param_lr == 1.0:
             decayed_lr = learning_rate * param_norm \
                 / _balanced_weight(param_norm, grad_norm)
         else:
             decayed_lr = learning_rate * param_lr * param_norm \
                 / _balanced_weight(param_norm, grad_norm)
         # set back param local learning rate
         param.optimize_attr['learning_rate'] = decayed_lr
```
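For reference, a hypothetical NumPy sketch of the per-parameter LARS rate computed above, assuming `_balanced_weight(param_norm, grad_norm)` returns `grad_norm + weight_decay * param_norm` (the helper's body is outside this hunk, so that is an assumption):

```python
import numpy as np

def lars_local_lr_ref(learning_rate, param, grad, weight_decay, param_lr=1.0):
    param_norm = np.sqrt(np.sum(np.square(param)))
    grad_norm = np.sqrt(np.sum(np.square(grad)))
    balanced = grad_norm + weight_decay * param_norm  # assumed _balanced_weight behaviour
    return learning_rate * param_lr * param_norm / balanced
```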
```diff
@@ -91,20 +91,21 @@ class TestLearningRateDecay(unittest.TestCase):
     def check_decay_with_place(self, place, python_decay_fn, fluid_decay_fn,
                                kwargs):
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
 
-        decayed_lr = fluid_decay_fn(**kwargs)
+        with fluid.program_guard(main_prog, startup_prog):
+            decayed_lr = fluid_decay_fn(**kwargs)
 
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
 
-        exe.run(fluid.default_startup_program())
+        exe.run(startup_prog)
 
-        fluid.memory_optimize(fluid.default_main_program())
+        fluid.memory_optimize(main_prog)
 
         for step in range(10):
-            lr_val, = exe.run(fluid.default_main_program(),
-                              feed={},
-                              fetch_list=[decayed_lr])
+            lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr])
             python_decayed_lr = python_decay_fn(
                 global_step=float(step), **kwargs)
             self.assertAlmostEqual(
```
......
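The test change presumably isolates each schedule's step counter by building it into fresh programs rather than the process-wide defaults. A condensed sketch of that pattern (the helper name is mine):

```python
import paddle.fluid as fluid

def run_decay_fn(fluid_decay_fn, steps=10, **kwargs):
    # Build the schedule into fresh programs instead of the global defaults
    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        decayed_lr = fluid_decay_fn(**kwargs)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)
    return [exe.run(main_prog, fetch_list=[decayed_lr])[0] for _ in range(steps)]
```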