diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 6071e3e74218e4db4cddc223818d3a9b7086fd86..4196f229f6edf6c1e9c1232588cf6da1959c1ee3 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -277,28 +277,28 @@ def piecewise_decay(boundaries, values):
 
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        lr = tensor.create_global_var(
-            shape=[1],
-            value=0.0,
-            dtype='float32',
-            persistable=True,
-            name="learning_rate")
-
-        with control_flow.Switch() as switch:
-            for i in range(len(boundaries)):
-                boundary_val = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(boundaries[i]))
-                value_var = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(values[i]))
-                with switch.case(global_step < boundary_val):
-                    tensor.assign(value_var, lr)
-            last_value_var = tensor.fill_constant(
+    lr = tensor.create_global_var(
+        shape=[1],
+        value=0.0,
+        dtype='float32',
+        persistable=True,
+        name="learning_rate")
+
+    with control_flow.Switch() as switch:
+        for i in range(len(boundaries)):
+            boundary_val = tensor.fill_constant(
                 shape=[1],
                 dtype='float32',
-                value=float(values[len(values) - 1]))
-            with switch.default():
-                tensor.assign(last_value_var, lr)
+                value=float(boundaries[i]),
+                force_cpu=True)
+            value_var = tensor.fill_constant(
+                shape=[1], dtype='float32', value=float(values[i]))
+            with switch.case(global_step < boundary_val):
+                tensor.assign(value_var, lr)
+        last_value_var = tensor.fill_constant(
+            shape=[1], dtype='float32', value=float(values[len(values) - 1]))
+        with switch.default():
+            tensor.assign(last_value_var, lr)
 
     return lr
 
@@ -333,9 +333,9 @@ def append_LARS(params_grads, learning_rate, weight_decay):
         grad_norm = ops.sqrt(nn.reduce_sum(input=ops.square(grad)))
         if type(param_lr) == float and param_lr == 1.0:
             decayed_lr = learning_rate * param_norm \
-                / _balanced_weight(param_norm, grad_norm)
+                         / _balanced_weight(param_norm, grad_norm)
         else:
             decayed_lr = learning_rate * param_lr * param_norm \
-                / _balanced_weight(param_norm, grad_norm)
+                         / _balanced_weight(param_norm, grad_norm)
         # set back param local learning rate
         param.optimize_attr['learning_rate'] = decayed_lr
diff --git a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py
index 6382e290eb30c621da64d5c600be6d8a7c6254f1..49ea18c1691d68e7d1c4093f95f847b72062f0af 100644
--- a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py
+++ b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py
@@ -91,20 +91,21 @@ class TestLearningRateDecay(unittest.TestCase):
 
     def check_decay_with_place(self, place, python_decay_fn, fluid_decay_fn,
                                kwargs):
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
 
-        decayed_lr = fluid_decay_fn(**kwargs)
+        with fluid.program_guard(main_prog, startup_prog):
+            decayed_lr = fluid_decay_fn(**kwargs)
 
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
 
-        exe.run(fluid.default_startup_program())
+        exe.run(startup_prog)
 
-        fluid.memory_optimize(fluid.default_main_program())
+        # fluid.memory_optimize(main_prog)
 
         for step in range(10):
-            lr_val, = exe.run(fluid.default_main_program(),
-                              feed={},
-                              fetch_list=[decayed_lr])
+            lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr])
             python_decayed_lr = python_decay_fn(
                 global_step=float(step), **kwargs)
             self.assertAlmostEqual(
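
Usage note (not part of the patch): the sketch below is a minimal illustration of the post-patch behaviour, mirroring the updated unit test. piecewise_decay is built inside a fresh program pair under fluid.program_guard instead of relying on init_on_cpu and the default programs; the boundaries and values are made-up example numbers.

# Minimal sketch, assuming the post-patch fluid API; boundaries/values are illustrative only.
import paddle.fluid as fluid

main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # learning rate stays at 1.0, drops to 0.5 at step 3, then to 0.1 at step 6
    lr = fluid.layers.piecewise_decay(boundaries=[3, 6], values=[1.0, 0.5, 0.1])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
for step in range(10):
    # each run advances the decay step counter and fetches the current lr
    lr_val, = exe.run(main_prog, feed={}, fetch_list=[lr])
    print(step, lr_val)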