From 381bacaa492c52c8ee887eaa3bf8cc3d478aa245 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Wed, 18 Jul 2018 17:02:42 +0800
Subject: [PATCH] Fix piecewise_decay and fix a unittest error

---
 .../fluid/layers/learning_rate_scheduler.py   | 44 +++++++++----------
 .../unittests/test_learning_rate_scheduler.py | 13 +++---
 2 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 6071e3e742..4196f229f6 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -277,28 +277,28 @@ def piecewise_decay(boundaries, values):
 
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        lr = tensor.create_global_var(
-            shape=[1],
-            value=0.0,
-            dtype='float32',
-            persistable=True,
-            name="learning_rate")
-
-        with control_flow.Switch() as switch:
-            for i in range(len(boundaries)):
-                boundary_val = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(boundaries[i]))
-                value_var = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(values[i]))
-                with switch.case(global_step < boundary_val):
-                    tensor.assign(value_var, lr)
-            last_value_var = tensor.fill_constant(
+    lr = tensor.create_global_var(
+        shape=[1],
+        value=0.0,
+        dtype='float32',
+        persistable=True,
+        name="learning_rate")
+
+    with control_flow.Switch() as switch:
+        for i in range(len(boundaries)):
+            boundary_val = tensor.fill_constant(
                 shape=[1],
                 dtype='float32',
-                value=float(values[len(values) - 1]))
-            with switch.default():
-                tensor.assign(last_value_var, lr)
+                value=float(boundaries[i]),
+                force_cpu=True)
+            value_var = tensor.fill_constant(
+                shape=[1], dtype='float32', value=float(values[i]))
+            with switch.case(global_step < boundary_val):
+                tensor.assign(value_var, lr)
+        last_value_var = tensor.fill_constant(
+            shape=[1], dtype='float32', value=float(values[len(values) - 1]))
+        with switch.default():
+            tensor.assign(last_value_var, lr)
 
     return lr
 
@@ -333,9 +333,9 @@ def append_LARS(params_grads, learning_rate, weight_decay):
         grad_norm = ops.sqrt(nn.reduce_sum(input=ops.square(grad)))
         if type(param_lr) == float and param_lr == 1.0:
             decayed_lr = learning_rate * param_norm \
-                / _balanced_weight(param_norm, grad_norm)
+                         / _balanced_weight(param_norm, grad_norm)
         else:
             decayed_lr = learning_rate * param_lr * param_norm \
-                / _balanced_weight(param_norm, grad_norm)
+                         / _balanced_weight(param_norm, grad_norm)
         # set back param local learning rate
         param.optimize_attr['learning_rate'] = decayed_lr
diff --git a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py
index 6382e290eb..49ea18c169 100644
--- a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py
+++ b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py
@@ -91,20 +91,21 @@ class TestLearningRateDecay(unittest.TestCase):
 
     def check_decay_with_place(self, place, python_decay_fn, fluid_decay_fn,
                                kwargs):
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
 
-        decayed_lr = fluid_decay_fn(**kwargs)
+        with fluid.program_guard(main_prog, startup_prog):
+            decayed_lr = fluid_decay_fn(**kwargs)
 
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
 
-        exe.run(fluid.default_startup_program())
+        exe.run(startup_prog)
 
-        fluid.memory_optimize(fluid.default_main_program())
+        # fluid.memory_optimize(main_prog)
 
         for step in range(10):
-            lr_val, = exe.run(fluid.default_main_program(),
-                              feed={},
-                              fetch_list=[decayed_lr])
+            lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr])
             python_decayed_lr = python_decay_fn(
                 global_step=float(step), **kwargs)
             self.assertAlmostEqual(
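
Note for reviewers: the Switch/case graph built in piecewise_decay is just a
piecewise lookup on the step counter. As a sanity reference (a sketch, not
part of the patch; piecewise_decay_ref is a hypothetical name), the schedule
it computes is:

def piecewise_decay_ref(global_step, boundaries, values):
    # values[i] applies while global_step < boundaries[i]; once every
    # boundary has been passed, the last value applies (this mirrors the
    # switch.default branch in the graph version).
    assert len(values) == len(boundaries) + 1
    for i, boundary in enumerate(boundaries):
        if global_step < boundary:
            return values[i]
    return values[-1]

# e.g. boundaries=[100, 200], values=[1.0, 0.5, 0.1] gives
# steps 0-99 -> 1.0, steps 100-199 -> 0.5, steps >= 200 -> 0.1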
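
The unittest change replaces the process-wide default programs with per-call
Program objects, so each decay function under test builds into a fresh graph
instead of accumulating ops across subtests. A minimal sketch of that pattern,
assuming the fluid API of this era (the boundaries/values below are made-up
numbers):

import paddle.fluid as fluid

main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # ops declared here land in main_prog/startup_prog rather than in
    # fluid.default_main_program()/fluid.default_startup_program()
    lr = fluid.layers.piecewise_decay(
        boundaries=[100, 200], values=[1.0, 0.5, 0.1])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)  # initializes the persistable learning_rate variable
lr_val, = exe.run(main_prog, feed={}, fetch_list=[lr])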