diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 6b42b40403089b7ce88f81f44ba7e701ca335417..96488bfc96a1d6fd8e65aa658dfd998fa0dafb75 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -3850,12 +3850,12 @@ class RecomputeOptimizer(Optimizer):
 
                 sgd = fluid.optimizer.Adam(learning_rate=0.01)
                 sgd = fluid.optimizer.RecomputeOptimizer(sgd)
+                sgd._set_checkpoints([fc_1, pred])
                 params_grads = sgd.backward(
                     cost,
                     startup_program=None,
                     parameter_list=None,
-                    no_grad_set=None,
-                    checkpoints=[fc_1, pred])
+                    no_grad_set=None)
 
                 program = cost.block.program
                 with framework.program_guard(program, None):
@@ -3871,8 +3871,7 @@ class RecomputeOptimizer(Optimizer):
                  startup_program=None,
                  parameter_list=None,
                  no_grad_set=None,
-                 callbacks=None,
-                 checkpoints=None):
+                 callbacks=None):
         """
         call append_backward with checkpoints.
 
@@ -3906,12 +3905,12 @@
 
                 sgd = fluid.optimizer.Adam(learning_rate=0.01)
                 sgd = fluid.optimizer.RecomputeOptimizer(sgd)
+                sgd._set_checkpoints([fc_1, pred])
                 params_grads = sgd.backward(
                     cost,
                     startup_program=None,
                     parameter_list=None,
-                    no_grad_set=None,
-                    checkpoints=[fc_1, pred])
+                    no_grad_set=None)
 
                 print("Finished backward")
         """
@@ -3958,12 +3957,12 @@
 
                 sgd = fluid.optimizer.Adam(learning_rate=0.01)
                 sgd = fluid.optimizer.RecomputeOptimizer(sgd)
+                sgd._set_checkpoints([fc_1, pred])
                 params_grads = sgd.backward(
                     cost,
                     startup_program=None,
                     parameter_list=None,
-                    no_grad_set=None,
-                    checkpoints=[fc_1, pred])
+                    no_grad_set=None)
 
                 optimize_ops = sgd.apply_optimize(
                     cost, startup_program=None, params_grads=params_grads)
@@ -3993,8 +3992,7 @@
            loss,
            startup_program=startup_program,
            parameter_list=parameter_list,
-           no_grad_set=no_grad_set,
-           checkpoints=self._checkpoints)
+           no_grad_set=no_grad_set)
 
        if grad_clip:
            # TODO(guru4elephant): should add grad_clip for static graph
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py
index f97c40b6d99ed06367998b122944cc047f9f0744..fefcac6ede784542a139eba121f7d4791c50c7a4 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer.py
@@ -791,8 +791,7 @@ class TestRecomputeOptimizer(unittest.TestCase):
            mean_out,
            startup_program=None,
            parameter_list=None,
-           no_grad_set=None,
-           checkpoints=[b1_out])
+           no_grad_set=None)
 
        # apply gradient
        program = mean_out.block.program
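
For reference, a minimal end-to-end sketch of the calling convention this diff establishes: recompute checkpoints are registered once through _set_checkpoints(), and backward() no longer accepts a checkpoints argument. The small network below (the fc layers, cost, and the choice of fc_1 and pred as checkpoints) is illustrative, modeled on the docstring examples in the diff rather than copied from any single one.

    import paddle.fluid as fluid

    # Illustrative network; fc_1 and pred stand in for whichever
    # intermediate variables are chosen as recompute checkpoints.
    input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
    input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
    fc_1 = fluid.layers.fc(input=input_x, size=128)
    pred = fluid.layers.fc(input=[fc_1], size=2, act='softmax')
    cost = fluid.layers.reduce_mean(
        fluid.layers.cross_entropy(input=pred, label=input_y))

    sgd = fluid.optimizer.Adam(learning_rate=0.01)
    sgd = fluid.optimizer.RecomputeOptimizer(sgd)

    # New convention: register checkpoints on the optimizer up front ...
    sgd._set_checkpoints([fc_1, pred])

    # ... so backward() is called without a `checkpoints` keyword.
    params_grads = sgd.backward(
        cost,
        startup_program=None,
        parameter_list=None,
        no_grad_set=None)

The same pattern applies when going through minimize(): since minimize_impl now forwards only loss, startup_program, parameter_list, and no_grad_set to backward(), the checkpoints it uses are the ones previously stored by _set_checkpoints().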