From 60a6d68fb9465e466916b8b038a27ea879a435c4 Mon Sep 17 00:00:00 2001 From: Aurelius84 Date: Wed, 15 Jan 2020 15:10:58 +0800 Subject: [PATCH] remove _optimized_guard in dygraph_mode (#22143) * remove _optimized_guard in dygraph_mode test=develop * remove comment code test=develop * remove list append test=develop * remove list append test=develop --- python/paddle/fluid/optimizer.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index a67aaad432..afb9cc4c49 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -441,17 +441,12 @@ class Optimizer(object): [p[0] for p in parameters_and_grads if p[0].trainable]) self._create_global_learning_rate() - optimize_ops = [] if framework.in_dygraph_mode(): for param_and_grad in parameters_and_grads: if param_and_grad[1] is None: continue - with param_and_grad[0].block.program._optimized_guard( - param_and_grad): - if param_and_grad[0].trainable is True: - optimize_op = self._append_optimize_op(target_block, - param_and_grad) - optimize_ops.append(optimize_op) + if param_and_grad[0].trainable is True: + self._append_optimize_op(target_block, param_and_grad) else: for param_and_grad in parameters_and_grads: if param_and_grad[1] is None: @@ -459,9 +454,7 @@ class Optimizer(object): with param_and_grad[0].block.program._optimized_guard( param_and_grad), name_scope("optimizer"): if param_and_grad[0].trainable is True: - optimize_op = self._append_optimize_op(target_block, - param_and_grad) - optimize_ops.append(optimize_op) + self._append_optimize_op(target_block, param_and_grad) # Get custom finish ops for subclasses # FIXME: Need to fix this once we figure out how to handle dependencies -- GitLab