Unverified commit 4914da1b authored by Qiyang Min, committed by GitHub

Merge pull request #16744 from velconia/locl_rel_1_4_imperative_fix_growing_dict

Fix auto growth bug of optimizer in dygraph mode
@@ -275,15 +275,26 @@ class Optimizer(object):
         self._create_global_learning_rate()
 
         optimize_ops = []
-        for param_and_grad in parameters_and_grads:
-            if param_and_grad[1] is None:
-                continue
-            with param_and_grad[0].block.program._optimized_guard(
-                    param_and_grad), name_scope("optimizer"):
-                if param_and_grad[0].trainable is True:
-                    optimize_op = self._append_optimize_op(global_block,
-                                                           param_and_grad)
-                    optimize_ops.append(optimize_op)
+        if framework._in_dygraph_mode():
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)
+        else:
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad), name_scope("optimizer"):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)
 
         # Get custom finish ops for subclasses
         # FIXME: Need to fix this once we figure out how to handle dependencies
...
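Note on the change: the diff splits the optimizer-op loop on framework._in_dygraph_mode() and, in the dygraph branch, drops the name_scope("optimizer") context manager while keeping _optimized_guard. A plausible reading of the bug named in the PR title: a framework-level name_scope keeps a process-wide tree of scopes and appends a fresh node on every entry, which is harmless when a static graph is built once, but grows without bound when the optimizer is applied once per minibatch in dygraph mode. Below is a minimal, self-contained sketch of that failure mode; the NameScope class is a toy stand-in written for illustration, not Paddle's actual implementation.

import contextlib

class NameScope(object):
    """Toy stand-in for a process-wide name-scope tree (hypothetical)."""
    def __init__(self, name=""):
        self._name = name
        self._children = {}   # child name -> list of NameScope nodes

    def child(self, name):
        # Every call appends a new node; the tree only ever grows.
        new_node = NameScope(name)
        self._children.setdefault(name, []).append(new_node)
        return new_node

_root = NameScope()

@contextlib.contextmanager
def name_scope(prefix):
    # Each entry permanently records a node under the root.
    yield _root.child(prefix)

# Static-graph style: this runs once at graph-build time, so the tree
# stays small. Dygraph style: the same line runs on every training step,
# so the list under _root grows by one node per step and is never freed.
for step in range(3):
    with name_scope("optimizer"):
        pass

print(len(_root._children["optimizer"]))  # 3 -- one node per step

Under that reading, skipping name_scope in the dygraph branch keeps per-step optimizer application from leaving a record behind, while the static-graph branch retains its named scopes for visualization and debugging.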