diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index 590d1329ab69106bbe7a8d51e210ceb48ed87ced..66c3fc6b66d61bc9578f84594409ad0f24c99910 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -215,7 +215,7 @@ def set_gradient_clip(clip, param_list=None, program=None):
 def append_gradient_clip_ops(param_grad):
     context = dict()
     for p, g in param_grad:
-        with p.block.program.optimization_guard(p):
+        with p.block.program.optimized_guard(p):
             clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr())
             if clip_attr is None:
                 clip_attr = NullGradientClipAttr()
@@ -228,7 +228,7 @@ def append_gradient_clip_ops(param_grad):
 
     res = []
     for p, g in param_grad:
-        with p.block.program.optimization_guard(p):
+        with p.block.program.optimized_guard(p):
             res.append(clip_attr.create_operators(param=p, grad=g))
 
     return res
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 73a66c328004b58bc1de9e1da41c3b5da05d0e3b..92dbb40f628db812ed0548e7e71536fb2bb83a96 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1103,7 +1103,7 @@ class Program(object):
         self._op_role_var = [var_name]
 
     @contextlib.contextmanager
-    def optimization_guard(self, var):
+    def optimized_guard(self, var):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and
         :code:`OpRoleVar` automatically.
@@ -1116,7 +1116,7 @@ class Program(object):
         Examples:
 
             >>> p, g = backward(...)
-            >>> with program.optimization_guard(p):
+            >>> with program.optimized_guard(p):
             >>>     p = p - 0.001 * g
         """
         OpRole = core.op_proto_and_checker_maker.OpRole
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 89e8e09e5a4841791beba7d2e668ae752da88d39..54fe9356275c313cd18fbb12edc9d35f38bda772 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -226,7 +226,7 @@ class Optimizer(object):
 
         optimize_ops = []
         for param_and_grad in parameters_and_grads:
-            with param_and_grad[0].block.program.optimization_guard(
+            with param_and_grad[0].block.program.optimized_guard(
                     param_and_grad[0]):
                 if param_and_grad[0].trainable is True and param_and_grad[
                         1] is not None:
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index cec45a317aba8d1cc34f43b9f3f8e30bf89af9ea..c4d6829599616cb3ea7791a189e7070974de6ae3 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -43,7 +43,7 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
     """
     params_and_grads = []
     for param, grad in parameters_and_grads:
-        with param.block.program.optimization_guard(param):
+        with param.block.program.optimized_guard(param):
             # If no gradient then we don't need to do anything
             if grad is None:
                 params_and_grads.append((param, grad))
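
For readers following the rename: `optimized_guard` (formerly `optimization_guard`) is, per the docstring in the framework.py hunk above, a `contextlib.contextmanager` on `Program` that sets the `Optimization` `OpRole` and `OpRoleVar` while operators are appended, which is why clip.py, optimizer.py and regularizer.py all wrap their op-creation loops in it. The snippet below is a standalone sketch of that pattern only, not PaddlePaddle code; `ToyProgram` and its attribute names are invented for illustration.

import contextlib


class ToyProgram(object):
    """Standalone stand-in mimicking the guard pattern on fluid.Program."""

    def __init__(self):
        self._current_role = "Forward"
        self._op_role_var = []

    @contextlib.contextmanager
    def optimized_guard(self, param):
        # Tag ops created inside the block with the Optimize role and the
        # parameter being updated, then restore the previous state.
        prev_role, prev_var = self._current_role, self._op_role_var
        self._current_role = "Optimize"
        self._op_role_var = [param]
        try:
            yield
        finally:
            self._current_role, self._op_role_var = prev_role, prev_var


program = ToyProgram()
with program.optimized_guard("fc_0.w_0"):
    # Any operator appended here (clip, regularization, the update op itself)
    # would inherit the Optimize role and the parameter name.
    assert program._current_role == "Optimize"
assert program._current_role == "Forward"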