From 1c19f1ab44596de17a25eb7b171847d874b524d5 Mon Sep 17 00:00:00 2001
From: yuyang18
Date: Tue, 19 Jun 2018 12:24:58 +0800
Subject: [PATCH] Do not change API in doc PR

---
 python/paddle/fluid/clip.py        | 4 ++--
 python/paddle/fluid/framework.py   | 4 ++--
 python/paddle/fluid/optimizer.py   | 2 +-
 python/paddle/fluid/regularizer.py | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index 590d1329ab..66c3fc6b66 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -215,7 +215,7 @@ def set_gradient_clip(clip, param_list=None, program=None):
 def append_gradient_clip_ops(param_grad):
     context = dict()
     for p, g in param_grad:
-        with p.block.program.optimization_guard(p):
+        with p.block.program.optimized_guard(p):
             clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr())
             if clip_attr is None:
                 clip_attr = NullGradientClipAttr()
@@ -228,7 +228,7 @@ def append_gradient_clip_ops(param_grad):
 
     res = []
     for p, g in param_grad:
-        with p.block.program.optimization_guard(p):
+        with p.block.program.optimized_guard(p):
             res.append(clip_attr.create_operators(param=p, grad=g))
 
     return res
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 73a66c3280..92dbb40f62 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1103,7 +1103,7 @@ class Program(object):
         self._op_role_var = [var_name]
 
     @contextlib.contextmanager
-    def optimization_guard(self, var):
+    def optimized_guard(self, var):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and
         :code:`OpRoleVar` automatically.
@@ -1116,7 +1116,7 @@ class Program(object):
         Examples:
 
             >>> p, g = backward(...)
-            >>> with program.optimization_guard(p):
+            >>> with program.optimized_guard(p):
             >>>     p = p - 0.001 * g
         """
         OpRole = core.op_proto_and_checker_maker.OpRole
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 89e8e09e5a..54fe935627 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -226,7 +226,7 @@ class Optimizer(object):
 
         optimize_ops = []
         for param_and_grad in parameters_and_grads:
-            with param_and_grad[0].block.program.optimization_guard(
+            with param_and_grad[0].block.program.optimized_guard(
                     param_and_grad[0]):
                 if param_and_grad[0].trainable is True and param_and_grad[
                         1] is not None:
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index cec45a317a..c4d6829599 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -43,7 +43,7 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
     """
     params_and_grads = []
    for param, grad in parameters_and_grads:
-        with param.block.program.optimization_guard(param):
+        with param.block.program.optimized_guard(param):
             # If no gradient then we don't need to do anything
             if grad is None:
                 params_and_grads.append((param, grad))
-- 
GitLab
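
Usage sketch (not part of the patch): the hunks above only rename
Program.optimization_guard to Program.optimized_guard; per the docstring,
the context manager still tags the ops created under it with the
Optimization OpRole and the OpRoleVar of the given variable. The snippet
below is a minimal sketch of calling the renamed guard, modeled on the
docstring example in framework.py. It assumes a PaddlePaddle Fluid build
that already contains this commit; the toy network and the names x, y,
and loss are hypothetical and exist only to produce (parameter, gradient)
pairs.

import paddle.fluid as fluid

main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(y)

    # append_backward returns a list of (parameter, gradient) pairs.
    for p, g in fluid.backward.append_backward(loss):
        # Ops created inside optimized_guard are tagged with the
        # Optimization OpRole and with `p` as the OpRoleVar, as the
        # docstring describes.
        with main_program.optimized_guard(p):
            # Hand-rolled SGD step from the docstring: p = p - 0.001 * g
            fluid.layers.assign(p - 0.001 * g, p)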