Commit 1c19f1ab authored by yuyang18

Do not change API in doc PR

Parent 7747e01b
@@ -215,7 +215,7 @@ def set_gradient_clip(clip, param_list=None, program=None):
 def append_gradient_clip_ops(param_grad):
     context = dict()
     for p, g in param_grad:
-        with p.block.program.optimization_guard(p):
+        with p.block.program.optimized_guard(p):
             clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr())
             if clip_attr is None:
                 clip_attr = NullGradientClipAttr()
@@ -228,7 +228,7 @@ def append_gradient_clip_ops(param_grad):
     res = []
     for p, g in param_grad:
-        with p.block.program.optimization_guard(p):
+        with p.block.program.optimized_guard(p):
             res.append(clip_attr.create_operators(param=p, grad=g))
     return res
@@ -1103,7 +1103,7 @@ class Program(object):
         self._op_role_var = [var_name]

     @contextlib.contextmanager
-    def optimization_guard(self, var):
+    def optimized_guard(self, var):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and
         :code:`OpRoleVar` automatically.
@@ -1116,7 +1116,7 @@ class Program(object):
         Examples:

             >>> p, g = backward(...)
-            >>> with program.optimization_guard(p):
+            >>> with program.optimized_guard(p):
             >>>     p = p - 0.001 * g
         """
         OpRole = core.op_proto_and_checker_maker.OpRole
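Note on the renamed API: the docstring above describes what the guard does, namely that every operator created inside the with block is tagged with the Optimize role and the variable being optimized. Below is a minimal, self-contained sketch of that pattern. It uses simplified stand-ins for Program and OpRole (the real OpRole comes from core.op_proto_and_checker_maker), and everything except the optimized_guard name is illustrative rather than the exact fluid implementation.

import contextlib

class OpRole(object):
    # simplified stand-in for core.op_proto_and_checker_maker.OpRole
    Forward = 0
    Optimize = 2

class Program(object):
    # toy stand-in that only models the role bookkeeping done by the guard
    def __init__(self):
        self._current_role = OpRole.Forward
        self._op_role_var = []

    @contextlib.contextmanager
    def optimized_guard(self, var):
        # tag ops created inside the block as optimization ops for `var`,
        # then restore the defaults on exit
        self._current_role = OpRole.Optimize
        self._op_role_var = [var]
        yield
        self._current_role = OpRole.Forward
        self._op_role_var = []

# caller-side shape, mirroring the clip hunks above and the optimizer and
# regularizer hunks below; the parameter names are made up for illustration
prog = Program()
params_grads = [("fc_0.w_0", "fc_0.w_0@GRAD"), ("fc_0.b_0", None)]
for param, grad in params_grads:
    if grad is None:
        continue  # nothing to create for this parameter
    with prog.optimized_guard(param):
        assert prog._current_role == OpRole.Optimize  # update ops go here
assert prog._current_role == OpRole.Forward

All call sites touched by this commit follow that loop-plus-guard shape; only the guard's name changes.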
@@ -226,7 +226,7 @@ class Optimizer(object):
         optimize_ops = []
         for param_and_grad in parameters_and_grads:
-            with param_and_grad[0].block.program.optimization_guard(
+            with param_and_grad[0].block.program.optimized_guard(
                     param_and_grad[0]):
                 if param_and_grad[0].trainable is True and param_and_grad[
                         1] is not None:
@@ -43,7 +43,7 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
     """
     params_and_grads = []
     for param, grad in parameters_and_grads:
-        with param.block.program.optimization_guard(param):
+        with param.block.program.optimized_guard(param):
             # If no gradient then we don't need to do anything
             if grad is None:
                 params_and_grads.append((param, grad))