Unverified commit e5c89143 authored by: F fengjiayi, committed by: GitHub

Merge pull request #7431 from JiayiFeng/dev_error_clip_test

ErrorClip test on MNIST
...@@ -4,6 +4,7 @@ from . import core ...@@ -4,6 +4,7 @@ from . import core
__all__ = [ __all__ = [
'GradientClipByValue', 'GradientClipByValue',
'ErrorClipByValue',
'append_gradient_clip_ops', 'append_gradient_clip_ops',
'error_clip_callback', 'error_clip_callback',
] ]
...@@ -25,12 +26,12 @@ class ErrorClipByValue(BaseErrorClipAttr): ...@@ -25,12 +26,12 @@ class ErrorClipByValue(BaseErrorClipAttr):
self.min = min self.min = min
def append_clip_op(self, block, grad_name): def append_clip_op(self, block, grad_name):
block.append_op( clip_op_desc = block.desc.append_op()
type="clip", clip_op_desc.set_type("clip")
inputs={"X": grad_name}, clip_op_desc.set_input("X", [grad_name])
outputs={"Out": grad_name}, clip_op_desc.set_output("Out", [grad_name])
attrs={"min": self.min, clip_op_desc.set_attr("min", self.min)
"max": self.max}) clip_op_desc.set_attr("max", self.max)
def error_clip_callback(block, context): def error_clip_callback(block, context):
...@@ -41,6 +42,11 @@ def error_clip_callback(block, context): ...@@ -41,6 +42,11 @@ def error_clip_callback(block, context):
op_desc.output_arg_names()): op_desc.output_arg_names()):
fwd_var = block.var_recursive(grad_to_var[grad_n]) fwd_var = block.var_recursive(grad_to_var[grad_n])
error_clip = getattr(fwd_var, "error_clip", None) error_clip = getattr(fwd_var, "error_clip", None)
if not (error_clip is None or isinstance(error_clip,
BaseErrorClipAttr)):
raise TypeError(
"Variable's error_clip should be an instance of BaseErrorClipAttr or None."
)
if error_clip is not None: if error_clip is not None:
error_clip.append_clip_op(block, grad_n) error_clip.append_clip_op(block, grad_n)
......
...@@ -280,6 +280,9 @@ class Variable(object): ...@@ -280,6 +280,9 @@ class Variable(object):
uid = core.unique_integer(prefix) # unique during whole process. uid = core.unique_integer(prefix) # unique during whole process.
return "_".join([prefix, str(uid)]) return "_".join([prefix, str(uid)])
def set_error_clip(self, error_clip):
self.error_clip = error_clip
def get_all_op_protos(): def get_all_op_protos():
""" """
......
"""Integration test for ErrorClipByValue on an MNIST MLP.

Builds one program without error clipping and a clone with an
ErrorClipByValue attribute attached to the first hidden layer, runs both
on a few MNIST mini-batches, and checks that:

* the clipped program's hidden1 gradient equals the unclipped program's
  hidden1 gradient clamped to [CLIP_MIN, CLIP_MAX], and
* the hidden2 gradient — which the clip op does not touch — is identical
  between the two programs.

Exits with status 0 on success, 1 on the first mismatch.
"""
from __future__ import print_function

import sys

import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid

BATCH_SIZE = 128
# Deliberately tiny, asymmetric bounds so real gradients are clipped in
# both directions and the effect is observable.
CLIP_MAX = 2e-6
CLIP_MIN = -1e-6
# Number of mini-batches to verify before declaring success.
NUM_BATCHES = 5

prog = fluid.framework.Program()

with fluid.program_guard(main_program=prog):
    image = fluid.layers.data(name='x', shape=[784], dtype='float32')
    hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
    hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
    predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
    label = fluid.layers.data(name='y', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(x=cost)

# Clone the forward program and attach an error-clip attribute to
# hidden1 only; the original `prog` stays clip-free for comparison.
prog_clip = prog.clone()
prog_clip.block(0).var(hidden1.name).set_error_clip(
    fluid.clip.ErrorClipByValue(
        max=CLIP_MAX, min=CLIP_MIN))
avg_cost_clip = prog_clip.block(0).var(avg_cost.name)

# Append backward ops to both programs; only the clipped one passes
# error_clip_callback, which inserts the clip op on hidden1's gradient.
fluid.backward.append_backward(loss=avg_cost)
fluid.backward.append_backward(
    loss=avg_cost_clip, callback=fluid.clip.error_clip_callback)

hidden1_grad = prog.block(0).var(hidden1.name + "@GRAD")
hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD")
hidden2_grad = prog.block(0).var(hidden2.name + "@GRAD")
hidden2_grad_clip = prog_clip.block(0).var(hidden2.name + "@GRAD")

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.mnist.train(), buf_size=8192),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
exe.run(fluid.default_startup_program())

for batch_id, data in enumerate(train_reader()):
    if batch_id >= NUM_BATCHES:
        break
    out1, out2 = exe.run(prog,
                         feed=feeder.feed(data),
                         fetch_list=[hidden1_grad, hidden2_grad])
    out1_clip, out2_clip = exe.run(
        prog_clip,
        feed=feeder.feed(data),
        fetch_list=[hidden1_grad_clip, hidden2_grad_clip])
    # hidden1's clipped gradient must equal the reference gradient clamped
    # to the clip range; hidden2's gradient must be bit-identical since no
    # clip op was inserted for it.
    if not ((out1.clip(
            min=CLIP_MIN, max=CLIP_MAX) == out1_clip).all() and
            (out2 == out2_clip).all()):
        sys.exit(1)

sys.exit(0)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册