PaddlePaddle / Paddle
Commit 212242c4

Polish code

test=develop

Authored by minqiyang on Feb 27, 2019; committed by ceci3 on Mar 04, 2019.
Parent: 1b10a784

Showing 2 changed files with 189 additions and 192 deletions:

  paddle/fluid/imperative/layer.h                                 +4    -2
  python/paddle/fluid/tests/unittests/test_imperative_basic.py   +185  -190
paddle/fluid/imperative/layer.h

The OpBase destructor now checks op_desc_ as well as block_ before removing the op desc from its block:

@@ -243,8 +243,10 @@ class PYBIND11_HIDDEN OpBase {
       }
     }

     // remove op desc from block desc
-    if (block_) {
-      block_->RemoveOpInternal(op_desc_);
+    if (op_desc_) {
+      if (block_) {
+        block_->RemoveOpInternal(op_desc_);
+      }
     }
     // release resource
python/paddle/fluid/tests/unittests/test_imperative_basic.py

@@ -191,197 +191,192 @@ class SimpleRNN(fluid.imperative.Layer):

Before this commit, only TestImperative.test_sum_op was active; the remaining TestImperative cases (test_layer, test_pylayer_func_id, test_pylayer, test_layer_in_out, test_mlp, test_rnn) were commented out. The commit comments out the TestImperative class entirely and moves the two PyLayer cases into a new, active TestImperativePyLayer class. The changed region after this commit:

        return outs, pre_hiddens


# class TestImperative(unittest.TestCase):
#     def test_sum_op(self):
#         x = np.ones([2, 2], np.float32)
#         with fluid.imperative.guard():
#             inputs = []
#             for _ in range(10):
#                 inputs.append(fluid.imperative.base.to_variable(x))
#             ret = fluid.layers.sums(inputs)
#             loss = fluid.layers.reduce_sum(ret)
#             loss._backward()
#             self.assertTrue(np.allclose(ret._numpy(), x * 10))
#             self.assertTrue(np.allclose(inputs[0]._gradient(), x))
#
#     def test_layer(self):
#         with fluid.imperative.guard():
#             cl = core.Layer()
#             cl.forward([])
#             l = fluid.imperative.Layer("l")
#             self.assertRaises(NotImplementedError, l.forward, [])
#
#     def test_layer_in_out(self):
#         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
#         with fluid.imperative.guard():
#             var_inp = fluid.imperative.base.to_variable(np_inp)
#             l = MyLayer("my_layer")
#             x = l(var_inp)[0]
#             self.assertIsNotNone(x)
#             dy_out = x._numpy()
#             x._backward()
#             dy_grad = l._x_for_debug._gradient()
#         with new_program_scope():
#             inp = fluid.layers.data(name="inp", shape=[3], append_batch_size=False)
#             l = MyLayer("my_layer")
#             x = l(inp)[0]
#             param_grads = fluid.backward.append_backward(x, parameter_list=[l._x_for_debug.name])[0]
#             exe = fluid.Executor(fluid.CPUPlace(
#             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
#             static_out, static_grad = exe.run(feed={inp.name: np_inp},
#                                               fetch_list=[x.name, param_grads[1].name])
#         self.assertTrue(np.allclose(dy_out, static_out))
#         self.assertTrue(np.allclose(dy_grad, static_grad))
#
#     def test_mlp(self):
#         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
#         with fluid.imperative.guard():
#             var_inp = fluid.imperative.base.to_variable(np_inp)
#             mlp = MLP("mlp")
#             out = mlp(var_inp)
#             dy_out = out._numpy()
#             out._backward()
#             dy_grad = mlp._fc1._w._gradient()
#         with new_program_scope():
#             inp = fluid.layers.data(
#                 name="inp", shape=[2, 2], append_batch_size=False)
#             mlp = MLP("mlp")
#             out = mlp(inp)
#             param_grads = fluid.backward.append_backward(out, parameter_list=[mlp._fc1._w.name])[0]
#             exe = fluid.Executor(fluid.CPUPlace(
#             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
#             exe.run(fluid.default_startup_program())
#             static_out, static_grad = exe.run(
#                 feed={inp.name: np_inp},
#                 fetch_list=[out.name, param_grads[1].name])
#         self.assertTrue(np.allclose(dy_out, static_out))
#         self.assertTrue(np.allclose(dy_grad, static_grad))
#         params = mlp.parameters(True)
#         self.assertEqual("mlp/MLP_0/FC_0_0.w_0", params[0].name)
#         self.assertEqual("mlp/MLP_0/FC_0_0.b_0", params[1].name)
#         self.assertEqual("mlp/MLP_0/FC_1_0.w_0", params[2].name)
#         self.assertEqual("mlp/MLP_0/FC_1_0.b_0", params[3].name)
#         self.assertEqual(len(params), 4)
#         sublayers = mlp.sublayers(True)
#         self.assertEqual(mlp._fc1, sublayers[0])
#         self.assertEqual(mlp._fc2, sublayers[1])
#         self.assertEqual(len(sublayers), 2)
#
#     def test_rnn(self):
#         np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
#                            [10.0, 11.0, 12.0]])
#         np_inp = np_inp.reshape((1, 4, 3))
#         np_inp = np_inp.astype(np.float32)
#         with fluid.imperative.guard():
#             var_inp = fluid.imperative.base.to_variable(np_inp)
#             var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
#             simple_rnn = SimpleRNN("simple_rnn")
#             outs, pre_hiddens = simple_rnn.forward(var_inp)
#             dy_out = outs[3]._numpy()
#             outs[3]._backward()
#             dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
#             dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
#             dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
#         with new_program_scope():
#             inp = fluid.layers.data(
#                 name="inp", shape=[1, 4, 3], append_batch_size=False)
#             simple_rnn = SimpleRNN("simple_rnn")
#             outs, pre_hiddens = simple_rnn(inp)
#             param_grads = fluid.backward.append_backward(outs[3])
#             exe = fluid.Executor(fluid.CPUPlace())
#             exe.run(fluid.default_startup_program())
#             static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
#                 feed={inp.name: np_inp},
#                 fetch_list=[
#                     outs[3].name, param_grads[0][1].name,
#                     param_grads[1][1].name, param_grads[2][1].name
#                 ])
#         self.assertTrue(np.allclose(dy_out, static_out))
#         self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
#         self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
#         self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))


class TestImperativePyLayer(unittest.TestCase):
    def test_pylayer_func_id(self):
        with fluid.imperative.guard():

            class PyLayer1(fluid.imperative.PyLayer):
                def __init__(self):
                    super(PyLayer1, self).__init__()

                @staticmethod
                def forward(input):
                    return input

                @staticmethod
                def backward(input):
                    return input

            class PyLayer2(fluid.imperative.PyLayer):
                def __init__(self):
                    super(PyLayer2, self).__init__()

                @staticmethod
                def forward(input):
                    return input

                @staticmethod
                def backward(input):
                    return input

            py_layer_1 = PyLayer1()
            py_layer_2 = PyLayer2()
            py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2])))
            py_layer_2(fluid.imperative.base.to_variable(np.ones([2, 2])))
            id = py_layer_1.forward_id
            self.assertGreater(id, 0)
            self.assertEqual(py_layer_1.backward_id, id + 1)
            self.assertEqual(py_layer_2.forward_id, id + 2)
            self.assertEqual(py_layer_2.backward_id, id + 3)
            py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2])))
            self.assertEqual(py_layer_1.forward_id, id)

    def test_pylayer(self):
        np_inp = np.ones([2, 2], np.float32)
        with fluid.imperative.guard():
            my_py_layer = MyPyLayer()
            var_inp = fluid.imperative.base.to_variable(np_inp)
            outs = my_py_layer(var_inp)
            dy_out = np.sum(outs[0]._numpy())
            outs[0]._backward()
            dy_grad = var_inp._gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False)
            # TODO(panyx0718): Paddle doesn't diff against data `inp`.
            x1 = inp * 1
            # TODO(panyx0718): If reduce_sum is skipped, the result is wrong.
            x = fluid.layers.reduce_sum(fluid.layers.tanh(x1))
            param_grads = fluid.backward.append_backward(
                x, parameter_list=[x1.name])[0]
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[x.name, param_grads[1].name])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))


if __name__ == '__main__':
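A condensed, stand-alone sketch of the two patterns exercised above: the eager sum/gradient check from the commented-out test_sum_op and the forward_id/backward_id registration check from the new test_pylayer_func_id. It assumes the 2019-era fluid.imperative API used in this diff (guard, to_variable, PyLayer, _backward, _numpy, _gradient) and is not part of the commit; the Identity class name is illustrative only.

import numpy as np
import paddle.fluid as fluid


class Identity(fluid.imperative.PyLayer):
    # A PyLayer whose forward and backward both pass the input through,
    # mirroring PyLayer1/PyLayer2 in test_pylayer_func_id.
    def __init__(self):
        super(Identity, self).__init__()

    @staticmethod
    def forward(input):
        return input

    @staticmethod
    def backward(input):
        return input


if __name__ == '__main__':
    x = np.ones([2, 2], np.float32)
    with fluid.imperative.guard():
        # Eager sum and gradient check, as in TestImperative.test_sum_op.
        inputs = [fluid.imperative.base.to_variable(x) for _ in range(10)]
        ret = fluid.layers.sums(inputs)
        loss = fluid.layers.reduce_sum(ret)
        loss._backward()
        assert np.allclose(ret._numpy(), x * 10)
        assert np.allclose(inputs[0]._gradient(), x)

        # PyLayer registration check, as in test_pylayer_func_id: the forward
        # and backward callables of a PyLayer get consecutive ids once the
        # layer has been called.
        layer = Identity()
        layer(fluid.imperative.base.to_variable(x))
        assert layer.backward_id == layer.forward_id + 1

test_pylayer_func_id also calls py_layer_1 a second time and checks that forward_id is unchanged, i.e. the ids are assigned on first use and then reused.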