Commit 3723dcc3 (Crayon鑫 / Paddle, forked from PaddlePaddle / Paddle)

Polish code

test=develop

Authored by minqiyang on Feb 27, 2019; committed by ceci3 on Mar 04, 2019.
Parent: afc3fcd5

Showing 4 changed files with 136 additions and 135 deletions (+136, -135):
paddle/fluid/framework/block_desc.cc                             +1    -0
paddle/fluid/imperative/layer.h                                  +2    -7
python/paddle/fluid/initializer.py                               +9    -9
python/paddle/fluid/tests/unittests/test_imperative_basic.py   +124  -119
paddle/fluid/framework/block_desc.cc  (+1, -0)

...
@@ -156,6 +156,7 @@ void BlockDesc::RemoveOp(size_t s, size_t e) {
 }
 
 void BlockDesc::RemoveOpInternal(const OpDesc *op_desc) {
+  // TODO(minqiyang): make this faster
   for (auto it = ops_.begin(); it != ops_.end(); ++it) {
     if (it->get() == op_desc) {
       ops_.erase(it);
...
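Note: the only change in this file is the new TODO comment. RemoveOpInternal removes an op by scanning ops_ linearly and comparing raw pointers, which is the cost the TODO points at. A minimal, self-contained sketch of that erase-by-identity pattern (plain illustrative Python, not Paddle code):

ops = [object() for _ in range(5)]   # stand-ins for the owned OpDesc objects
target = ops[3]

for i, op in enumerate(ops):
    if op is target:                 # identity check, like it->get() == op_desc
        del ops[i]                   # linear scan: O(n) per removal
        break

assert target not in ops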
paddle/fluid/imperative/layer.h  (+2, -7)

...
@@ -235,6 +235,8 @@ class PYBIND11_HIDDEN OpBase {
         backward_hooks_() {}
 
   virtual ~OpBase() {
+    // TODO(minqiyang): remove op_desc from block_desc in tracer
+    //
     // reset all output vars' pre op
     for (auto iter : output_vars_) {
       for (VarBase* var : iter.second) {
...
@@ -242,13 +244,6 @@ class PYBIND11_HIDDEN OpBase {
       }
     }
 
-    // remove op desc from block desc
-    if (op_desc_) {
-      if (block_) {
-        block_->RemoveOpInternal(op_desc_);
-      }
-    }
-
     // release resource
     for (framework::OpDesc* desc : grad_op_descs_) {
       delete desc;
...
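Note: the deleted block was the part of ~OpBase that unhooked op_desc_ from its BlockDesc; per the new TODO, that bookkeeping is meant to move into the tracer. A small sketch of that direction (plain Python with hypothetical Block/Op stand-ins, not Paddle code), where the owner removes the entry explicitly instead of the element doing it in its destructor:

class Block:
    def __init__(self):
        self.ops = []

    def remove_op(self, op):
        # Owner-side removal, loosely analogous to BlockDesc::RemoveOpInternal.
        self.ops = [o for o in self.ops if o is not op]


class Op:
    def __init__(self, block):
        self.block = block
        block.ops.append(self)
    # Deliberately no __del__ that calls block.remove_op(self); that is the
    # destructor-side coupling the commit removes from ~OpBase.


blk = Block()
op = Op(blk)
blk.remove_op(op)   # explicit, owner-driven cleanup
assert op not in blk.ops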
python/paddle/fluid/initializer.py  (+9, -9)

...
@@ -19,7 +19,7 @@ import numpy as np
 from .wrapped_decorator import signature_safe_contextmanager
 from .core import VarDesc
 from . import unique_name
-from .imperative import base
+from .imperative import base as imperative_base
 
 __all__ = [
     'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
...
@@ -166,7 +166,7 @@ class ConstantInitializer(Initializer):
                 'force_cpu': self._force_cpu or force_init_on_cpu()
             },
             stop_gradient=True)
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
@@ -246,7 +246,7 @@ class UniformInitializer(Initializer):
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
@@ -325,7 +325,7 @@ class NormalInitializer(Initializer):
                 outputs={"Out": var},
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
@@ -404,7 +404,7 @@ class TruncatedNormalInitializer(Initializer):
                 outputs={"Out": var},
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
@@ -510,7 +510,7 @@ class XavierInitializer(Initializer):
                 "seed": self._seed
             },
             stop_gradient=True)
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
@@ -611,7 +611,7 @@ class MSRAInitializer(Initializer):
                 "seed": self._seed
            },
             stop_gradient=True)
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
@@ -710,7 +710,7 @@ class BilinearInitializer(Initializer):
                 'shape': list(shape),
                 value_name: values
             })
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
@@ -769,7 +769,7 @@ class NumpyArrayInitializer(Initializer):
                 value_name: values
             },
             stop_gradient=True)
 
-        if not base.enabled():
+        if not imperative_base.enabled():
             var.op = op
         return op
...
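Note: the Python change is mechanical: the imperative base module is imported under the explicit alias imperative_base, and every initializer's trailing guard becomes "if not imperative_base.enabled(): var.op = op". A self-contained sketch of that pattern (stand-in classes, illustrative only, not the real Paddle modules):

class _base:                     # stand-in for paddle.fluid.imperative.base
    _in_imperative_mode = False

    @classmethod
    def enabled(cls):
        return cls._in_imperative_mode


imperative_base = _base          # as in: from .imperative import base as imperative_base


class Var(object):               # stand-in for a framework Variable
    op = None


def finish_init(var, op):
    # Tail of each Initializer.__call__ in this diff: only static-graph
    # variables record the op that created them.
    if not imperative_base.enabled():
        var.op = op
    return op


v = Var()
finish_init(v, "fill_constant")
assert v.op == "fill_constant"   # static mode: op recorded

_base._in_imperative_mode = True
w = Var()
finish_init(w, "uniform_random")
assert w.op is None              # imperative mode: op not recorded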
python/paddle/fluid/tests/unittests/test_imperative_basic.py  (+124, -119)

...
@@ -191,126 +191,28 @@ class SimpleRNN(fluid.imperative.Layer):
         return outs, pre_hiddens
 
 
-# class TestImperative(unittest.TestCase):
-#     def test_sum_op(self):
-#         x = np.ones([2, 2], np.float32)
-#         with fluid.imperative.guard():
-#             inputs = []
-#             for _ in range(10):
-#                 inputs.append(fluid.imperative.base.to_variable(x))
-#             ret = fluid.layers.sums(inputs)
-#             loss = fluid.layers.reduce_sum(ret)
-#             loss._backward()
-#             self.assertTrue(np.allclose(ret._numpy(), x * 10))
-#             self.assertTrue(np.allclose(inputs[0]._gradient(), x))
-
-#     def test_layer(self):
-#         with fluid.imperative.guard():
-#             cl = core.Layer()
-#             cl.forward([])
-#             l = fluid.imperative.Layer("l")
-#             self.assertRaises(NotImplementedError, l.forward, [])
-
-#     def test_layer_in_out(self):
-#         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
-#         with fluid.imperative.guard():
-#             var_inp = fluid.imperative.base.to_variable(np_inp)
-#             l = MyLayer("my_layer")
-#             x = l(var_inp)[0]
-#             self.assertIsNotNone(x)
-#             dy_out = x._numpy()
-#             x._backward()
-#             dy_grad = l._x_for_debug._gradient()
-
-#         with new_program_scope():
-#             inp = fluid.layers.data(name="inp", shape=[3], append_batch_size=False)
-#             l = MyLayer("my_layer")
-#             x = l(inp)[0]
-#             param_grads = fluid.backward.append_backward(x, parameter_list=[l._x_for_debug.name])[0]
-#             exe = fluid.Executor(fluid.CPUPlace(
-#             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
-
-#             static_out, static_grad = exe.run(feed={inp.name: np_inp},
-#                                               fetch_list=[x.name, param_grads[1].name])
-
-#         self.assertTrue(np.allclose(dy_out, static_out))
-#         self.assertTrue(np.allclose(dy_grad, static_grad))
-
-#         with fluid.imperative.guard():
-#             var_inp = fluid.imperative.base.to_variable(np_inp)
-#             mlp = MLP("mlp")
-#             out = mlp(var_inp)
-#             dy_out = out._numpy()
-#             out._backward()
-#             dy_grad = mlp._fc1._w._gradient()
-
-#         with new_program_scope():
-#             inp = fluid.layers.data(
-#                 name="inp", shape=[2, 2], append_batch_size=False)
-#             mlp = MLP("mlp")
-#             out = mlp(inp)
-#             param_grads = fluid.backward.append_backward(out, parameter_list=[mlp._fc1._w.name])[0]
-#             exe = fluid.Executor(fluid.CPUPlace(
-#             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
-#             exe.run(fluid.default_startup_program())
-
-#             static_out, static_grad = exe.run(
-#                 feed={inp.name: np_inp},
-#                 fetch_list=[out.name, param_grads[1].name])
-
-#         self.assertTrue(np.allclose(dy_out, static_out))
-#         self.assertTrue(np.allclose(dy_grad, static_grad))
-
-#         params = mlp.parameters(True)
-#         self.assertEqual("mlp/MLP_0/FC_0_0.w_0", params[0].name)
-#         self.assertEqual("mlp/MLP_0/FC_0_0.b_0", params[1].name)
-#         self.assertEqual("mlp/MLP_0/FC_1_0.w_0", params[2].name)
-#         self.assertEqual("mlp/MLP_0/FC_1_0.b_0", params[3].name)
-#         self.assertEqual(len(params), 4)
-
-#         sublayers = mlp.sublayers(True)
-#         self.assertEqual(mlp._fc1, sublayers[0])
-#         self.assertEqual(mlp._fc2, sublayers[1])
-#         self.assertEqual(len(sublayers), 2)
-
-#     def test_rnn(self):
-#         np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
-#                            [10.0, 11.0, 12.0]])
-#         np_inp = np_inp.reshape((1, 4, 3))
-#         np_inp = np_inp.astype(np.float32)
-#         with fluid.imperative.guard():
-#             var_inp = fluid.imperative.base.to_variable(np_inp)
-#             var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
-#             simple_rnn = SimpleRNN("simple_rnn")
-#             outs, pre_hiddens = simple_rnn.forward(var_inp)
-#             dy_out = outs[3]._numpy()
-#             outs[3]._backward()
-#             dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
-#             dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
-#             dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
-
-#         with new_program_scope():
-#             inp = fluid.layers.data(
-#                 name="inp", shape=[1, 4, 3], append_batch_size=False)
-#             simple_rnn = SimpleRNN("simple_rnn")
-#             outs, pre_hiddens = simple_rnn(inp)
-#             param_grads = fluid.backward.append_backward(outs[3])
-#             exe = fluid.Executor(fluid.CPUPlace())
-#             exe.run(fluid.default_startup_program())
-#             static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
-#                 feed={inp.name: np_inp},
-#                 fetch_list=[
-#                     outs[3].name, param_grads[0][1].name,
-#                     param_grads[1][1].name, param_grads[2][1].name
-#                 ])
-
-#         self.assertTrue(np.allclose(dy_out, static_out))
-#         self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
-#         self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
-#         self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
+class TestImperative(unittest.TestCase):
+    def test_sum_op(self):
+        x = np.ones([2, 2], np.float32)
+        with fluid.imperative.guard():
+            inputs = []
+            for _ in range(10):
+                inputs.append(fluid.imperative.base.to_variable(x))
+            ret = fluid.layers.sums(inputs)
+            loss = fluid.layers.reduce_sum(ret)
+            loss._backward()
+            self.assertTrue(np.allclose(ret._numpy(), x * 10))
+            self.assertTrue(np.allclose(inputs[0]._gradient(), x))
+
+    def test_layer(self):
+        with fluid.imperative.guard():
+            cl = core.Layer()
+            cl.forward([])
+            l = fluid.imperative.Layer("l")
+            self.assertRaises(NotImplementedError, l.forward, [])
+
 class TestImperativePyLayer(unittest.TestCase):
     def test_pylayer_func_id(self):
         with fluid.imperative.guard():
             class PyLayer1(fluid.imperative.PyLayer):
...
@@ -378,6 +280,109 @@ class TestImperativePyLayer(unittest.TestCase):
         self.assertTrue(np.allclose(dy_out, static_out))
         self.assertTrue(np.allclose(dy_grad, static_grad))
 
+    def test_layer_in_out(self):
+        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
+        with fluid.imperative.guard():
+            var_inp = fluid.imperative.base.to_variable(np_inp)
+            l = MyLayer("my_layer")
+            x = l(var_inp)[0]
+            self.assertIsNotNone(x)
+            dy_out = x._numpy()
+            x._backward()
+            dy_grad = l._x_for_debug._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[3], append_batch_size=False)
+            l = MyLayer("my_layer")
+            x = l(inp)[0]
+            param_grads = fluid.backward.append_backward(
+                x, parameter_list=[l._x_for_debug.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[x.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
+
+    def test_mlp(self):
+        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
+        with fluid.imperative.guard():
+            var_inp = fluid.imperative.base.to_variable(np_inp)
+            mlp = MLP("mlp")
+            out = mlp(var_inp)
+            dy_out = out._numpy()
+            out._backward()
+            dy_grad = mlp._fc1._w._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[2, 2], append_batch_size=False)
+            mlp = MLP("mlp")
+            out = mlp(inp)
+            param_grads = fluid.backward.append_backward(
+                out, parameter_list=[mlp._fc1._w.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
+            exe.run(fluid.default_startup_program())
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[out.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
+
+        params = mlp.parameters(True)
+        self.assertEqual("mlp/MLP_0/FC_0_0.w_0", params[0].name)
+        self.assertEqual("mlp/MLP_0/FC_0_0.b_0", params[1].name)
+        self.assertEqual("mlp/MLP_0/FC_1_0.w_0", params[2].name)
+        self.assertEqual("mlp/MLP_0/FC_1_0.b_0", params[3].name)
+        self.assertEqual(len(params), 4)
+
+        sublayers = mlp.sublayers(True)
+        self.assertEqual(mlp._fc1, sublayers[0])
+        self.assertEqual(mlp._fc2, sublayers[1])
+        self.assertEqual(len(sublayers), 2)
+
+    def test_rnn(self):
+        np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
+                           [10.0, 11.0, 12.0]])
+        np_inp = np_inp.reshape((1, 4, 3))
+        np_inp = np_inp.astype(np.float32)
+        with fluid.imperative.guard():
+            var_inp = fluid.imperative.base.to_variable(np_inp)
+            var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
+            simple_rnn = SimpleRNN("simple_rnn")
+            outs, pre_hiddens = simple_rnn.forward(var_inp)
+            dy_out = outs[3]._numpy()
+            outs[3]._backward()
+            dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
+            dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
+            dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[1, 4, 3], append_batch_size=False)
+            simple_rnn = SimpleRNN("simple_rnn")
+            outs, pre_hiddens = simple_rnn(inp)
+            param_grads = fluid.backward.append_backward(outs[3])
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+            static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[
+                    outs[3].name, param_grads[0][1].name,
+                    param_grads[1][1].name, param_grads[2][1].name
+                ])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
+        self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
+        self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
+
 
 if __name__ == '__main__':
     unittest.main()
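Note: the values asserted in the re-enabled test_sum_op can be checked with plain NumPy; a short sketch of that arithmetic (NumPy only, no Paddle required):

import numpy as np

x = np.ones([2, 2], np.float32)
inputs = [x for _ in range(10)]

ret = np.sum(inputs, axis=0)            # what fluid.layers.sums(inputs) computes
grad_wrt_input0 = np.ones_like(x)       # d(reduce_sum(ret)) / d(inputs[0])

assert np.allclose(ret, x * 10)         # matches the ret._numpy() assertion
assert np.allclose(grad_wrt_input0, x)  # matches the inputs[0]._gradient() assertion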