s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit cce766d7
Authored Apr 01, 2019 by minqiyang
Reverse iterator op's input
test=develop
Parent 1a55f7d3
Showing 3 changed files with 209 additions and 234 deletions (+209 -234)
paddle/fluid/imperative/layer.cc                                +5   -17
python/paddle/fluid/framework.py                                +15  -10
python/paddle/fluid/tests/unittests/test_imperative_basic.py    +189 -207
paddle/fluid/imperative/layer.cc
@@ -81,10 +81,6 @@ class TensorAddToFunctor : public boost::static_visitor<> {
 
 }  // namespace detail
 
-template <int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenVector = framework::EigenVector<float, MajorType, IndexType>;
-
 void AddTo(Variable* src, Variable* dst, platform::Place place) {
   framework::Tensor* dst_tensor = dst->GetMutable<framework::LoDTensor>();
   framework::Tensor* src_tensor = src->GetMutable<framework::LoDTensor>();
@@ -99,18 +95,10 @@ void AddTo(Variable* src, Variable* dst, platform::Place place) {
                     "dst_numel %lld vs. src_numel %lld", dst_tensor->numel(),
                     src_tensor->numel());
-  auto result = EigenVector<>::Flatten(*dst_tensor);
-  auto in_0_e = EigenVector<>::Flatten(*dst_tensor);
-  auto in_1_e = EigenVector<>::Flatten(*src_tensor);
-  platform::DeviceContext* dev_ctx =
-      platform::DeviceContextPool::Instance().Get(place);
-  platform::CPUDeviceContext* x =
-      reinterpret_cast<platform::CPUDeviceContext*>(dev_ctx);
-  result.device(*x->eigen_device()) = in_0_e + in_1_e;
-  // detail::TensorAddToFunctor<float> func(
-  //     src_tensor->numel(), src_tensor->data<float>(),
-  //     dst_tensor->mutable_data<float>(place));
-  // boost::apply_visitor(func, place);
+  detail::TensorAddToFunctor<float> func(
+      src_tensor->numel(), src_tensor->data<float>(),
+      dst_tensor->mutable_data<float>(place));
+  boost::apply_visitor(func, place);
 }
 
 class Autograd {
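The hunk above swaps the Eigen-based in-place sum back to the place-dispatched detail::TensorAddToFunctor driven by boost::apply_visitor. Below is a minimal standalone sketch of that dispatch pattern only; CPUPlace, CUDAPlace and AddToFunctor here are illustrative stand-ins, not Paddle's actual types.

    #include <boost/variant.hpp>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct CPUPlace {};
    struct CUDAPlace { int device_id; };
    using Place = boost::variant<CPUPlace, CUDAPlace>;

    // Functor dispatched on the place variant, mirroring the shape of the
    // TensorAddToFunctor<float> used by AddTo above (simplified here).
    struct AddToFunctor : public boost::static_visitor<> {
      AddToFunctor(std::size_t numel, const float* src, float* dst)
          : numel_(numel), src_(src), dst_(dst) {}

      void operator()(const CPUPlace&) const {
        for (std::size_t i = 0; i < numel_; ++i) dst_[i] += src_[i];
      }
      void operator()(const CUDAPlace&) const {
        // A real implementation would launch a device kernel here.
      }

      std::size_t numel_;
      const float* src_;
      float* dst_;
    };

    int main() {
      std::vector<float> src{1.f, 2.f, 3.f};
      std::vector<float> dst{10.f, 20.f, 30.f};
      Place place = CPUPlace{};
      AddToFunctor func(src.size(), src.data(), dst.data());
      boost::apply_visitor(func, place);  // dst becomes {11, 22, 33}
      std::cout << dst[0] << " " << dst[1] << " " << dst[2] << "\n";
      return 0;
    }

Keeping the functor form lets AddTo select the backend implementation from the runtime place without the caller knowing which device is active.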
@@ -134,7 +122,7 @@ class Autograd {
       std::map<std::string, std::vector<VarBase*>> input_grads =
           ready_op->ApplyGrad();
 
-      for (auto it : input_grads) {
+      for (auto it = input_grads.rbegin(); it != input_grads.rend(); ++it) {
         const std::vector<VarBase*>& ingrads = it.second;
         for (int64_t i = ingrads.size() - 1; i >= 0; --i) {
           if (!ingrads[i]) continue;
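The reverse-iterator loop above is what the commit title refers to: the gradients in the input_grads map are now visited in reverse key order instead of forward order. A small self-contained sketch of the difference between the two loop forms (the map contents here are made up for illustration):

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // Illustrative stand-in for the input_grads map in Autograd's backward pass.
      std::map<std::string, std::vector<int>> input_grads{
          {"a", {1}}, {"b", {2}}, {"c", {3}}};

      // Old form: forward, range-based iteration; `it` is a copy of each pair.
      for (auto it : input_grads) {
        std::cout << it.first << ' ';   // prints: a b c
      }
      std::cout << '\n';

      // New form: reverse iteration; `it` is an iterator, so its members are
      // reached with ->, e.g. it->second for the gradient vector.
      for (auto it = input_grads.rbegin(); it != input_grads.rend(); ++it) {
        std::cout << it->first << ' ';  // prints: c b a
      }
      std::cout << '\n';
      return 0;
    }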
python/paddle/fluid/framework.py
@@ -104,14 +104,14 @@ def cuda_places(device_ids=None):
     :code:`FLAGS_selected_gpus=0,1,2`, the returned list would
     be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
     If :code:`FLAGS_selected_gpus` is not set, all visible
     gpu places would be returned.
 
     If :code:`device_ids` is not None, it should be the device
     ids of gpus. For example, if :code:`device_ids=[0,1,2]`,
     the returned list would be
     [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
 
     Args:
         device_ids (None|list(int)|tuple(int)): gpu device id list.
 
     Returns:
@@ -133,11 +133,11 @@ def cuda_places(device_ids=None):
 def cpu_places(device_count=None):
     '''
     Create a list of :code:`fluid.CPUPlace` objects.
 
     If :code:`device_count` is None, the device count would
     be determined by environment variable :code:`CPU_NUM`.
     If :code:`CPU_NUM` is not set, the device count would
     be determined by :code:`multiprocessing.cpu_count()`.
 
     Args:
         device_count (None|int): device number.
@@ -155,9 +155,9 @@ def cuda_pinned_places(device_count=None):
     Create a list of :code:`fluid.CUDAPinnedPlace` objects.
 
     If :code:`device_count` is None, the device count would
     be determined by environment variable :code:`CPU_NUM`.
     If :code:`CPU_NUM` is not set, the device count would
     be determined by :code:`multiprocessing.cpu_count()`.
 
     Args:
         device_count (None|int): device number.
@@ -2716,6 +2716,11 @@ class Program(object):
         # whether the program is optimized by memory_optimize_transpiler
         self.__is_mem_optimized = False
 
+        # if this program has been optimized by distributed optimizer
+        # fleet_opt will be given a value
+        self._fleet_opt = None
+        self._program_config = None
+
     @property
     def _is_mem_optimized(self):
         # if the program is optimized, operator input/outputs
python/paddle/fluid/tests/unittests/test_imperative_basic.py
@@ -51,22 +51,23 @@ class MyPyLayer(fluid.dygraph.PyLayer):
 
 class MLP(fluid.dygraph.Layer):
     def __init__(self, name_scope):
         super(MLP, self).__init__(name_scope)
-        self._fc1 = FC(self.full_name(), 3)
-        # self._fc2 = FC(self.full_name(),
-        #                4)
-        # self._fc3 = FC(self.full_name(),
-        #                4)
-        self._fc_list = []
-        for i in range(100):
-            fc3 = FC(self.full_name(),
-                     4)
-            self._fc_list.append(fc3)
+        self._fc1 = FC(self.full_name(),
+                       3,
+                       param_attr=fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)),
+                       bias_attr=fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)))
+        self._fc2 = FC(self.full_name(),
+                       4,
+                       param_attr=fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)),
+                       bias_attr=fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)))
 
     def forward(self, inputs):
         x = self._fc1(inputs)
-        y1 = self._fc2(x)
-        y2 = self._fc3(x)
-        z = fluid.layers.concat([y1, y2])
-        x = fluid.layers.reduce_sum(z)
+        x = self._fc2(x)
+        x = fluid.layers.reduce_sum(x)
         return x
@@ -191,215 +192,196 @@ class SimpleRNN(fluid.dygraph.Layer):
 
 class TestImperative(unittest.TestCase):
-#     def test_sum_op(self):
-#         x = np.ones([2, 2], np.float32)
-#         with fluid.dygraph.guard():
-#             inputs = []
-#             for _ in range(10):
-#                 inputs.append(fluid.dygraph.base.to_variable(x))
-#             ret = fluid.layers.sums(inputs)
-#             loss = fluid.layers.reduce_sum(ret)
-#             loss._backward()
-#             self.assertTrue(np.allclose(ret._numpy(), x * 10))
-#             self.assertTrue(np.allclose(inputs[0]._gradient(), x))
+    def test_sum_op(self):
+        x = np.ones([2, 2], np.float32)
+        with fluid.dygraph.guard():
+            inputs = []
+            for _ in range(10):
+                inputs.append(fluid.dygraph.base.to_variable(x))
+            ret = fluid.layers.sums(inputs)
+            loss = fluid.layers.reduce_sum(ret)
+            loss._backward()
+            self.assertTrue(np.allclose(ret._numpy(), x * 10))
+            self.assertTrue(np.allclose(inputs[0]._gradient(), x))
 
-#     def test_layer(self):
-#         with fluid.dygraph.guard():
-#             cl = core.Layer()
-#             cl.forward([])
-#             l = fluid.dygraph.Layer("l")
-#             self.assertRaises(NotImplementedError, l.forward, [])
+    def test_layer(self):
+        with fluid.dygraph.guard():
+            cl = core.Layer()
+            cl.forward([])
+            l = fluid.dygraph.Layer("l")
+            self.assertRaises(NotImplementedError, l.forward, [])
 
-#     def test_pylayer_func_id(self):
-#         with fluid.dygraph.guard():
-#             class PyLayer1(fluid.dygraph.PyLayer):
-#                 def __init__(self):
-#                     super(PyLayer1, self).__init__()
-#                 @staticmethod
-#                 def forward(input):
-#                     return input
-#                 @staticmethod
-#                 def backward(input):
-#                     return input
-#             class PyLayer2(fluid.dygraph.PyLayer):
-#                 def __init__(self):
-#                     super(PyLayer2, self).__init__()
-#                 @staticmethod
-#                 def forward(input):
-#                     return input
-#                 @staticmethod
-#                 def backward(input):
-#                     return input
-#             py_layer_1 = PyLayer1()
-#             py_layer_2 = PyLayer2()
-#             py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2])))
-#             py_layer_2(fluid.dygraph.base.to_variable(np.ones([2, 2])))
-#             id = py_layer_1.forward_id
-#             self.assertGreater(id, 0)
-#             self.assertEqual(py_layer_1.backward_id, id + 1)
-#             self.assertEqual(py_layer_2.forward_id, id + 2)
-#             self.assertEqual(py_layer_2.backward_id, id + 3)
-#             py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2])))
-#             self.assertEqual(py_layer_1.forward_id, id)
+    def test_pylayer_func_id(self):
+        with fluid.dygraph.guard():
+
+            class PyLayer1(fluid.dygraph.PyLayer):
+                def __init__(self):
+                    super(PyLayer1, self).__init__()
+
+                @staticmethod
+                def forward(input):
+                    return input
+
+                @staticmethod
+                def backward(input):
+                    return input
+
+            class PyLayer2(fluid.dygraph.PyLayer):
+                def __init__(self):
+                    super(PyLayer2, self).__init__()
+
+                @staticmethod
+                def forward(input):
+                    return input
+
+                @staticmethod
+                def backward(input):
+                    return input
+
+            py_layer_1 = PyLayer1()
+            py_layer_2 = PyLayer2()
+            py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2])))
+            py_layer_2(fluid.dygraph.base.to_variable(np.ones([2, 2])))
+            id = py_layer_1.forward_id
+            self.assertGreater(id, 0)
+            self.assertEqual(py_layer_1.backward_id, id + 1)
+            self.assertEqual(py_layer_2.forward_id, id + 2)
+            self.assertEqual(py_layer_2.backward_id, id + 3)
+            py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2])))
+            self.assertEqual(py_layer_1.forward_id, id)
 
-#     def test_pylayer(self):
-#         np_inp = np.ones([2, 2], np.float32)
-#         with fluid.dygraph.guard():
-#             my_py_layer = MyPyLayer()
-#             var_inp = fluid.dygraph.base.to_variable(np_inp)
-#             outs = my_py_layer(var_inp)
-#             dy_out = np.sum(outs[0]._numpy())
-#             outs[0]._backward()
-#             dy_grad = var_inp._gradient()
-#         with new_program_scope():
-#             inp = fluid.layers.data(
-#                 name="inp", shape=[2, 2], append_batch_size=False)
-#             # TODO(panyx0718): Paddle doesn't diff against data `inp`.
-#             x1 = inp * 1
-#             # TODO(panyx0718): If reduce_sum is skipped, the result is wrong.
-#             x = fluid.layers.reduce_sum(fluid.layers.tanh(x1))
-#             param_grads = fluid.backward.append_backward(
-#                 x, parameter_list=[x1.name])[0]
-#             exe = fluid.Executor(fluid.CPUPlace(
-#             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
-#             static_out, static_grad = exe.run(
-#                 feed={inp.name: np_inp},
-#                 fetch_list=[x.name, param_grads[1].name])
-#         self.assertTrue(np.allclose(dy_out, static_out))
-#         self.assertTrue(np.allclose(dy_grad, static_grad))
+    def test_pylayer(self):
+        np_inp = np.ones([2, 2], np.float32)
+        with fluid.dygraph.guard():
+            my_py_layer = MyPyLayer()
+            var_inp = fluid.dygraph.base.to_variable(np_inp)
+            outs = my_py_layer(var_inp)
+            dy_out = np.sum(outs[0]._numpy())
+            outs[0]._backward()
+            dy_grad = var_inp._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[2, 2], append_batch_size=False)
+            # TODO(panyx0718): Paddle doesn't diff against data `inp`.
+            x1 = inp * 1
+            # TODO(panyx0718): If reduce_sum is skipped, the result is wrong.
+            x = fluid.layers.reduce_sum(fluid.layers.tanh(x1))
+            param_grads = fluid.backward.append_backward(
+                x, parameter_list=[x1.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[x.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
 
-#     def test_layer_in_out(self):
-#         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
-#         with fluid.dygraph.guard():
-#             var_inp = fluid.dygraph.base.to_variable(np_inp)
-#             l = MyLayer("my_layer")
-#             x = l(var_inp)[0]
-#             self.assertIsNotNone(x)
-#             dy_out = x._numpy()
-#             x._backward()
-#             dy_grad = l._x_for_debug._gradient()
-#         with new_program_scope():
-#             inp = fluid.layers.data(
-#                 name="inp", shape=[3], append_batch_size=False)
-#             l = MyLayer("my_layer")
-#             x = l(inp)[0]
-#             param_grads = fluid.backward.append_backward(
-#                 x, parameter_list=[l._x_for_debug.name])[0]
-#             exe = fluid.Executor(fluid.CPUPlace(
-#             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
-#             static_out, static_grad = exe.run(
-#                 feed={inp.name: np_inp},
-#                 fetch_list=[x.name, param_grads[1].name])
-#         self.assertTrue(np.allclose(dy_out, static_out))
-#         self.assertTrue(np.allclose(dy_grad, static_grad))
+    def test_layer_in_out(self):
+        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
+        with fluid.dygraph.guard():
+            var_inp = fluid.dygraph.base.to_variable(np_inp)
+            l = MyLayer("my_layer")
+            x = l(var_inp)[0]
+            self.assertIsNotNone(x)
+            dy_out = x._numpy()
+            x._backward()
+            dy_grad = l._x_for_debug._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[3], append_batch_size=False)
+            l = MyLayer("my_layer")
+            x = l(inp)[0]
+            param_grads = fluid.backward.append_backward(
+                x, parameter_list=[l._x_for_debug.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[x.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
 
     def test_mlp(self):
-        seed = 90
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-        with fluid.dygraph.guard(place=fluid.CPUPlace()):
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
+        with fluid.dygraph.guard():
             var_inp = fluid.dygraph.base.to_variable(np_inp)
             mlp = MLP("mlp")
-            opt = fluid.optimizer.SGDOptimizer(learning_rate=0.001)
-            for i in range(100):
-                out = mlp(var_inp)
-                dy_out = out._numpy()
-                out._backward()
-                dy_grad = mlp._fc1._w._gradient()
-                opt.minimize(out)
-                dy_fc0_w0 = mlp._fc1._w._numpy()
-                mlp.clear_gradients()
+            out = mlp(var_inp)
+            dy_out = out._numpy()
+            out._backward()
+            dy_grad = mlp._fc1._w._gradient()
 
         with new_program_scope():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
             inp = fluid.layers.data(
                 name="inp", shape=[2, 2], append_batch_size=False)
             mlp = MLP("mlp")
             out = mlp(inp)
-            opt = fluid.optimizer.SGDOptimizer(learning_rate=0.001)
-            opt.minimize(out)
-            # param_grads = fluid.backward.append_backward(
-            #     out, parameter_list=[mlp._fc1._w.name])[0]
-            exe = fluid.Executor(fluid.CPUPlace())
+            param_grads = fluid.backward.append_backward(
+                out, parameter_list=[mlp._fc1._w.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
             exe.run(fluid.default_startup_program())
-            for i in range(100):
-                static_out, static_grad, static_fc0_w0 = exe.run(
-                    feed={inp.name: np_inp},
-                    fetch_list=[out.name, "mlp/MLP_0/FC_0.w_0@GRAD",
-                                "mlp/MLP_0/FC_0.w_0"
-                                ])
-        print(dy_out, static_out)
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[out.name, param_grads[1].name])
+
         self.assertTrue(np.allclose(dy_out, static_out))
-        self.assertTrue(np.array_equal(dy_grad, static_grad))
-        print(dy_fc0_w0, static_fc0_w0)
-        #params = mlp.parameters(True)
-        #self.assertEqual("mlp/MLP_0/FC_0.w_0", params[0].name)
-        #self.assertEqual("mlp/MLP_0/FC_0.b_0", params[1].name)
-        #self.assertEqual("mlp/MLP_0/FC_1.w_0", params[2].name)
-        #self.assertEqual("mlp/MLP_0/FC_1.b_0", params[3].name)
-        #self.assertEqual(len(params), 4)
-        #sublayers = mlp.sublayers(True)
-        #self.assertEqual(mlp._fc1, sublayers[0])
-        #self.assertEqual(mlp._fc2, sublayers[1])
-        #self.assertEqual(len(sublayers), 2)
+        self.assertTrue(np.allclose(dy_grad, static_grad))
+
+        params = mlp.parameters(True)
+        self.assertEqual("mlp/MLP_0/FC_0.w_0", params[0].name)
+        self.assertEqual("mlp/MLP_0/FC_0.b_0", params[1].name)
+        self.assertEqual("mlp/MLP_0/FC_1.w_0", params[2].name)
+        self.assertEqual("mlp/MLP_0/FC_1.b_0", params[3].name)
+        self.assertEqual(len(params), 4)
+
+        sublayers = mlp.sublayers(True)
+        self.assertEqual(mlp._fc1, sublayers[0])
+        self.assertEqual(mlp._fc2, sublayers[1])
+        self.assertEqual(len(sublayers), 2)
 
-# def test_rnn(self):
-#     np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
-#                        [10.0, 11.0, 12.0]])
-#     np_inp = np_inp.reshape((1, 4, 3))
-#     np_inp = np_inp.astype(np.float32)
-#     with fluid.dygraph.guard():
-#         var_inp = fluid.dygraph.base.to_variable(np_inp)
-#         var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
-#         simple_rnn = SimpleRNN("simple_rnn")
-#         outs, pre_hiddens = simple_rnn.forward(var_inp)
-#         dy_out = outs[3]._numpy()
-#         outs[3]._backward()
-#         dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
-#         dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
-#         dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
-#     with new_program_scope():
-#         inp = fluid.layers.data(
-#             name="inp", shape=[1, 4, 3], append_batch_size=False)
-#         simple_rnn = SimpleRNN("simple_rnn")
-#         outs, pre_hiddens = simple_rnn(inp)
-#         param_grads = fluid.backward.append_backward(outs[3])
-#         exe = fluid.Executor(fluid.CPUPlace())
-#         exe.run(fluid.default_startup_program())
-#         static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
-#             feed={inp.name: np_inp},
-#             fetch_list=[
-#                 outs[3].name, param_grads[0][1].name,
-#                 param_grads[1][1].name, param_grads[2][1].name
-#             ])
-#         self.assertTrue(np.allclose(dy_out, static_out))
-#         self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
-#         self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
-#         self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
+    def test_rnn(self):
+        np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
+                           [10.0, 11.0, 12.0]])
+        np_inp = np_inp.reshape((1, 4, 3))
+        np_inp = np_inp.astype(np.float32)
+        with fluid.dygraph.guard():
+            var_inp = fluid.dygraph.base.to_variable(np_inp)
+            var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
+            simple_rnn = SimpleRNN("simple_rnn")
+            outs, pre_hiddens = simple_rnn.forward(var_inp)
+            dy_out = outs[3]._numpy()
+            outs[3]._backward()
+            dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
+            dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
+            dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()

+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[1, 4, 3], append_batch_size=False)
+            simple_rnn = SimpleRNN("simple_rnn")
+            outs, pre_hiddens = simple_rnn(inp)
+            param_grads = fluid.backward.append_backward(outs[3])
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+            static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[
+                    outs[3].name, param_grads[0][1].name,
+                    param_grads[1][1].name, param_grads[2][1].name
+                ])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
+        self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
+        self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
 
 
 if __name__ == '__main__':