PaddlePaddle / Paddle

Commit be2d3ae6
Authored Jan 21, 2019 by minqiyang

Add debug info

Parent: a21f4e38
Showing 2 changed files with 282 additions and 46 deletions (+282, -46)
python/paddle/fluid/layer_helper.py (+2, -0)
python/paddle/fluid/tests/unittests/test_imperative_resnet.py (+280, -46)
python/paddle/fluid/layer_helper.py
...
...
@@ -437,8 +437,10 @@ class LayerHelper(object):
        # NOTE(dzhwinter): some activation support inplace compution.
        # NOTE(minqiyang): currently, we don't support inplace in imperative mode
        if not force_no_inplace and core.IsInplace(act_type):
+            print("inplace")
            tmp = input_var
        else:
+            print("not inplace")
            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
        self.append_op(
            type=act_type,
...
...
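The two print calls added above trace which branch append_activation takes for each activation op. A minimal runnable sketch of that branch, with plain Python stand-ins for core.IsInplace and create_variable_for_type_inference (both stubbed here, not Paddle API):

# Hedged sketch of the inplace decision in LayerHelper.append_activation.
# act_is_inplace stands in for core.IsInplace(act_type); the dict stands in
# for a freshly created output variable.
def choose_activation_output(input_var, act_is_inplace, force_no_inplace=False):
    if not force_no_inplace and act_is_inplace:
        print("inplace")      # debug: the activation writes back into its input
        return input_var
    print("not inplace")      # debug: a separate output variable is allocated
    return {"dtype": input_var["dtype"]}  # stub for create_variable_for_type_inference

var = {"dtype": "float32"}
assert choose_activation_output(var, act_is_inplace=True) is var
assert choose_activation_output(var, act_is_inplace=True, force_no_inplace=True) is not var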
python/paddle/fluid/tests/unittests/test_imperative_resnet.py
...
...
@@ -20,12 +20,13 @@ import six
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
+from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.imperative.nn import Conv2D, Pool2D, BatchNorm, FC
from paddle.fluid.imperative.base import to_variable
from test_imperative_base import new_program_scope

-batch_size = 8
+batch_size = 1
train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
...
...
@@ -88,11 +89,11 @@ class ConvBNLayer(fluid.imperative.Layer):
            act=None,
            bias_attr=None)

-        self._batch_norm = BatchNorm(num_filters, act=act)
+        # self._batch_norm = BatchNorm(num_filters, act=act)

    def forward(self, inputs):
        y = self._conv(inputs)
-        y = self._batch_norm(y)
+        # y = self._batch_norm(y)

        return y
...
...
@@ -139,7 +140,10 @@ class BottleneckBlock(fluid.imperative.Layer):
        else:
            short = self.short(inputs)

-        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+        y = fluid.layers.elementwise_add(x=short, y=conv2)
+        layer_helper = LayerHelper('elementwise_add_activation', act='relu')
+        return layer_helper.append_activation(y, force_no_inplace=True)


class ResNet(fluid.imperative.Layer):
...
...
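Numerically, the rewrite above is a no-op in the forward pass: a fused elementwise_add(..., act='relu') and a plain add followed by a separately appended ReLU compute the same result; splitting them only forces the activation to be its own non-inplace op, so it can be inspected in isolation. A small numpy check of that equivalence (numpy stand-ins, not Paddle API):

import numpy as np

def fused_add_relu(short, conv2):
    # fused form: elementwise_add with act='relu'
    return np.maximum(short + conv2, 0.0)

def split_add_relu(short, conv2):
    y = short + conv2            # elementwise_add without activation
    return np.maximum(y, 0.0)    # activation appended as a separate op

short = np.random.randn(2, 3).astype('float32')
conv2 = np.random.randn(2, 3).astype('float32')
assert np.allclose(fused_add_relu(short, conv2), split_add_relu(short, conv2))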
@@ -200,16 +204,233 @@ class ResNet(fluid.imperative.Layer):
class TestImperativeResnet(unittest.TestCase):
-    def test_resnet_gpu_float32(self):
# def test_resnet_gpu_float32(self):
# seed = 90
# batch_size = train_parameters["batch_size"]
# with fluid.imperative.guard():
# fluid.default_startup_program().random_seed = seed
# fluid.default_main_program().random_seed = seed
# resnet = ResNet()
# optimizer = optimizer_setting(train_parameters)
# np.random.seed(seed)
# import random
# random.seed = seed
# train_reader = paddle.batch(
# paddle.dataset.flowers.train(use_xmap=False),
# batch_size=batch_size)
# dy_param_init_value = {}
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# dy_param_init_value[param.name] = param._numpy()
# for batch_id, data in enumerate(train_reader()):
# if batch_id >= 1:
# break
# dy_x_data = np.array(
# [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
# y_data = np.array([x[1] for x in data]).astype('int64').reshape(
# batch_size, 1)
# img = to_variable(dy_x_data)
# label = to_variable(y_data)
# label._stop_gradient = True
# out = resnet(img)
# loss = fluid.layers.cross_entropy(input=out, label=label)
# avg_loss = fluid.layers.mean(x=loss)
# dy_out = avg_loss._numpy()
# if batch_id == 0:
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# if param.name not in dy_param_init_value:
# dy_param_init_value[param.name] = param._numpy()
# avg_loss._backward()
# dy_grad_value = {}
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# if not param.stop_gradient:
# np_array = np.array(param._ivar._grad_ivar().value()
# .get_tensor())
# dy_grad_value[param.name + core.grad_var_suffix(
# )] = np_array
# optimizer.minimize(avg_loss)
# dy_param_value = {}
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# dy_param_value[param.name] = param._numpy()
# with new_program_scope():
# fluid.default_startup_program().random_seed = seed
# fluid.default_main_program().random_seed = seed
# exe = fluid.Executor(fluid.CUDAPlace(0))
# resnet = ResNet()
# optimizer = optimizer_setting(train_parameters)
# np.random.seed(seed)
# import random
# random.seed = seed
# train_reader = paddle.batch(
# paddle.dataset.flowers.train(use_xmap=False),
# batch_size=batch_size)
# img = fluid.layers.data(
# name='pixel', shape=[3, 224, 224], dtype='float32')
# label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# out = resnet(img)
# loss = fluid.layers.cross_entropy(input=out, label=label)
# avg_loss = fluid.layers.mean(x=loss)
# optimizer.minimize(avg_loss)
# # initialize params and fetch them
# static_param_init_value = {}
# static_param_name_list = []
# static_grad_name_list = []
# for param in fluid.default_startup_program().global_block(
# ).all_parameters():
# static_param_name_list.append(param.name)
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# if not param.stop_gradient:
# static_grad_name_list.append(param.name +
# core.grad_var_suffix())
# out = exe.run(fluid.default_startup_program(),
# fetch_list=static_param_name_list)
# for i in range(len(static_param_name_list)):
# static_param_init_value[static_param_name_list[i]] = out[i]
# for batch_id, data in enumerate(train_reader()):
# if batch_id >= 1:
# break
# static_x_data = np.array(
# [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
# y_data = np.array([x[1] for x in data]).astype('int64').reshape(
# [batch_size, 1])
# fetch_list = [avg_loss.name]
# fetch_list.extend(static_param_name_list)
# fetch_list.extend(static_grad_name_list)
# out = exe.run(fluid.default_main_program(),
# feed={"pixel": static_x_data,
# "label": y_data},
# fetch_list=fetch_list)
# static_param_value = {}
# static_grad_value = {}
# static_out = out[0]
# param_start_pos = 1
# grad_start_pos = len(static_param_name_list) + param_start_pos
# for i in range(param_start_pos,
# len(static_param_name_list) + param_start_pos):
# static_param_value[static_param_name_list[
# i - param_start_pos]] = out[i]
# for i in range(grad_start_pos,
# len(static_grad_name_list) + grad_start_pos):
# static_grad_value[static_grad_name_list[
# i - grad_start_pos]] = out[i]
# self.assertTrue(np.allclose(static_out, dy_out))
# self.assertEqual(len(dy_param_init_value), len(static_param_init_value))
# for key, value in six.iteritems(static_param_init_value):
# self.assertTrue(np.allclose(value, dy_param_init_value[key]))
# self.assertEqual(len(dy_grad_value), len(static_grad_value))
# # TODO(minqiyang): find a way to align the gradient
# # for key, value in six.iteritems(static_grad_value):
# # self.assertTrue(
# # np.allclose(value, dy_grad_value[key]))
# self.assertEqual(len(dy_param_value), len(static_param_value))
# # for key, value in six.iteritems(static_param_value):
# # self.assertTrue(np.allclose(value, dy_param_value[key]))
    def test_resnet_cpu_float32(self):
        seed = 90
        batch_size = train_parameters["batch_size"]

-        with fluid.imperative.guard():
# with fluid.imperative.guard(device=None):
# fluid.default_startup_program().random_seed = seed
# fluid.default_main_program().random_seed = seed
# resnet = ResNet()
# optimizer = optimizer_setting(train_parameters)
# np.random.seed(seed)
# import random
# random.seed = seed
# train_reader = paddle.batch(
# paddle.dataset.flowers.train(use_xmap=False),
# batch_size=batch_size)
# dy_param_init_value = {}
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# dy_param_init_value[param.name] = param._numpy()
# for batch_id, data in enumerate(train_reader()):
# if batch_id >= 1:
# break
# dy_x_data = np.array(
# [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
# y_data = np.array([x[1] for x in data]).astype('int64').reshape(
# batch_size, 1)
# img = to_variable(dy_x_data)
# label = to_variable(y_data)
# label._stop_gradient = True
# out = resnet(img)
# loss = fluid.layers.cross_entropy(input=out, label=label)
# avg_loss = fluid.layers.mean(x=loss)
# dy_out = avg_loss._numpy()
# if batch_id == 0:
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# if param.name not in dy_param_init_value:
# dy_param_init_value[param.name] = param._numpy()
# avg_loss._backward()
# dy_grad_value = {}
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# if not param.stop_gradient:
# np_array = np.array(param._ivar._grad_ivar().value()
# .get_tensor())
# dy_grad_value[param.name + core.grad_var_suffix(
# )] = np_array
# optimizer.minimize(avg_loss)
# dy_param_value = {}
# for param in fluid.default_main_program().global_block(
# ).all_parameters():
# dy_param_value[param.name] = param._numpy()
        with new_program_scope():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed

            exe = fluid.Executor(fluid.CPUPlace())

            resnet = ResNet()
            optimizer = optimizer_setting(train_parameters)
            np.random.seed(seed)
            import random
            random.seed = seed
...
...
@@ -217,10 +438,32 @@ class TestImperativeResnet(unittest.TestCase):
                paddle.dataset.flowers.train(use_xmap=False),
                batch_size=batch_size)

            img = fluid.layers.data(
                name='pixel', shape=[3, 224, 224], dtype='float32')
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
            out = resnet(img)
            loss = fluid.layers.cross_entropy(input=out, label=label)
            avg_loss = fluid.layers.mean(x=loss)

            optimizer.minimize(avg_loss)

+            # initialize params and fetch them
+            dy_param_init_value = {}
+            dy_param_name_list = []
+            dy_grad_name_list = []
+            for param in fluid.default_startup_program().global_block(
+            ).all_parameters():
+                dy_param_name_list.append(param.name)
+            for param in fluid.default_main_program().global_block(
+            ).all_parameters():
+                dy_param_init_value[param.name] = param._numpy()
+                if not param.stop_gradient:
+                    dy_grad_name_list.append(param.name +
+                                             core.grad_var_suffix())
+
+            out = exe.run(fluid.default_startup_program(),
+                          fetch_list=dy_param_name_list)
+
+            for i in range(len(dy_param_name_list)):
+                dy_param_init_value[dy_param_name_list[i]] = out[i]
+
            for batch_id, data in enumerate(train_reader()):
                if batch_id >= 1:
...
...
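The bookkeeping added above collects parameter names, and gradient names derived from them, so that a single exe.run call can later fetch the loss, parameter values, and gradients together. A self-contained sketch of the same pattern; the "@GRAD" suffix is an assumption standing in for core.grad_var_suffix():

# Hedged sketch of the fetch-list bookkeeping (pure Python, no Paddle).
GRAD_SUFFIX = "@GRAD"  # assumed value of core.grad_var_suffix()

params = {"conv1.w": True, "bn1.mean": False}  # name -> requires gradient
param_name_list = list(params)
grad_name_list = [name + GRAD_SUFFIX for name, trainable in params.items()
                  if trainable]

# fetch_list mirrors what exe.run() is later asked to return, in order:
fetch_list = ["mean_loss"] + param_name_list + grad_name_list
print(fetch_list)  # ['mean_loss', 'conv1.w', 'bn1.mean', 'conv1.w@GRAD']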
@@ -229,46 +472,35 @@ class TestImperativeResnet(unittest.TestCase):
dy_x_data
=
np
.
array
(
[
x
[
0
].
reshape
(
3
,
224
,
224
)
for
x
in
data
]).
astype
(
'float32'
)
y_data
=
np
.
array
([
x
[
1
]
for
x
in
data
]).
astype
(
'int64'
).
reshape
(
batch_size
,
1
)
img
=
to_variable
(
dy_x_data
)
label
=
to_variable
(
y_data
)
label
.
_stop_gradient
=
True
out
=
resnet
(
img
)
loss
=
fluid
.
layers
.
cross_entropy
(
input
=
out
,
label
=
label
)
avg_loss
=
fluid
.
layers
.
mean
(
x
=
loss
)
dy_out
=
avg_loss
.
_numpy
()
if
batch_id
==
0
:
for
param
in
fluid
.
default_main_program
().
global_block
(
).
all_parameters
():
if
param
.
name
not
in
dy_param_init_value
:
dy_param_init_value
[
param
.
name
]
=
param
.
_numpy
()
avg_loss
.
_backward
()
dy_grad_value
=
{}
for
param
in
fluid
.
default_main_program
().
global_block
(
).
all_parameters
():
if
not
param
.
stop_gradient
:
np_array
=
np
.
array
(
param
.
_ivar
.
_grad_ivar
().
value
()
.
get_tensor
())
dy_grad_value
[
param
.
name
+
core
.
grad_var_suffix
(
)]
=
np_array
[
batch_size
,
1
])
optimizer
.
minimize
(
avg_loss
)
fetch_list
=
[
avg_loss
.
name
]
fetch_list
.
extend
(
dy_param_name_list
)
fetch_list
.
extend
(
dy_grad_name_list
)
out
=
exe
.
run
(
fluid
.
default_main_program
(),
feed
=
{
"pixel"
:
dy_x_data
,
"label"
:
y_data
},
fetch_list
=
fetch_list
)
dy_param_value
=
{}
for
param
in
fluid
.
default_main_program
().
global_block
(
).
all_parameters
():
dy_param_value
[
param
.
name
]
=
param
.
_numpy
()
dy_grad_value
=
{}
dy_out
=
out
[
0
]
param_start_pos
=
1
grad_start_pos
=
len
(
dy_param_name_list
)
+
param_start_pos
for
i
in
range
(
param_start_pos
,
len
(
dy_param_name_list
)
+
param_start_pos
):
dy_param_value
[
dy_param_name_list
[
i
-
param_start_pos
]]
=
out
[
i
]
for
i
in
range
(
grad_start_pos
,
len
(
dy_grad_name_list
)
+
grad_start_pos
):
dy_grad_value
[
dy_grad_name_list
[
i
-
grad_start_pos
]]
=
out
[
i
]
with
new_program_scope
():
fluid
.
default_startup_program
().
random_seed
=
seed
fluid
.
default_main_program
().
random_seed
=
seed
exe
=
fluid
.
Executor
(
fluid
.
C
UDAPlace
(
0
))
exe
=
fluid
.
Executor
(
fluid
.
C
PUPlace
(
))
resnet
=
ResNet
()
optimizer
=
optimizer_setting
(
train_parameters
)
...
...
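Since exe.run returns the fetched values as one flat list in fetch_list order, the hunk above recovers per-name dictionaries purely by position: index 0 is the loss, then parameter values, then gradients. A minimal sketch of that unpacking (string stand-ins for the fetched tensors):

# Hedged sketch of the positional unpacking of exe.run results.
param_name_list = ["conv1.w", "fc.b"]
grad_name_list = ["conv1.w@GRAD"]               # assumed "@GRAD" suffix
out = ["loss-val", "w-val", "b-val", "w-grad"]  # stand-ins for fetched tensors

dy_out = out[0]
param_start_pos = 1
grad_start_pos = len(param_name_list) + param_start_pos

dy_param_value = {param_name_list[i - param_start_pos]: out[i]
                  for i in range(param_start_pos, grad_start_pos)}
dy_grad_value = {grad_name_list[i - grad_start_pos]: out[i]
                 for i in range(grad_start_pos,
                                grad_start_pos + len(grad_name_list))}

assert dy_param_value == {"conv1.w": "w-val", "fc.b": "b-val"}
assert dy_grad_value == {"conv1.w@GRAD": "w-grad"}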
@@ -345,15 +577,17 @@ class TestImperativeResnet(unittest.TestCase):
            self.assertTrue(np.allclose(value, dy_param_init_value[key]))

        self.assertEqual(len(dy_grad_value), len(static_grad_value))
-        # TODO(minqiyang): find a way to align the gradient
-        # for key, value in six.iteritems(static_grad_value):
-        #     self.assertTrue(
-        #         np.allclose(value, dy_grad_value[key]))
+        for key, value in six.iteritems(static_grad_value):
+            if not np.allclose(value, dy_grad_value[key]):
+                # print(key, value, dy_grad_value[key])
+                print(key)
+            # self.assertTrue(
+            #     np.allclose(value, dy_grad_value[key]))

        self.assertEqual(len(dy_param_value), len(static_param_value))
-        # for key, value in six.iteritems(static_param_value):
-        #     self.assertTrue(np.allclose(value, dy_param_value[key]))
+        for key, value in six.iteritems(static_param_value):
+            print(key)
+            # self.assertTrue(np.allclose(value, dy_param_value[key]))


if __name__ == '__main__':
...
...
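The final hunk relaxes the gradient and parameter checks from hard assertions to prints, so every mismatched key is reported instead of the test aborting on the first one. A hedged sketch of that diagnostic pattern; the max-abs-diff output is an added convenience not present in the commit:

import numpy as np

def report_mismatches(static_value, dy_value, atol=1e-8):
    # Print each key whose arrays differ instead of asserting, as the
    # debug commit does, plus the largest absolute difference per key.
    for key, value in static_value.items():
        if not np.allclose(value, dy_value[key], atol=atol):
            print(key, np.max(np.abs(value - dy_value[key])))

report_mismatches({"w": np.ones(3)}, {"w": np.ones(3) + 1e-3})  # prints: w 0.001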