机器未来 / Paddle · forked from PaddlePaddle / Paddle (in sync with the fork source)
Commit 49a7fba8
Authored Jan 28, 2019 by minqiyang

Polish code

test=develop

Parent: 159c4073
Showing 3 changed files with 28 additions and 31 deletions (+28, -31)
paddle/fluid/imperative/layer.h (+4, -2)
python/paddle/fluid/imperative/layers.py (+1, -2)
python/paddle/fluid/tests/unittests/test_imperative_resnet.py (+23, -27)
paddle/fluid/imperative/layer.h
@@ -141,11 +141,13 @@ class VarBase {
   void RunBackward();

   void TrackPreOp(OpBase* pre_op, const std::string& pre_op_out_name,
-                  int pre_op_out_idx, bool stop_gradient) {
+                  int pre_op_out_idx, bool pre_op_stop_gradient) {
     pre_op_ = pre_op;
     pre_op_out_name_ = pre_op_out_name;
     pre_op_out_idx_ = pre_op_out_idx;
-    stop_gradient_ = stop_gradient;
+    if (pre_op_stop_gradient) {
+      stop_gradient_ = pre_op_stop_gradient;
+    }
   }

   void ClearGradient() {
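The layer.h change means a tracked variable's stop_gradient_ flag is forced on only when the producing op itself stops gradients, instead of being overwritten unconditionally. A minimal plain-Python sketch of that rule, for illustration only (this is not Paddle API, just the decision logic):

# Illustrative sketch of the new TrackPreOp rule (plain Python, not Paddle code):
# the variable keeps its own stop-gradient flag unless the producing op is
# itself marked stop-gradient, in which case the flag is forced to True.
def resolve_stop_gradient(var_stop_gradient, pre_op_stop_gradient):
    if pre_op_stop_gradient:
        return True           # gradients never flow past an op that stops them
    return var_stop_gradient  # otherwise the variable's existing flag is kept

print(resolve_stop_gradient(False, True))   # True: forced by the pre-op
print(resolve_stop_gradient(False, False))  # False: unchanged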
python/paddle/fluid/imperative/layers.py
@@ -51,9 +51,8 @@ class Layer(core.Layer):
         return params

     def clear_gradients(self):
-        print([p.name for p in self.parameters()])
         for p in self.parameters():
-            if p.name not in set(['batch_norm_0.w_2', 'batch_norm_0.w_1']):
+            if not p._stop_gradient:
                 p._clear_gradient()

     def _build_once(self, inputs):
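After this cleanup, clear_gradients drops the debug print and the hard-coded batch-norm name filter, and simply clears every parameter whose gradient tracking is enabled. A self-contained sketch of that logic using stub parameters (plain Python; the stub class and the parameter names are made up for illustration, the real objects are Paddle imperative parameters exposing _stop_gradient and _clear_gradient()):

# Stub parameter type used only for this sketch.
class _ParamStub(object):
    def __init__(self, name, stop_gradient=False):
        self.name = name
        self._stop_gradient = stop_gradient
        self.grad = 1.0  # pretend a gradient has been accumulated

    def _clear_gradient(self):
        self.grad = 0.0

def clear_gradients(parameters):
    # Mirrors the polished Layer.clear_gradients: no name filtering,
    # skip only parameters that do not require gradients.
    for p in parameters:
        if not p._stop_gradient:
            p._clear_gradient()

params = [_ParamStub("conv0.w_0"), _ParamStub("frozen.w_0", stop_gradient=True)]
clear_gradients(params)
print([(p.name, p.grad) for p in params])  # [('conv0.w_0', 0.0), ('frozen.w_0', 1.0)]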
python/paddle/fluid/tests/unittests/test_imperative_resnet.py
@@ -168,22 +168,22 @@ class ResNet(fluid.imperative.Layer):
         self.pool2d_max = Pool2D(
             pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

-        # self.bottleneck_block_list = []
-        # num_channels = 64
-        # for block in range(len(depth)):
-        #     shortcut = False
-        #     for i in range(depth[block]):
-        #         bottleneck_block = BottleneckBlock(
-        #             num_channels=num_channels,
-        #             num_filters=num_filters[block],
-        #             stride=2 if i == 0 and block != 0 else 1,
-        #             shortcut=shortcut)
-        #         num_channels = bottleneck_block._num_channels_out
-        #         self.bottleneck_block_list.append(bottleneck_block)
-        #     shortcut = True
-        # self.pool2d_avg = Pool2D(
-        #     pool_size=7, pool_type='avg', global_pooling=True)
+        self.bottleneck_block_list = []
+        num_channels = 64
+        for block in range(len(depth)):
+            shortcut = False
+            for i in range(depth[block]):
+                bottleneck_block = BottleneckBlock(
+                    num_channels=num_channels,
+                    num_filters=num_filters[block],
+                    stride=2 if i == 0 and block != 0 else 1,
+                    shortcut=shortcut)
+                num_channels = bottleneck_block._num_channels_out
+                self.bottleneck_block_list.append(bottleneck_block)
+            shortcut = True
+        self.pool2d_avg = Pool2D(
+            pool_size=7, pool_type='avg', global_pooling=True)

         import math
         stdv = 1.0 / math.sqrt(2048 * 1.0)
@@ -196,9 +196,9 @@ class ResNet(fluid.imperative.Layer):

     def forward(self, inputs):
         y = self.conv(inputs)
         y = self.pool2d_max(y)
-        # for bottleneck_block in self.bottleneck_block_list:
-        #     y = bottleneck_block(y)
-        # y = self.pool2d_avg(y)
+        for bottleneck_block in self.bottleneck_block_list:
+            y = bottleneck_block(y)
+        y = self.pool2d_avg(y)
         y = self.out(y)
         return y
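The restored forward path threads the activation through every bottleneck block in order. A plain-Python sketch of that sequential chaining, using stub callables rather than the Paddle layers:

# Stub blocks standing in for BottleneckBlock instances; each just transforms y.
blocks = [lambda y, k=k: y + k for k in range(3)]

def forward(y):
    # Mirrors the restored loop: pass the activation through each block in order.
    for block in blocks:
        y = block(y)
    return y

print(forward(0))  # 0 + 0 + 1 + 2 = 3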
@@ -209,7 +209,7 @@ class TestImperativeResnet(unittest.TestCase):
         batch_size = train_parameters["batch_size"]
         batch_num = 1

-        with fluid.imperative.guard(place=fluid.CPUPlace()):
+        with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -275,9 +275,8 @@ class TestImperativeResnet(unittest.TestCase):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed

-            exe = fluid.Executor(fluid.CPUPlace())
-            # exe = fluid.Executor(fluid.CPUPlace(
-            # ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))

             resnet = ResNet()
             optimizer = optimizer_setting(train_parameters)
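The executor change stops hard-coding CPU and instead picks the device from whether the Paddle build has CUDA support. A stand-alone sketch of the same selection pattern; the has_cuda stub replaces core.is_compiled_with_cuda() and the strings stand in for Place objects, so this is illustrative only:

# Stand-alone sketch of the device-selection pattern used above.
def has_cuda():
    return False  # stub; the real check queries how the binary was compiled

place = "CUDAPlace(0)" if has_cuda() else "CPUPlace()"
print("executor place:", place)  # -> executor place: CPUPlace() on a CPU-only build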
@@ -347,7 +346,6 @@ class TestImperativeResnet(unittest.TestCase):
                     static_grad_value[static_grad_name_list[
                         i - grad_start_pos]] = out[i]

-        print(static_out, dy_out)
         self.assertTrue(np.allclose(static_out, dy_out))

         self.assertEqual(len(dy_param_init_value), len(static_param_init_value))
@@ -358,9 +356,7 @@ class TestImperativeResnet(unittest.TestCase):

         self.assertEqual(len(dy_grad_value), len(static_grad_value))
         for key, value in six.iteritems(static_grad_value):
-            if not np.allclose(value, dy_grad_value[key]):
-                print(key)
-            #self.assertTrue(np.allclose(value, dy_grad_value[key]))
+            self.assertTrue(np.allclose(value, dy_grad_value[key]))
             self.assertTrue(np.isfinite(value.all()))
             self.assertFalse(np.isnan(value.any()))
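The re-enabled assertions require the static-graph and imperative runs to agree numerically. A self-contained sketch of the same checks on placeholder arrays (NumPy only; the values are made up for illustration):

import numpy as np

# Placeholder results standing in for the static-graph and imperative outputs.
static_out = np.array([0.25, 0.50, 0.75])
dy_out = np.array([0.25, 0.50, 0.75])

# Same pattern as the test: results must match elementwise within tolerance,
# and must be finite (no NaN/Inf) for the comparison to be meaningful.
assert np.allclose(static_out, dy_out)
assert np.isfinite(static_out).all()
assert not np.isnan(static_out).any()
print("static and imperative results agree")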