机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 133f1005

Authored Dec 29, 2018 by minqiyang

Complete the unittest of optimizers

test=develop

Parent: 2547f9d1
Showing 3 changed files with 113 additions and 36 deletions.

python/paddle/fluid/imperative/nn.py                          +29  -18
python/paddle/fluid/tests/unittests/test_imperative.py        +1   -10
python/paddle/fluid/tests/unittests/test_imperative_mnist.py  +83  -8
python/paddle/fluid/imperative/nn.py

@@ -97,17 +97,23 @@ class Conv2D(layers.PyLayer):
             persistable=True,
             type=core.VarDesc.VarType.RAW)
 
-        self._pre_bias = self._helper.create_variable_for_type_inference(
-            dtype=self._dtype)
+        self._bias_param = self._helper.create_parameter(
+            attr=self._helper.bias_attr,
+            shape=[num_filter_channels],
+            dtype=self._dtype,
+            is_bias=True)
 
     def forward(self, input):
+        pre_bias = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype)
+
         self._helper.append_op(
             type=self._l_type,
             inputs={
                 'Input': input,
                 'Filter': self._filter_param,
             },
-            outputs={"Output": self._pre_bias},
+            outputs={"Output": pre_bias},
             attrs={
                 'strides': self._stride,
                 'paddings': self._padding,

@@ -117,11 +123,17 @@ class Conv2D(layers.PyLayer):
                 'use_mkldnn': False,
             })
 
-        self._pre_act = self._helper.append_bias_op(
-            self._pre_bias, dim_start=1, dim_end=2)
-
-        out = self._helper.append_activation(self._pre_act)
-        return out
+        pre_act = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype)
+        self._helper.append_op(
+            type='elementwise_add',
+            inputs={'X': [pre_bias],
+                    'Y': [self._bias_param]},
+            outputs={'Out': [pre_act]},
+            attrs={'axis': 1})
+
+        return self._helper.append_activation(pre_act)
 
 
 class Pool2D(layers.PyLayer):
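
In the Conv2D change above, the append_bias_op helper is replaced by a bias parameter the layer owns explicitly (self._bias_param) plus an elementwise_add op with axis=1, which broadcasts the per-channel bias over the batch and spatial dimensions of the NCHW conv output. A minimal NumPy illustration of that broadcast; the shapes are hypothetical, chosen to match the Conv2D(1, 20, 5) layer used in the tests below:

    import numpy as np

    # Conv output in NCHW layout and a per-channel bias.
    pre_bias = np.zeros((128, 20, 24, 24), dtype='float32')  # N, C, H, W
    bias = np.ones((20,), dtype='float32')                   # one value per channel

    # elementwise_add(..., axis=1) aligns bias with dim 1 (C) of pre_bias,
    # i.e. it behaves like adding bias.reshape(1, 20, 1, 1).
    pre_act = pre_bias + bias.reshape(1, 20, 1, 1)
    assert pre_act.shape == (128, 20, 24, 24)
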
@@ -162,14 +174,13 @@ class Pool2D(layers.PyLayer):
         self._exclusive = exclusive
         self._l_type = 'pool2d'
 
-        self._pool_out = self._helper.create_variable_for_type_inference(
-            self._dtype)
-
     def forward(self, input):
+        pool_out = self._helper.create_variable_for_type_inference(self._dtype)
+
         self._helper.append_op(
             type=self._l_type,
             inputs={"X": input},
-            outputs={"Out": self._pool_out},
+            outputs={"Out": pool_out},
             attrs={
                 "pooling_type": self._pool_type,
                 "ksize": self._pool_size,

@@ -181,7 +192,7 @@ class Pool2D(layers.PyLayer):
                 "use_mkldnn": False,
                 "exclusive": self._exclusive,
             })
-        return self._pool_out
+        return pool_out
 
 
 class FC(layers.PyLayer):

@@ -203,8 +214,6 @@ class FC(layers.PyLayer):
             shape=[size_in, size_out],
             dtype=self._dtype,
             is_bias=False)
-        self._tmp = self._helper.create_variable_for_type_inference(self._dtype)
-        self._out = self._helper.create_variable_for_type_inference(self._dtype)
 
     def _build_once(self, input):
         if self._size_in != -1:

@@ -221,19 +230,21 @@ class FC(layers.PyLayer):
             is_bias=False)
 
     def forward(self, input):
+        tmp = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type="mul",
             inputs={"X": input,
                     "Y": self._w},
-            outputs={"Out": self._tmp},
+            outputs={"Out": tmp},
             attrs={
                 "x_num_col_dims": self._num_flatten_dims,
                 "y_num_col_dims": 1
             })
 
+        out = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type="sum",
-            inputs={"X": [self._tmp]},
-            outputs={"Out": self._out},
+            inputs={"X": [tmp]},
+            outputs={"Out": out},
             attrs={"use_mkldnn": False})
-        return self._out
+        return out
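
All three layers (Conv2D, Pool2D, FC) get the same refactor: output variables that used to be allocated once in __init__, and therefore shared by every call, are now created inside forward, so a layer can be invoked once per mini-batch without successive calls aliasing the same output variable. A minimal before/after sketch of the pattern; the layer names are hypothetical and the LayerHelper plumbing is elided, with 'relu' standing in for the real ops:

    class CachedOutputLayer:                      # old pattern, removed
        def __init__(self, helper):
            self._helper = helper
            # One output variable allocated up front and reused by every
            # forward() call -- all calls write to the same variable.
            self._out = helper.create_variable_for_type_inference('float32')

        def forward(self, input):
            self._helper.append_op(
                type='relu', inputs={'X': input}, outputs={'Out': self._out})
            return self._out


    class PerCallOutputLayer:                     # new pattern, added
        def __init__(self, helper):
            self._helper = helper

        def forward(self, input):
            # Each call creates its own output variable, so calling the
            # layer once per mini-batch never aliases an earlier output.
            out = self._helper.create_variable_for_type_inference('float32')
            self._helper.append_op(
                type='relu', inputs={'X': input}, outputs={'Out': out})
            return out
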
python/paddle/fluid/tests/unittests/test_imperative.py

@@ -19,16 +19,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.layers.nn import FC
 
-
-@contextlib.contextmanager
-def new_program_scope():
-    prog = fluid.Program()
-    startup_prog = fluid.Program()
-    scope = fluid.core.Scope()
-    with fluid.scope_guard(scope):
-        with fluid.program_guard(prog, startup_prog):
-            yield
+from test_imperative_base import new_program_scope
 
 
 class MyLayer(fluid.imperative.PyLayer):
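
The locally defined new_program_scope context manager is deleted here and imported from test_imperative_base instead, so both test files share one copy. That module is not part of this diff; assuming it simply hosts the deleted helper verbatim, it would look like:

    # test_imperative_base.py (assumed) -- shared helper, reconstructed
    # from the lines deleted above.
    import contextlib

    import paddle.fluid as fluid


    @contextlib.contextmanager
    def new_program_scope():
        # Fresh main/startup programs and a fresh scope, so each
        # static-graph test builds and runs in isolation.
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                yield
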
python/paddle/fluid/tests/unittests/test_imperative_mnist.py

@@ -15,12 +15,15 @@
 import contextlib
 import unittest
 import numpy as np
+import six
 
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.optimizer import SGDOptimizer
 from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
 from paddle.fluid.imperative.base import to_variable
+from test_imperative_base import new_program_scope
 
 
 class SimpleImgConvPool(fluid.imperative.PyLayer):

@@ -97,21 +100,93 @@ class MNIST(fluid.imperative.PyLayer):
 class TestImperativeMnist(unittest.TestCase):
     def test_mnist_cpu_float32(self):
+        seed = 90
+
         with fluid.imperative.guard():
-            mnist = MNIST()
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+
+            mnist = Conv2D(1, 20, 5)
             sgd = SGDOptimizer(learning_rate=1e-3)
+            train_reader = paddle.batch(
+                paddle.dataset.mnist.train(), batch_size=128)
+
+            dy_param_value = {}
+            for param in fluid.default_main_program().global_block(
+            ).all_parameters():
+                dy_param_value[param.name] = param._numpy()
+
+            for batch_id, data in enumerate(train_reader()):
+                if batch_id >= 1:
+                    break
+
+                x_data = np.array(
+                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
+                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+                    128, 1)
 
-            for i in range(2):
-                x_data = np.random.rand(128, 1, 28, 28).astype('float32')
                 img = to_variable(x_data)
-                y_data = np.random.rand(128, 1).astype('int64')
                 label = to_variable(y_data)
                 label._stop_gradient = True
 
-                predict = mnist(img)
-                out = fluid.layers.cross_entropy(predict, label)
-                out._backward()
-                sgd.minimize(out)
+                cost = mnist(img)
+                loss = fluid.layers.reduce_mean(cost)
+                dy_out = loss._numpy()
+
+                loss._backward()
+                sgd.minimize(loss)
+            dy_filter_param = mnist._filter_param._numpy()
+
+        with new_program_scope():
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+
+            exe = fluid.Executor(fluid.CPUPlace())
+
+            mnist = Conv2D(1, 20, 5)
+            sgd = SGDOptimizer(learning_rate=1e-3)
+            train_reader = paddle.batch(
+                paddle.dataset.mnist.train(), batch_size=128)
+
+            img = fluid.layers.data(
+                name='pixel', shape=[1, 28, 28], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            cost = mnist(img)
+            loss = fluid.layers.reduce_mean(cost)
+            sgd.minimize(loss)
+
+            # initialize params and fetch them
+            static_param_value = {}
+            static_param_name_list = []
+            for param in fluid.default_startup_program().global_block(
+            ).all_parameters():
+                static_param_name_list.append(param.name)
+
+            out = exe.run(fluid.default_startup_program(),
+                          fetch_list=static_param_name_list)
+
+            for i in range(len(static_param_name_list)):
+                static_param_value[static_param_name_list[i]] = out[i]
+
+            for batch_id, data in enumerate(train_reader()):
+                if batch_id >= 1:
+                    break
+
+                x_data = np.array(
+                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
+                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+                    [128, 1])
+
+                static_out, static_filter_param = exe.run(
+                    fluid.default_main_program(),
+                    feed={"pixel": x_data,
+                          "label": y_data},
+                    fetch_list=[loss.name, mnist._filter_param.name])
+
+        for key, value in six.iteritems(static_param_value):
+            self.assertTrue(
+                np.allclose(value.all(), dy_param_value[key].all()))
+        self.assertTrue(np.allclose(static_out.all(), dy_out.all()))
+        self.assertTrue(
+            np.allclose(static_filter_param.all(), dy_filter_param.all()))
 
 
 if __name__ == '__main__':
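
The test's correctness hinges on both runs starting from identical weights: pinning random_seed on the default startup and main programs before any parameter is created makes the initializers deterministic, after which the imperative run and the static Executor run over the same first MNIST batch should agree on the loss and the updated filter parameter. The idiom in isolation, assuming the fluid 1.x API used throughout this diff:

    import paddle.fluid as fluid

    seed = 90

    # Set the seeds before any layer/parameter is constructed, so that
    # the dynamic and static runs initialize parameters identically.
    fluid.default_startup_program().random_seed = seed
    fluid.default_main_program().random_seed = seed
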