Commit 44291186: small update

Author: qingqing01
Authored: Mar 03, 2020
Parent: 5fd7458d
Showing 3 changed files with 50 additions and 22 deletions (+50, -22):

    mnist.py    +28  -17
    mnist2.py    +4   -3
    model.py    +18   -2
mnist.py

@@ -76,7 +76,7 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
 
 class MNIST(Model):
-    def __init__(self, inputs, targets=None):
+    def __init__(self, inputs=None, targets=None):
         super(MNIST, self).__init__(inputs, targets)
 
         self._simple_img_conv_pool_1 = SimpleImgConvPool(
             1, 20, 5, 2, 2, act="relu")
@@ -87,12 +87,13 @@ class MNIST(Model):
         pool_2_shape = 50 * 4 * 4
         SIZE = 10
         scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
-        self._fc = Linear(800,
-                          10,
-                          param_attr=fluid.param_attr.ParamAttr(
-                              initializer=fluid.initializer.NormalInitializer(
-                                  loc=0.0, scale=scale)),
-                          act="softmax")
+        self._fc = Linear(
+            800,
+            10,
+            param_attr=fluid.param_attr.ParamAttr(
+                initializer=fluid.initializer.NormalInitializer(
+                    loc=0.0, scale=scale)),
+            act="softmax")
 
     def forward(self, inputs):
         x = self._simple_img_conv_pool_1(inputs)
@@ -139,11 +140,14 @@ def main():
     device_ids = list(range(FLAGS.num_devices))
 
     with guard:
-        inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
-        labels = [Input([None, 1], 'int64', name='label')]
+        inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
+        labels = [Input([None, 1], 'int64', name='label')]
         model = MNIST(inputs, labels)
-        optim = Momentum(learning_rate=FLAGS.lr, momentum=.9,
-                         parameter_list=model.parameters())
+        #model = MNIST()
+        optim = Momentum(
+            learning_rate=FLAGS.lr,
+            momentum=.9,
+            parameter_list=model.parameters())
         model.prepare(optim, CrossEntropy())
         if FLAGS.resume is not None:
             model.load(FLAGS.resume)
@@ -155,8 +159,8 @@ def main():
         val_acc = 0.0
         print("======== train epoch {} ========".format(e))
         for idx, batch in enumerate(train_loader()):
-            outputs, losses = model.train(batch[0], batch[1], device='gpu',
-                                          device_ids=device_ids)
+            outputs, losses = model.train(
+                batch[0], batch[1], device='gpu', device_ids=device_ids)
             acc = accuracy(outputs[0], batch[1])[0]
             train_loss += np.sum(losses)
@@ -167,8 +171,8 @@ def main():
         print("======== eval epoch {} ========".format(e))
         for idx, batch in enumerate(val_loader()):
-            outputs, losses = model.eval(batch[0], batch[1], device='gpu',
-                                         device_ids=device_ids)
+            outputs, losses = model.eval(
+                batch[0], batch[1], device='gpu', device_ids=device_ids)
             acc = accuracy(outputs[0], batch[1])[0]
             val_loss += np.sum(losses)
@@ -186,14 +190,21 @@ if __name__ == '__main__':
     parser.add_argument(
         "-e", "--epoch", default=100, type=int, help="number of epoch")
     parser.add_argument(
-        '--lr', '--learning-rate', default=1e-3, type=float, metavar='LR',
+        '--lr',
+        '--learning-rate',
+        default=1e-3,
+        type=float,
+        metavar='LR',
         help='initial learning rate')
     parser.add_argument(
         "-b", "--batch_size", default=128, type=int, help="batch size")
     parser.add_argument(
         "-n", "--num_devices", default=4, type=int, help="number of devices")
    parser.add_argument(
-        "-r", "--resume", default=None, type=str,
+        "-r",
+        "--resume",
+        default=None,
+        type=str,
         help="checkpoint path to resume")
     FLAGS = parser.parse_args()
     main()
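
The mnist.py hunks are signature and formatting updates; the high-level flow is unchanged. For orientation, here is a condensed sketch of that flow after this commit. It reuses names defined elsewhere in mnist.py (guard, FLAGS, device_ids, train_loader and the imports of Input, CrossEntropy, Momentum, accuracy) rather than redefining them, so it is an illustration, not a standalone script.

```python
# Sketch only: `guard`, `FLAGS`, `device_ids` and `train_loader` come from the
# surrounding mnist.py code and are not redefined here.
with guard:
    inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model = MNIST(inputs, labels)
    optim = Momentum(
        learning_rate=FLAGS.lr,
        momentum=.9,
        parameter_list=model.parameters())
    model.prepare(optim, CrossEntropy())

    for e in range(FLAGS.epoch):
        for idx, batch in enumerate(train_loader()):
            # one optimization step on a batch of (image, label) arrays
            outputs, losses = model.train(
                batch[0], batch[1], device='gpu', device_ids=device_ids)
            acc = accuracy(outputs[0], batch[1])[0]
```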
mnist2.py

@@ -76,7 +76,7 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
 
 class MNIST(Model):
-    def __init__(self, inputs):
+    def __init__(self, inputs=None):
         super(MNIST, self).__init__(inputs)
 
         self._simple_img_conv_pool_1 = SimpleImgConvPool(
             1, 20, 5, 2, 2, act="relu")
@@ -146,8 +146,9 @@ def main():
 
     with guard:
         inputs = [Input(
-            [None, 1, 28, 28], 'float32', name='image'), Input(
-                [None, 1], 'int64', name='label')
-        ]
+            [None, 1, 28, 28], 'float32', name='image'),
+                  Input(
+                      [None, 1], 'int64', name='label'),
+        ]
         model = MNIST(inputs)
         optim = Momentum(
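
Because `inputs` now defaults to None here as well (and mnist.py gains a commented-out `#model = MNIST()` hint), the model can also be constructed without any Input declarations when running under the dygraph guard, as model.py's new docstring allows. A minimal sketch, reusing `guard`, `Momentum` and `CrossEntropy` from these scripts:

```python
# Sketch only: relies on names defined elsewhere in mnist2.py; for static
# graph mode the Input specs above remain required.
with guard:
    model = MNIST()                       # inputs left as None (dygraph only)
    optim = Momentum(
        learning_rate=1e-3,
        momentum=.9,
        parameter_list=model.parameters())
    model.prepare(optim, CrossEntropy())
```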
model.py

@@ -41,6 +41,8 @@ class Input(fluid.dygraph.Layer):
 
 def to_list(value):
+    if value is None:
+        return value
     if isinstance(value, (list, tuple)):
         return value
     return [value]
 
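
For reference, the updated helper now treats each kind of argument as follows (a small illustration, not part of the commit):

```python
def to_list(value):
    if value is None:
        return value
    if isinstance(value, (list, tuple)):
        return value
    return [value]

assert to_list(None) is None        # new in this commit: None passes through
assert to_list([1, 2]) == [1, 2]    # lists (and tuples) are returned as-is
assert to_list("x") == ["x"]        # anything else is wrapped in a list
```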
@@ -443,11 +445,25 @@ class DynamicGraphAdapter(object):
 
 class Model(fluid.dygraph.Layer):
+    """
+    FIXME: add more comments and usage
+
+    Args:
+        inputs (Input|list of Input|None): inputs, entry points of network,
+            could be a Input layer of lits of Input layers, or None.
+            For static graph, inputs must be set. For dynamic graph, it could
+            be None.
+        labels (Input|list of Input|None): labels, entry points of network,
+            could be a Input layer of lits of Input layers, or None.
+            For static graph, if set loss_function in Model.prepare(), it
+            must be set. Otherwise, it could be None.
+    """
+
     def __init__(self, inputs=None, labels=None):
         super(Model, self).__init__(self.__class__.__name__)
         self.mode = 'train'
-        self._inputs = inputs
-        self._labels = labels
+        self._inputs = to_list(inputs)
+        self._labels = to_list(labels)
         self._loss_function = None
         self._loss_weights = None
         self._loss = None
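
Together with the `to_list()` wrapping in `__init__`, the new docstring means a Model subclass can be given a single Input, a list of Inputs, or nothing at all. A short sketch of the three construction patterns, using the MNIST class from mnist.py; which pattern is sufficient still depends on graph mode, as the docstring says:

```python
# Sketch only: Input and MNIST are the classes from this repo's model.py
# and mnist.py.
image = Input([None, 1, 28, 28], 'float32', name='image')
label = Input([None, 1], 'int64', name='label')

m1 = MNIST(inputs=[image], targets=[label])  # lists of Input; required for
                                             # static graph (labels too when a
                                             # loss_function is set in prepare)
m2 = MNIST(inputs=image, targets=label)      # single Input; to_list() wraps it
m3 = MNIST()                                 # None; allowed for dynamic graph
```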