PaddlePaddle / hapi
Commit a08ac369
Authored March 21, 2020 by qingqing01
Fix multi-loss
Parent 59b986c7
Showing 3 changed files with 77 additions and 47 deletions (+77 -47):

model.py             +8  -6
progressbar.py       +25 -21
tests/test_model.py  +44 -20
model.py
@@ -18,7 +18,6 @@ import inspect
 import os
 import pickle
 import numpy as np
-import itertools
 from collections import Iterable
 from collections import OrderedDict
@@ -742,14 +741,17 @@ class Model(fluid.dygraph.Layer):
             else:
                 outs = self.eval(*data)

-            metrics = list(itertools.chain.from_iterable(outs))
-            metrics = [np.mean(metrics[0])]
+            # losses
+            loss = outs[0] if self._metrics else outs
+            metrics = [[l[0] for l in loss]]
+
+            # metrics
             for metric in self._metrics:
                 res = metric.accumulate()
                 metrics.extend(to_list(res))
             assert len(metrics_name) == len(metrics)
             for k, v in zip(metrics_name, metrics):
-                logs[k] = np.mean(v)
+                logs[k] = v

             logs['step'] = step
             logs['batch_size'] = data[0].shape[0]
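For reference, a minimal standalone sketch (plain numpy, not code from the commit) of what the new aggregation does when a model produces several losses: each loss keeps its own value instead of being collapsed into a single mean.

    import numpy as np

    # Hypothetical eval outputs: outs[0] is assumed to hold one array per loss,
    # outs[1] a metric result, mimicking what self.eval(*data) returns here.
    outs = ([np.array([0.71]), np.array([0.42])], np.array([0.93]))
    has_metrics = True  # stands in for self._metrics being non-empty

    # New behaviour: report one value per loss.
    loss = outs[0] if has_metrics else outs
    metrics = [[l[0] for l in loss]]
    print(metrics)  # one entry per loss, e.g. [[0.71, 0.42]]

    # Old behaviour flattened everything and kept a single mean:
    # metrics = [np.mean(list(itertools.chain.from_iterable(outs))[0])]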
@@ -761,7 +763,7 @@ class Model(fluid.dygraph.Layer):
         cbks.on_begin('train')
         for epoch in range(epochs):
             cbks.on_epoch_begin(epoch)
-            # FIXME: adapte to DataLoader
+            # FIXME: adapt to DataLoader
             loader = train_loader
             if not isinstance(train_loader, Iterable):
                 loader = train_loader()
@@ -770,7 +772,7 @@ class Model(fluid.dygraph.Layer):
             if do_eval and epoch % eval_freq == 0:
                 cbks.on_begin('eval', logs)
-                # FIXME: adapte to DataLoader
+                # FIXME: adapt to DataLoader
                 loader = eval_loader
                 if not isinstance(eval_loader, Iterable):
                     loader = eval_loader()
progressbar.py
@@ -91,8 +91,10 @@ class ProgressBar(object):
             self._total_width = len(bar_chars)
             sys.stdout.write(bar_chars)

-            for k, v in values:
+            for k, val in values:
                 info += ' - %s:' % k
-                if isinstance(v, (float, np.float32, np.float64)):
-                    if abs(v) > 1e-3:
-                        info += ' %.4f' % v
+                val = val if isinstance(val, list) else [val]
+                for i, v in enumerate(val):
+                    if isinstance(v, (float, np.float32, np.float64)):
+                        if abs(v) > 1e-3:
+                            info += ' %.4f' % v
@@ -136,8 +138,10 @@ class ProgressBar(object):
             count = 'step %3d' % current_num
             info = count + info

-            for k, v in values:
+            for k, val in values:
                 info += ' - %s:' % k
-                if isinstance(v, (float, np.float32, np.float64)):
-                    if abs(v) > 1e-3:
-                        info += ' %.4f' % v
+                val = val if isinstance(val, list) else [val]
+                for v in val:
+                    if isinstance(v, (float, np.float32, np.float64)):
+                        if abs(v) > 1e-3:
+                            info += ' %.4f' % v
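A standalone sketch of the updated formatting rule (the helper name and sample values below are made up; only the loop body mirrors the diff): a logged value may now be a list, e.g. one entry per loss, and every element is printed after its key.

    import numpy as np

    def format_values(values):
        # Mirrors the updated ProgressBar loop: a value may be a scalar or a list.
        info = ''
        for k, val in values:
            info += ' - %s:' % k
            val = val if isinstance(val, list) else [val]
            for v in val:
                if isinstance(v, (float, np.float32, np.float64)):
                    if abs(v) > 1e-3:
                        info += ' %.4f' % v
        return info

    print(format_values([('loss', [0.7134, 0.4271]), ('acc', 0.9125)]))
    # " - loss: 0.7134 0.4271 - acc: 0.9125"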
tests/test_model.py
@@ -24,7 +24,7 @@ import contextlib
 import paddle
 from paddle import fluid
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
-from model import Model, CrossEntropy, Input
+from model import Model, CrossEntropy, Input, Loss
 from metrics import Accuracy
 from callbacks import ProgBarLogger
@@ -103,47 +103,65 @@ class MNIST(Model):
         return x

-def accuracy(pred, label, topk=(1, )):
-    maxk = max(topk)
-    pred = np.argsort(pred)[:, ::-1][:, :maxk]
-    correct = (pred == np.repeat(label, maxk, 1))
-    batch_size = label.shape[0]
-    res = []
-    for k in topk:
-        correct_k = correct[:, :k].sum()
-        res.append(100.0 * correct_k / batch_size)
-    return res

 @contextlib.contextmanager
 def null_guard():
     yield

+
+class MLP(Model):
+    def __init__(self):
+        super(MLP, self).__init__()
+        SIZE = 10
+        self._fc1 = Linear(784, 200, act="relu")
+        self._fc2 = Linear(200, 200, act="relu")
+        self._fc3 = Linear(200, 200, act="relu")
+        self._fc4 = Linear(200, 10, act="softmax")
+        self._fc5 = Linear(200, 10, act="softmax")
+
+    def forward(self, inputs):
+        x1 = self._fc1(inputs)
+        x2 = self._fc2(x1)
+        x3 = self._fc3(x2)
+        o1 = self._fc5(x3)
+        o2 = self._fc4(x2)
+        return o1, o2
+
+
+class MyCrossEntropy(Loss):
+    def __init__(self, average=True):
+        super(MyCrossEntropy, self).__init__()
+
+    def forward(self, outputs, labels):
+        loss1 = fluid.layers.cross_entropy(outputs[0], labels[0])
+        loss2 = fluid.layers.cross_entropy(outputs[1], labels[0])
+        return [loss1, loss2]
+
+
 class TestModel(unittest.TestCase):
-    def fit(self, dynamic):
+    def fit(self, dynamic, is_mlp=False):
+        im_shape = (-1, 784) if is_mlp else (-1, 1, 28, 28)
         guard = fluid.dygraph.guard() if dynamic else null_guard()
         batch_size = 128
         train_loader = fluid.io.xmap_readers(
-            lambda b: [np.array([x[0] for x in b]).reshape(-1, 1, 28, 28),
+            lambda b: [np.array([x[0] for x in b]).reshape(im_shape),
                        np.array([x[1] for x in b]).reshape(-1, 1)],
             paddle.batch(fluid.io.shuffle(paddle.dataset.mnist.train(), 6e4),
                          batch_size=batch_size, drop_last=True), 1, 1)
         val_loader = fluid.io.xmap_readers(
-            lambda b: [np.array([x[0] for x in b]).reshape(-1, 1, 28, 28),
+            lambda b: [np.array([x[0] for x in b]).reshape(im_shape),
                        np.array([x[1] for x in b]).reshape(-1, 1)],
             paddle.batch(paddle.dataset.mnist.test(),
                          batch_size=batch_size, drop_last=False), 1, 1)
         with guard:
-            inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
+            inputs = [Input(im_shape, 'float32', name='image')]
            labels = [Input([None, 1], 'int64', name='label')]
-            model = MNIST()
+            model = MNIST() if not is_mlp else MLP()
             optim = fluid.optimizer.Momentum(
                 learning_rate=0.01,
                 momentum=.9,
                 parameter_list=model.parameters())
-            model.prepare(optim, CrossEntropy(), Accuracy(), inputs, labels)
+            loss = CrossEntropy() if not is_mlp else MyCrossEntropy()
+            model.prepare(optim, loss, Accuracy(), inputs, labels)
             cbk = ProgBarLogger(50)
             model.fit(train_loader, val_loader, epochs=2, callbacks=cbk)
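For intuition about the new two-headed test model, here is a framework-free numpy sketch of the MLP's data flow (random weights stand in for the Linear layers, so it is illustrative only): one softmax head sits on top of three hidden layers, the other branches off after two, and each head gets its own cross-entropy loss against the same label.

    import numpy as np

    rng = np.random.default_rng(0)

    def relu(x):
        return np.maximum(x, 0)

    def softmax(x):
        e = np.exp(x - x.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)

    # Random stand-ins for _fc1.._fc5: 784->200, 200->200, 200->200, 200->10, 200->10.
    W1 = rng.normal(size=(784, 200))
    W2 = rng.normal(size=(200, 200))
    W3 = rng.normal(size=(200, 200))
    W4 = rng.normal(size=(200, 10))
    W5 = rng.normal(size=(200, 10))

    x = rng.normal(size=(4, 784))  # a batch of 4 flattened 28x28 images
    x1 = relu(x @ W1)
    x2 = relu(x1 @ W2)
    x3 = relu(x2 @ W3)
    o1 = softmax(x3 @ W5)          # first head, three hidden layers deep
    o2 = softmax(x2 @ W4)          # second head, branches off after x2
    print(o1.shape, o2.shape)      # (4, 10) (4, 10): two outputs -> two losses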
@@ -153,6 +171,12 @@ class TestModel(unittest.TestCase):
     def test_fit_dygraph(self):
         self.fit(True)

+    def test_fit_static_multi_loss(self):
+        self.fit(False, MyCrossEntropy())
+
+    def test_fit_dygraph_multi_loss(self):
+        self.fit(True, MyCrossEntropy())
+
 if __name__ == '__main__':
     unittest.main()