PaddlePaddle / hapi
Commit 37a67272
Authored Apr 02, 2020 by qingqing01

Merge branch 'master' of https://github.com/PaddlePaddle/hapi into cyclegan

Parents: b969c14d, 7d1ea67d

Showing 5 changed files with 324 additions and 95 deletions (+324, -95)
callbacks.py           +6    -0
distributed.py         +36   -14
model.py               +248  -79
progressbar.py         +2    -2
tests/test_model.py    +32   -0
callbacks.py

@@ -242,6 +242,12 @@ class ProgBarLogger(Callback):
            samples = logs.get('batch_size', 1)
            self.evaled_samples += samples

            if self.eval_step % self.log_freq == 0 and self.verbose and \
                    ParallelEnv().local_rank == 0:
                # if steps is not None, last step will update in on_epoch_end
                if self.eval_steps and self.eval_step < self.eval_steps:
                    self._updates(logs, 'eval')

    def on_eval_end(self, logs=None):
        logs = logs or {}
        if self.verbose and ParallelEnv().local_rank == 0:
distributed.py

@@ -25,7 +25,6 @@ from paddle.fluid.layers import collective
from paddle.fluid.dygraph.parallel import ParallelEnv, ParallelStrategy
from paddle.fluid.io import BatchSampler

_parallel_context_initialized = False
@@ -67,7 +66,8 @@ class DistributedBatchSampler(BatchSampler):
        self.nranks = ParallelEnv().nranks
        self.local_rank = ParallelEnv().local_rank
        self.epoch = 0
        self.num_samples = int(
            math.ceil(len(self.dataset) * 1.0 / self.nranks))
        self.total_size = self.num_samples * self.nranks

    def __iter__(self):
@@ -78,9 +78,28 @@ class DistributedBatchSampler(BatchSampler):
        if self.shuffle:
            np.random.RandomState(self.epoch).shuffle(indices)
            self.epoch += 1

        # subsample
        indices = indices[self.local_rank * self.num_samples:
                          (self.local_rank + 1) * self.num_samples]

        def _get_indices_by_batch_size(indices):
            subsampled_indices = []
            last_batch_size = self.total_size % (self.batch_size * self.nranks)
            assert last_batch_size % self.nranks == 0
            last_local_batch_size = last_batch_size // self.nranks

            for i in range(self.local_rank * self.batch_size,
                           len(indices) - last_batch_size,
                           self.batch_size * self.nranks):
                subsampled_indices.extend(indices[i:i + self.batch_size])

            indices = indices[len(indices) - last_batch_size:]
            subsampled_indices.extend(
                indices[self.local_rank * last_local_batch_size:(
                    self.local_rank + 1) * last_local_batch_size])
            return subsampled_indices

        if self.nranks > 1:
            indices = _get_indices_by_batch_size(indices)

        assert len(indices) == self.num_samples
        _sample_iter = iter(indices)
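To make the batch-wise subsampling above concrete, here is a minimal sketch (not part of this commit; plain Python with no paddle dependency, and assuming the index list has already been padded out to the total size) of how each rank picks its own slice out of every group of batch_size * nranks indices, with the padded tail split evenly across ranks:

# Minimal illustrative sketch of the per-rank subsampling pattern above.
def subsample(indices, local_rank, nranks, batch_size):
    total_size = len(indices)  # assumed already padded
    last_batch_size = total_size % (batch_size * nranks)
    assert last_batch_size % nranks == 0
    last_local_batch_size = last_batch_size // nranks

    picked = []
    for i in range(local_rank * batch_size,
                   len(indices) - last_batch_size,
                   batch_size * nranks):
        picked.extend(indices[i:i + batch_size])

    tail = indices[len(indices) - last_batch_size:]
    picked.extend(tail[local_rank * last_local_batch_size:
                       (local_rank + 1) * last_local_batch_size])
    return picked

# 12 padded indices, 2 ranks, batch_size 4:
print(subsample(list(range(12)), 0, 2, 4))  # -> [0, 1, 2, 3, 8, 9]
print(subsample(list(range(12)), 1, 2, 4))  # -> [4, 5, 6, 7, 10, 11]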
@@ -103,7 +122,8 @@ class DistributedBatchSampler(BatchSampler):
def _all_gather(x, nranks, ring_id=0, use_calc_stream=True):
    return collective._c_allgather(
        x, nranks, ring_id=ring_id, use_calc_stream=use_calc_stream)


def wait_server_ready(endpoints):
@@ -114,8 +134,7 @@ def wait_server_ready(endpoints):
        for ep in endpoints:
            ip_port = ep.split(":")
            with contextlib.closing(
                    socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
                sock.settimeout(2)
                result = sock.connect_ex((ip_port[0], int(ip_port[1])))
                if result != 0:
@@ -127,8 +146,8 @@ def wait_server_ready(endpoints):
            break


def init_communicator(program, rank, nranks, wait_port, current_endpoint,
                      endpoints):
    if nranks < 2:
        return
    other_endpoints = endpoints[:]
@@ -178,11 +197,14 @@ def prepare_distributed_context(place=None):
    global _parallel_context_initialized

    if not _parallel_context_initialized and isinstance(place, fluid.CUDAPlace):

        def _init_context():
            communicator_prog = fluid.Program()
            init_communicator(communicator_prog, strategy.local_rank,
                              strategy.nranks, True, strategy.current_endpoint,
                              strategy.trainer_endpoints)
            exe = fluid.Executor(place)
            exe.run(communicator_prog)
model.py

@@ -20,6 +20,7 @@ import pickle
import numpy as np
import six
import warnings
import tqdm

from collections import Iterable
from paddle import fluid
@@ -28,6 +29,7 @@ from paddle.fluid.executor import global_scope
from paddle.fluid.io import is_belong_to_optimizer
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.layers.utils import flatten
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
from paddle.fluid.incubate.fleet.base import role_maker
from paddle.fluid.io import DataLoader, Dataset
@@ -413,12 +415,6 @@ class StaticGraphAdapter(object):
        losses = []
        metrics = []
        with fluid.program_guard(prog, self._startup_prog):
            if isinstance(self.model._inputs, dict):
                ins = [self.model._inputs[n]
                       for n in extract_args(self.model.forward) if n != 'self']
            else:
                ins = self.model._inputs
            lbls = self.model._labels if self.model._labels else []
            inputs = [k.forward() for k in to_list(ins)]
@@ -587,10 +583,8 @@ class DynamicGraphAdapter(object):
            samples = outputs[0].shape[0]
            current_count = self._merge_count.get(self.mode + '_total', 0)
            if current_count + samples >= total_size:
                outputs = [o[:total_size - metric.count[0]] for o in outputs]
                labels = [l[:total_size - metric.count[0]] for l in labels]
                outputs = [o[:total_size - current_count] for o in outputs]
                labels = [l[:total_size - current_count] for l in labels]
                self._merge_count[self.mode + '_total'] = 0
                self._merge_count[self.mode + '_batch'] = total_size - current_count
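A worked illustration (not part of this commit; the numbers are made up) of the truncation above: the distributed sampler pads the dataset, so the last all-gathered batch can overshoot the real dataset size, and only the genuinely remaining samples should be merged.

# Hypothetical numbers, for illustration only.
total_size = 100     # real dataset size
current_count = 96   # samples merged from earlier batches
samples = 8          # size of the last all-gathered batch (includes padding)

if current_count + samples >= total_size:
    keep = total_size - current_count
    # outputs = [o[:keep] for o in outputs]
    # labels = [l[:keep] for l in labels]
    print(keep)  # -> 4, so the 4 padded duplicates are dropped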
@@ -612,8 +606,9 @@ class DynamicGraphAdapter(object):
        self.mode = 'test'
        inputs = [to_variable(x) for x in to_list(inputs)]
        outputs = self.model.forward(*inputs)
        if self._nranks > 2:
        if self._nranks > 1 and isinstance(self.model._place, fluid.CUDAPlace):
            outputs = [_all_gather(o, self._nranks) for o in to_list(outputs)]
        return [to_numpy(o) for o in to_list(outputs)]

    def parameters(self, *args, **kwargs):
@@ -696,7 +691,6 @@ class Model(fluid.dygraph.Layer):
        self._loss_weights = None
        self._optimizer = None
        self._device = None
        self._device_ids = None
        self._optimizer = None
        self._test_dataloader = None
@@ -794,8 +788,7 @@ class Model(fluid.dygraph.Layer):
                metrics=None,
                inputs=None,
                labels=None,
                device=None,
                device_ids=None):
                device=None):
        """
        FIXME: add comments
        Args:
            ...

@@ -818,17 +811,6 @@ class Model(fluid.dygraph.Layer):
            device (str|None): specify device type, 'CPU' or 'GPU'.
                If None, automatically select device according to
                installation package version.
            device_ids (list[int]|None): specify device index. If None,
                the available device will be obtained from the environment
                variable when the model is executed. If the GPU is used, the
                currently available device ID is obtained from the environment
                variable FLAGS_selected_gpus or CUDA_VISIBLE_DEVICES when the
                model is executed; for CPU, the currently available CPU number
                is obtained from the environment variable CPU_NUM, for example
                export CPU_NUM=4. If the environment variable is not set, the
                executor will add the variable to the environment and set its
                value to 1. The default is None.
        """
        if isinstance(device, fluid.CUDAPlace) or \
@@ -878,8 +860,10 @@ class Model(fluid.dygraph.Layer):
                                     metric.__class__.__name__)
        self._metrics = to_list(metrics)

        self._inputs = inputs
        self._labels = labels
        self._inputs = to_list(inputs) if not isinstance(inputs, dict) else [
            inputs[n] for n in extract_args(self.forward) if n != 'self'
        ]
        self._labels = to_list(labels)

        if not in_dygraph_mode():
            self._adapter.prepare()
@@ -916,7 +900,7 @@ class Model(fluid.dygraph.Layer):
            eval_freq (int): The frequency, in number of epochs, an evaluation
                is performed.
            log_freq (int): The frequency, in number of steps, the training logs
                are printed.
            save_dir(str|None): The directory to save checkpoint during training.
                If None, will not save checkpoint.
            save_freq (int): The frequency, in number of epochs, to save checkpoint.
@@ -989,25 +973,226 @@ class Model(fluid.dygraph.Layer):
            verbose=verbose,
            metrics=self._metrics_name(), )

        def _run_one_epoch(data_loader, callbacks, mode):
            size = len(data_loader) if hasattr(data_loader, '__len__') else None

        cbks.on_begin('train')
        for epoch in range(epochs):

            # FIXME: adapt to DataLoader
            loader = train_loader
            if not isinstance(train_loader, Iterable):
                loader = train_loader()
            logs = self._run_one_epoch(
                loader, cbks, 'train', metrics_name, epoch=epoch)

            if do_eval and epoch % eval_freq == 0:
                # FIXME: adapt to DataLoader
                loader = eval_loader
                if not isinstance(eval_loader, Iterable):
                    loader = eval_loader()

                eval_steps = len(loader) if hasattr(loader, '__len__') else None
                cbks.on_begin('eval', {
                    'steps': eval_steps,
                    'metrics_name': metrics_name
                })

                logs = self._run_one_epoch(loader, cbks, 'eval', metrics_name)

                cbks.on_end('eval', logs)

        cbks.on_end('train', logs)
        self._test_dataloader = None
    def evaluate(
            self,
            eval_data,
            batch_size=1,
            log_freq=10,
            verbose=2,
            num_workers=0,
            callbacks=None, ):
        """
        FIXME: add more comments and usage
        Args:
            eval_data (Dataset|DataLoader): An iterable data loader is used for
                evaluation. An instance of paddle.fluid.io.Dataset or
                paddle.fluid.io.Dataloader is recommended.
            batch_size (int): Integer number. The batch size of train_data and eval_data.
                When train_data and eval_data are both the instance of Dataloader, this
                parameter will be ignored.
            log_freq (int): The frequency, in number of steps, the eval logs
                are printed.
            verbose (int): The verbosity mode, should be 0, 1, or 2.
                0 = silent, 1 = progress bar, 2 = one line per epoch.
            num_workers (int): The number of subprocesses used to load data, 0 for no
                subprocess and loading data in the main process. When train_data and
                eval_data are both the instance of Dataloader, this parameter will be
                ignored.
            callbacks (Callback|None): A list of `Callback` instances to apply
                during training. If None, `ProgBarLogger` and `ModelCheckpoint`
                are automatically inserted.
        """

        if fluid.in_dygraph_mode():
            feed_list = None
        else:
            feed_list = [x.forward() for x in self._inputs + self._labels]

        if eval_data is not None and isinstance(eval_data, Dataset):
            eval_sampler = DistributedBatchSampler(
                eval_data, batch_size=batch_size)
            eval_loader = DataLoader(
                eval_data,
                batch_sampler=eval_sampler,
                places=self._place,
                feed_list=feed_list,
                num_workers=num_workers,
                return_list=True)
        else:
            eval_loader = eval_data

        self._test_dataloader = eval_loader
        metrics_name = self._metrics_name()

        cbks = config_callbacks(
            callbacks,
            model=self,
            log_freq=log_freq,
            verbose=verbose,
            metrics=self._metrics_name(), )

        loader = eval_loader
        if not isinstance(eval_loader, Iterable):
            loader = eval_loader()

        eval_steps = len(loader) if hasattr(loader, '__len__') else None
        cbks.on_begin('eval', {
            'steps': eval_steps,
            'metrics_name': metrics_name
        })

        logs = self._run_one_epoch(loader, cbks, 'eval', metrics_name)

        cbks.on_end('eval', logs)

        self._test_dataloader = None

        eval_result = {}
        for k in self._metrics_name():
            eval_result[k] = logs[k]

        return eval_result
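A hedged usage sketch of the evaluate API reconstructed above (not part of this commit). It mirrors the call exercised in tests/test_model.py later in this diff; set_device, MnistDataset, MNIST, CrossEntropy, Accuracy, optim, inputs and labels are assumed to be defined as in that test file.

# Sketch only; names are assumed from tests/test_model.py below.
device = set_device('gpu')
val_dataset = MnistDataset(mode='test')

model = MNIST()
model.prepare(optim, CrossEntropy(), Accuracy(), inputs, labels, device=device)

# evaluate() builds a DistributedBatchSampler/DataLoader internally when a
# Dataset is passed, and returns a dict keyed by metric name.
eval_result = model.evaluate(val_dataset, batch_size=64, log_freq=10)
print(eval_result['acc'])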
    def predict(self, test_data, batch_size=1, num_workers=0):
        """
        FIXME: add more comments and usage
        Args:
            test_data (Dataset|DataLoader): An iterable data loader is used for
                prediction. An instance of paddle.fluid.io.Dataset or
                paddle.fluid.io.Dataloader is recommended.
            batch_size (int): Integer number. The batch size of train_data and eval_data.
                When train_data and eval_data are both the instance of Dataloader, this
                parameter will be ignored.
            num_workers (int): The number of subprocesses used to load data, 0 for no
                subprocess and loading data in the main process. When train_data and
                eval_data are both the instance of Dataloader, this parameter will be
                ignored.
        """

        if fluid.in_dygraph_mode():
            feed_list = None
        else:
            feed_list = [x.forward() for x in self._inputs + self._labels]

        if test_data is not None and isinstance(test_data, Dataset):
            test_sampler = DistributedBatchSampler(
                test_data, batch_size=batch_size)
            test_loader = DataLoader(
                test_data,
                batch_sampler=test_sampler,
                places=self._place,
                feed_list=feed_list,
                num_workers=num_workers,
                return_list=True)
        else:
            test_loader = test_data

        self._test_dataloader = test_loader

        loader = test_loader
        if not isinstance(test_loader, Iterable):
            loader = test_loader()

        outputs = None
        for data in tqdm.tqdm(loader):
            if not fluid.in_dygraph_mode():
                data = data[0]

            outs = self.test(*data)

            if outputs is None:
                outputs = outs
            else:
                outputs = [
                    np.vstack([x, outs[i]]) for i, x in enumerate(outputs)
                ]

        self._test_dataloader = None
        if test_loader is not None and self._adapter._nranks > 1 \
                and isinstance(test_loader, DataLoader):
            outputs = [o[:len(test_loader.dataset)] for o in outputs]

        return outputs
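Likewise, a hedged sketch of predict (not part of this commit), borrowing TestMnistDataset and the prepared model from the test file below.

import numpy as np

# Sketch only; predict() returns one numpy array per model output, stacked
# over the whole dataset and truncated back to the true dataset length.
test_dataset = TestMnistDataset()
output = model.predict(test_dataset, batch_size=64)

assert output[0].shape[0] == len(test_dataset)
pred_labels = np.argmax(output[0], -1)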
    def set_eval_data(self, eval_data):
        """
        Args:
            eval_data (Dataset|DataLoader|None): An iterable data loader is used for
                eval. An instance of paddle.fluid.io.Dataset or
                paddle.fluid.io.Dataloader is recommended.
        """
        assert isinstance(
            eval_data,
            DataLoader), "eval_data must be an instance of Dataloader!"
        self._test_dataloader = eval_data
    def _run_one_epoch(self, data_loader, callbacks, mode, metrics_name,
                       epoch=None):
        size = len(data_loader) if hasattr(data_loader, '__len__') else None
        logs = {
            'steps': size,
            'metrics_name': metrics_name,
        }

        for step, data in enumerate(data_loader):
            if not fluid.in_dygraph_mode():
                data = data[0]
                batch_size = data[0].shape()[0]
            else:
                batch_size = data[0].shape[0]

            cbks.on_batch_begin(mode, step, logs)
            if mode == 'train':
                outs = self.train(*data)

            assert epoch is not None, 'when mode is train, epoch must be given'
            callbacks.on_epoch_begin(epoch)

        for step, data in enumerate(data_loader):
            # data might come from different types of data_loader and have
            # different format, as following:
            # 1. DataLoader in static graph:
            #    [[input1, input2, ..., label1, lable2, ...]]
            # 2. DataLoader in dygraph
            #    [input1, input2, ..., label1, lable2, ...]
            # 3. custumed iterator yield concated inputs and labels:
            #    [input1, input2, ..., label1, lable2, ...]
            # 4. custumed iterator yield seperated inputs and labels:
            #    ([input1, input2, ...], [label1, lable2, ...])
            # To handle all of these, flatten (nested) list to list.
            data = flatten(data)
            # LoDTensor.shape is callable, where LoDTensor comes from
            # DataLoader in static graph
            batch_size = data[0].shape()[0] if callable(data[0].shape) \
                else data[0].shape[0]

            callbacks.on_batch_begin(mode, step, logs)
            if mode == 'train':
                outs = self.train(data[:len(self._inputs)],
                                  data[len(self._inputs):])
            else:
                outs = self.eval(*data)
                outs = self.eval(data[:len(self._inputs)],
                                 data[len(self._inputs):])

            # losses
            loss = outs[0] if self._metrics else outs
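The comment block above lists the four batch layouts _run_one_epoch may receive; the following stand-in (not paddle's flatten, just an illustrative plain-Python equivalent) shows how flattening collapses them into one [inputs..., labels...] list that can then be split at len(self._inputs).

# Illustration only: a plain-Python stand-in for the flatten() call above.
def flatten_batch(data):
    flat = []
    for item in (data if isinstance(data, (list, tuple)) else [data]):
        if isinstance(item, (list, tuple)):
            flat.extend(flatten_batch(item))
        else:
            flat.append(item)
    return flat

# Separated inputs/labels (case 4) and a nested static-graph batch (case 1)
# both become ['input1', 'input2', 'label1']:
print(flatten_batch((['input1', 'input2'], ['label1'])))
print(flatten_batch([['input1', 'input2', 'label1']]))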
@@ -1030,30 +1215,14 @@ class Model(fluid.dygraph.Layer):
                logs['batch_size'] = self._adapter._merge_count[mode + '_batch']

                cbks.on_batch_end(mode, step, logs)
            callbacks.on_batch_end(mode, step, logs)
            self._reset_metrics()
            return logs

        cbks.on_begin('train')
        for epoch in range(epochs):
            cbks.on_epoch_begin(epoch)

            # FIXME: adapt to DataLoader
            loader = train_loader
            if not isinstance(train_loader, Iterable):
                loader = train_loader()
            logs = _run_one_epoch(loader, cbks, 'train')

            cbks.on_epoch_end(epoch, logs)

            if do_eval and epoch % eval_freq == 0:
                cbks.on_begin('eval', logs)
                # FIXME: adapt to DataLoader
                loader = eval_loader
                if not isinstance(eval_loader, Iterable):
                    loader = eval_loader()
                logs = _run_one_epoch(loader, cbks, 'eval')
                cbks.on_end('eval', logs)

        if mode == 'train':
            assert epoch is not None, 'when mode is train, epoch must be given'
            callbacks.on_epoch_end(epoch)

        cbks.on_end('train', logs)
        return logs

    def _reset_metrics(self):
        for metric in self._metrics:
progressbar.py

@@ -148,7 +148,7 @@ class ProgressBar(object):
                else:
                    info += ' %.4e' % v
            elif isinstance(v, np.ndarray) and \
                    isinstance(v.size, 1) and \
                    v.size == 1 and \
                    isinstance(v.dtype, (np.float32, np.float64)):
                if abs(v[0]) > 1e-3:
                    info += ' %.4f' % v[0]
tests/test_model.py

@@ -139,6 +139,26 @@ class MyCrossEntropy(Loss):
        return [loss1, loss2]


class TestMnistDataset(MnistDataset):
    def __init__(self):
        super(TestMnistDataset, self).__init__(mode='test')

    def __getitem__(self, idx):
        return self.images[idx],

    def __len__(self):
        return len(self.images)


def get_predict_accuracy(pred, gt):
    pred = np.argmax(pred, -1)
    gt = np.array(gt)

    correct = pred[:, np.newaxis] == gt

    return np.sum(correct) / correct.shape[0]


class TestModel(unittest.TestCase):
    def fit(self, dynamic, is_mlp=False):
        device = set_device('gpu')
@@ -152,6 +172,7 @@ class TestModel(unittest.TestCase):
        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        test_dataset = TestMnistDataset()

        model = MNIST() if not is_mlp else MLP()
        optim = fluid.optimizer.Momentum(
@@ -159,12 +180,23 @@ class TestModel(unittest.TestCase):
        loss = CrossEntropy() if not is_mlp else MyCrossEntropy()
        model.prepare(optim, loss, Accuracy(), inputs, labels, device=device)
        cbk = ProgBarLogger(50)

        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=batch_size,
                  callbacks=cbk)

        eval_result = model.evaluate(val_dataset, batch_size=batch_size)

        output = model.predict(test_dataset, batch_size=batch_size)
        np.testing.assert_equal(output[0].shape[0], len(test_dataset))

        acc = get_predict_accuracy(output[0], val_dataset.labels)

        np.testing.assert_allclose(acc, eval_result['acc'])

    def test_fit_static(self):
        self.fit(False)