magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit a8efea5c

Authored May 28, 2020 by mindspore-ci-bot; committed via Gitee on May 28, 2020.

!1588 GPU update resnet50 script in example

Merge pull request !1588 from VectorSL/r0.3

Parents: 6599cc1a, b5ce6c55
Showing 8 changed files with 122 additions and 56 deletions (+122 −56)
example/resnet50_cifar10/README.md       +12   −0
example/resnet50_cifar10/dataset.py       +9   −3
example/resnet50_cifar10/eval.py         +17  −11
example/resnet50_cifar10/train.py        +27  −16
example/resnet50_imagenet2012/README.md  +15   −0
example/resnet50_imagenet2012/dataset.py  +9   −4
example/resnet50_imagenet2012/eval.py     +8   −6
example/resnet50_imagenet2012/train.py   +25  −16
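Taken together, the eight files thread a new `--device_target` option through the ResNet-50 example scripts: the Ascend path keeps its environment-driven setup (`DEVICE_ID`, `DEVICE_NUM`, `RANK_ID`) and plain `init()`, while the new GPU path initializes NCCL under `mpirun` and derives rank and group size from the communicator; on GPU each rank also writes checkpoints to its own directory.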
example/resnet50_cifar10/README.md

````diff
@@ -123,3 +123,15 @@ Inference result will be stored in the example path, whose folder name is "infer
 ```
 result: {'acc': 0.91446314102564111} ckpt=~/resnet50_cifar10/train_parallel0/resnet-90_195.ckpt
 ```
+
+### Running on GPU
+```
+# distributed training example
+mpirun -n 8 python train.py --dataset_path=~/cifar-10-batches-bin --device_target="GPU" --run_distribute=True
+
+# standalone training example
+python train.py --dataset_path=~/cifar-10-batches-bin --device_target="GPU"
+
+# infer example
+python eval.py --dataset_path=~/cifar10-10-verify-bin --device_target="GPU" --checkpoint_path=resnet-90_195.ckpt
+```
\ No newline at end of file
````
example/resnet50_cifar10/dataset.py

```diff
@@ -20,10 +20,11 @@ import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
 import mindspore.dataset.transforms.vision.c_transforms as C
 import mindspore.dataset.transforms.c_transforms as C2
+from mindspore.communication.management import get_rank, get_group_size
 from config import config
 
-def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32):
+def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
     """
     create a train or eval dataset
@@ -32,12 +33,17 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32):
         do_train(bool): whether dataset is used for train or eval.
         repeat_num(int): the repeat times of dataset. Default: 1
         batch_size(int): the batch size of dataset. Default: 32
+        target(str): the device target. Default: Ascend
 
     Returns:
         dataset
     """
-    device_num = int(os.getenv("DEVICE_NUM"))
-    rank_id = int(os.getenv("RANK_ID"))
+    if target == "Ascend":
+        device_num = int(os.getenv("DEVICE_NUM"))
+        rank_id = int(os.getenv("RANK_ID"))
+    else:
+        rank_id = get_rank()
+        device_num = get_group_size()
 
     if device_num == 1:
         ds = de.Cifar10Dataset(dataset_path, num_parallel_workers=8, shuffle=True)
```
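The dataset change is the heart of the commit, so a brief gloss: `create_dataset` now resolves the shard layout differently per backend. Below is an illustrative standalone sketch of that pattern; the `resolve_rank` helper and the env-var defaults are mine, not the diff's (the diff calls `int(os.getenv(...))` with no fallback).

```python
import os

from mindspore.communication.management import get_rank, get_group_size


def resolve_rank(target="Ascend"):
    """Return (device_num, rank_id) used to shard the dataset per worker."""
    if target == "Ascend":
        # Ascend launchers export these through the rank-table environment.
        device_num = int(os.getenv("DEVICE_NUM", "1"))
        rank_id = int(os.getenv("RANK_ID", "0"))
    else:
        # Under mpirun on GPU, rank and world size come from the NCCL
        # communicator, so init("nccl") must have been called first.
        rank_id = get_rank()
        device_num = get_group_size()
    return device_num, rank_id
```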
example/resnet50_cifar10/eval.py

```diff
@@ -25,7 +25,7 @@ from mindspore.parallel._auto_parallel_context import auto_parallel_context
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.train.model import Model, ParallelMode
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from mindspore.communication.management import init
+from mindspore.communication.management import init, get_group_size
 
 parser = argparse.ArgumentParser(description='Image classification')
 parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
@@ -34,26 +34,32 @@ parser.add_argument('--do_train', type=bool, default=False, help='Do train or no
 parser.add_argument('--do_eval', type=bool, default=True, help='Do eval or not.')
 parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
+parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
 args_opt = parser.parse_args()
 
-device_id = int(os.getenv('DEVICE_ID'))
-
-context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
-context.set_context(device_id=device_id)
-
 if __name__ == '__main__':
+    target = args_opt.device_target
+    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
     if not args_opt.do_eval and args_opt.run_distribute:
-        context.set_auto_parallel_context(device_num=args_opt.device_num,
-                                          parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
-        auto_parallel_context().set_all_reduce_fusion_split_indices([140])
-        init()
+        if target == "Ascend":
+            device_id = int(os.getenv('DEVICE_ID'))
+            context.set_context(device_id=device_id)
+            context.set_auto_parallel_context(device_num=args_opt.device_num,
+                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
+            auto_parallel_context().set_all_reduce_fusion_split_indices([140])
+            init()
+        elif target == "GPU":
+            init("nccl")
+            context.set_auto_parallel_context(device_num=get_group_size(),
+                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
 
     epoch_size = config.epoch_size
     net = resnet50(class_num=config.class_num)
     loss = SoftmaxCrossEntropyWithLogits(sparse=True)
 
     if args_opt.do_eval:
-        dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size)
+        dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size,
+                                 target=target)
         step_size = dataset.get_dataset_size()
 
     if args_opt.checkpoint_path:
```
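For orientation, here is a minimal standalone sketch (not from the commit) of the GPU bring-up that the new `elif target == "GPU"` branch performs; under `mpirun -n 8`, each process joins the NCCL group and can then size the data-parallel context from the communicator:

```python
# Hypothetical check_group.py; run as: mpirun -n 8 python check_group.py
from mindspore import context
from mindspore.communication.management import init, get_rank, get_group_size

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
init("nccl")  # join the NCCL communicator spanning all mpirun processes
print("rank {} of {}".format(get_rank(), get_group_size()))
```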
example/resnet50_cifar10/train.py

```diff
@@ -29,7 +29,7 @@ from mindspore.train.model import Model, ParallelMode
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
-from mindspore.communication.management import init
+from mindspore.communication.management import init, get_rank, get_group_size
 
 parser = argparse.ArgumentParser(description='Image classification')
 parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
@@ -37,28 +37,37 @@ parser.add_argument('--device_num', type=int, default=1, help='Device num.')
 parser.add_argument('--do_train', type=bool, default=True, help='Do train or not.')
 parser.add_argument('--do_eval', type=bool, default=False, help='Do eval or not.')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
+parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
 args_opt = parser.parse_args()
 
-device_id = int(os.getenv('DEVICE_ID'))
-
-context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
-                    device_id=device_id, enable_auto_mixed_precision=True)
-
 if __name__ == '__main__':
+    target = args_opt.device_target
     if not args_opt.do_eval and args_opt.run_distribute:
-        context.set_auto_parallel_context(device_num=args_opt.device_num,
-                                          parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
-        auto_parallel_context().set_all_reduce_fusion_split_indices([107, 160])
-        init()
+        if target == "Ascend":
+            device_id = int(os.getenv('DEVICE_ID'))
+            context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
+                                device_id=device_id, enable_auto_mixed_precision=True)
+            init()
+            context.set_auto_parallel_context(device_num=args_opt.device_num,
+                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
+            auto_parallel_context().set_all_reduce_fusion_split_indices([107, 160])
+            ckpt_save_dir = config.save_checkpoint_path
+            loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+        elif target == "GPU":
+            context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
+            init("nccl")
+            context.set_auto_parallel_context(device_num=get_group_size(),
+                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
+            ckpt_save_dir = config.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"
+            loss = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction='mean')
 
     epoch_size = config.epoch_size
     net = resnet50(class_num=config.class_num)
-    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 
     if args_opt.do_train:
         dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True,
-                                 repeat_num=epoch_size, batch_size=config.batch_size)
+                                 repeat_num=epoch_size, batch_size=config.batch_size, target=target)
         step_size = dataset.get_dataset_size()
 
     loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
@@ -67,9 +76,11 @@ if __name__ == '__main__':
                        lr_decay_mode='poly'))
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum,
                    config.weight_decay, config.loss_scale)
-    model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
-                  amp_level="O2", keep_batchnorm_fp32=False)
+    if target == 'GPU':
+        model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
+    else:
+        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
+                      amp_level="O2", keep_batchnorm_fp32=True)
 
     time_cb = TimeMonitor(data_size=step_size)
     loss_cb = LossMonitor()
@@ -77,6 +88,6 @@ if __name__ == '__main__':
     if config.save_checkpoint:
         config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_steps,
                                      keep_checkpoint_max=config.keep_checkpoint_max)
-        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=config.save_checkpoint_path, config=config_ck)
+        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=config_ck)
         cb += [ckpt_cb]
     model.train(epoch_size, dataset, callbacks=cb)
```
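A note on the new `ckpt_save_dir` in the GPU branch: all `mpirun` workers normally share one filesystem, so each rank is given its own `ckpt_<rank>/` directory to stop the per-worker `ModelCheckpoint` callbacks from overwriting each other. A hypothetical helper expressing the same idea:

```python
import os

from mindspore.communication.management import get_rank


def per_rank_ckpt_dir(base_path):
    """Hypothetical helper (not in the diff): one checkpoint dir per worker."""
    # get_rank() assumes init("nccl") has already been called.
    return os.path.join(base_path, "ckpt_{}".format(get_rank()))
```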
example/resnet50_imagenet2012/README.md

````diff
@@ -133,3 +133,18 @@ Inference result will be stored in the example path, whose folder name is "infer
 ```
 result: {'acc': 0.7671054737516005} ckpt=train_parallel0/resnet-90_5004.ckpt
 ```
+
+### Running on GPU
+```
+# distributed training example
+mpirun -n 8 python train.py --dataset_path=dataset/ilsvrc/train --device_target="GPU" --run_distribute=True
+
+# standalone training example
+python train.py --dataset_path=dataset/ilsvrc/train --device_target="GPU"
+
+# standalone training example with pretrained checkpoint
+python train.py --dataset_path=dataset/ilsvrc/train --device_target="GPU" --pre_trained=pretrained.ckpt
+
+# infer example
+python eval.py --dataset_path=dataset/ilsvrc/val --device_target="GPU" --checkpoint_path=resnet-90_5004ss.ckpt
+```
\ No newline at end of file
````
example/resnet50_imagenet2012/dataset.py

```diff
@@ -20,9 +20,9 @@ import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
 import mindspore.dataset.transforms.vision.c_transforms as C
 import mindspore.dataset.transforms.c_transforms as C2
+from mindspore.communication.management import get_rank, get_group_size
 
-def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32):
+def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
     """
     create a train or eval dataset
@@ -31,12 +31,17 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32):
         do_train(bool): whether dataset is used for train or eval.
         repeat_num(int): the repeat times of dataset. Default: 1
         batch_size(int): the batch size of dataset. Default: 32
+        target(str): the device target. Default: Ascend
 
     Returns:
         dataset
     """
-    device_num = int(os.getenv("DEVICE_NUM"))
-    rank_id = int(os.getenv("RANK_ID"))
+    if target == "Ascend":
+        device_num = int(os.getenv("DEVICE_NUM"))
+        rank_id = int(os.getenv("RANK_ID"))
+    else:
+        rank_id = get_rank()
+        device_num = get_group_size()
 
     if device_num == 1:
         ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
```
example/resnet50_imagenet2012/eval.py

```diff
@@ -32,12 +32,13 @@ parser.add_argument('--do_train', type=bool, default=False, help='Do train or no
 parser.add_argument('--do_eval', type=bool, default=True, help='Do eval or not.')
 parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
+parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
 args_opt = parser.parse_args()
 
-device_id = int(os.getenv('DEVICE_ID'))
-
-context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
-context.set_context(device_id=device_id)
+target = args_opt.device_target
+context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
+if target == "Ascend":
+    device_id = int(os.getenv('DEVICE_ID'))
+    context.set_context(device_id=device_id)
 
 if __name__ == '__main__':
@@ -47,7 +48,8 @@ if __name__ == '__main__':
     loss = CrossEntropy(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
 
     if args_opt.do_eval:
-        dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size)
+        dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size,
+                                 target=target)
         step_size = dataset.get_dataset_size()
 
     if args_opt.checkpoint_path:
```
example/resnet50_imagenet2012/train.py

```diff
@@ -29,7 +29,7 @@ from mindspore.train.model import Model, ParallelMode
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from mindspore.communication.management import init
+from mindspore.communication.management import init, get_rank, get_group_size
 import mindspore.nn as nn
 import mindspore.common.initializer as weight_init
 from crossentropy import CrossEntropy
@@ -40,21 +40,28 @@ parser.add_argument('--device_num', type=int, default=1, help='Device num.')
 parser.add_argument('--do_train', type=bool, default=True, help='Do train or not.')
 parser.add_argument('--do_eval', type=bool, default=False, help='Do eval or not.')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
+parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
 parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
 args_opt = parser.parse_args()
 
-device_id = int(os.getenv('DEVICE_ID'))
-
-context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
-                    device_id=device_id, enable_auto_mixed_precision=True)
-
 if __name__ == '__main__':
+    target = args_opt.device_target
     if not args_opt.do_eval and args_opt.run_distribute:
-        context.set_auto_parallel_context(device_num=args_opt.device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
-                                          mirror_mean=True, parameter_broadcast=True)
-        auto_parallel_context().set_all_reduce_fusion_split_indices([107, 160])
-        init()
+        if target == "Ascend":
+            device_id = int(os.getenv('DEVICE_ID'))
+            context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
+                                device_id=device_id, enable_auto_mixed_precision=True)
+            init()
+            context.set_auto_parallel_context(device_num=args_opt.device_num,
+                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
+            auto_parallel_context().set_all_reduce_fusion_split_indices([107, 160])
+            ckpt_save_dir = config.save_checkpoint_path
+        elif target == "GPU":
+            context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
+            init("nccl")
+            context.set_auto_parallel_context(device_num=get_group_size(),
+                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
+            ckpt_save_dir = config.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"
 
     epoch_size = config.epoch_size
     net = resnet50(class_num=config.class_num)
@@ -81,7 +88,7 @@ if __name__ == '__main__':
     if args_opt.do_train:
         dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True,
-                                 repeat_num=epoch_size, batch_size=config.batch_size)
+                                 repeat_num=epoch_size, batch_size=config.batch_size, target=target)
         step_size = dataset.get_dataset_size()
 
     loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
@@ -93,9 +100,11 @@ if __name__ == '__main__':
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum,
                    config.weight_decay, config.loss_scale)
-    model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
-                  amp_level="O2", keep_batchnorm_fp32=False)
+    if target == "Ascend":
+        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
+                      amp_level="O2", keep_batchnorm_fp32=False)
+    elif target == "GPU":
+        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'})
 
     time_cb = TimeMonitor(data_size=step_size)
@@ -104,6 +113,6 @@ if __name__ == '__main__':
     if config.save_checkpoint:
         config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
                                      keep_checkpoint_max=config.keep_checkpoint_max)
-        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=config.save_checkpoint_path, config=config_ck)
+        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=config_ck)
         cb += [ckpt_cb]
     model.train(epoch_size, dataset, callbacks=cb)
```
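Note the asymmetry in the final `Model` construction: on Ascend both scripts keep `amp_level="O2"` mixed precision, while on GPU the ImageNet script retains only the loss-scale manager and the CIFAR-10 script builds a plain `Model` without loss scaling or AMP options.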