magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit 854290c0
Authored Jul 10, 2020 by mindspore-ci-bot; committed via Gitee, Jul 10, 2020
!2984 fix hswishquant and hsigmoidquant validation false bug
Merge pull request !2984 from chenzhongming/master
Parents: 87722b9e, 2ff29f01
Showing 3 changed files with 32 additions and 25 deletions (+32 / -25)
mindspore/nn/layer/quant.py (+2, -2)
model_zoo/mobilenetv2/train.py (+15, -12)
model_zoo/mobilenetv3/train.py (+15, -11)
mindspore/nn/layer/quant.py

@@ -920,7 +920,7 @@ class HSwishQuant(_QuantActivation):
                                              symmetric=symmetric,
                                              narrow_range=narrow_range,
                                              quant_delay=quant_delay)
-        if isinstance(activation, nn.HSwish):
+        if issubclass(activation, nn.HSwish):
             self.act = activation()
         else:
             raise ValueError("Activation should be `nn.HSwish`")
@@ -989,7 +989,7 @@ class HSigmoidQuant(_QuantActivation):
                                              symmetric=symmetric,
                                              narrow_range=narrow_range,
                                              quant_delay=quant_delay)
-        if isinstance(activation, nn.HSwish):
+        if issubclass(activation, nn.HSwish):
             self.act = activation()
         else:
             raise ValueError("Activation should be `nn.HSigmoid`")
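The one-line change in each class is the whole bug fix. The `activation` argument to these quant wrappers is a class object (e.g. `nn.HSwish`), not an instance: the code validates it and then instantiates it with `activation()`. A class object is never an `isinstance` of itself, so the old check rejected every valid input; `issubclass` is the correct test for a class. A minimal sketch of the distinction, using a plain-Python stand-in rather than the MindSpore class:

    class HSwish:              # stand-in for nn.HSwish
        pass

    activation = HSwish        # callers pass the class itself, not an instance

    print(isinstance(activation, HSwish))   # False: the old check always failed
    print(issubclass(activation, HSwish))   # True: the fixed check passes
    act = activation()                      # instantiation happens after validation

Note that the second hunk still tests against `nn.HSwish` inside `HSigmoidQuant` even though its error message names `nn.HSigmoid`; this commit only substitutes `issubclass` for `isinstance`.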
model_zoo/mobilenetv2/train.py

@@ -18,6 +18,7 @@ import time
 import argparse
+import random
 import numpy as np
 from mindspore import context
 from mindspore import Tensor
 from mindspore import nn
@@ -32,8 +33,9 @@ from mindspore.train.model import Model, ParallelMode
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, Callback
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from mindspore.communication.management import init, get_group_size
+from mindspore.communication.management import init, get_group_size, get_rank
 import mindspore.dataset.engine as de
 from src.dataset import create_dataset
 from src.lr_generator import get_lr
 from src.config import config_gpu, config_ascend
@@ -60,9 +62,14 @@ if args_opt.platform == "Ascend":
                         device_id=device_id, save_graphs=False)
 elif args_opt.platform == "GPU":
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
+    context.set_context(mode=context.GRAPH_MODE,
+                        device_target="GPU", save_graphs=False)
+    init("nccl")
+    context.set_auto_parallel_context(device_num=get_group_size(),
+                                      parallel_mode=ParallelMode.DATA_PARALLEL,
+                                      mirror_mean=True)
 else:
-    raise ValueError("Unsupport platform.")
+    raise ValueError("Unsupported device target.")

 class CrossEntropyWithLabelSmooth(_Loss):
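This hunk establishes the distributed state at module level: as soon as the GPU context is set, the script joins the NCCL process group and configures data-parallel execution (the matching removal of this logic from `__main__` appears in the next hunk). It also switches the parallel-mode argument from the string `"data_parallel"` to the `ParallelMode.DATA_PARALLEL` enum and rewords the unsupported-platform error. Order matters here: `init("nccl")` must run before `get_group_size()`, which reports the size of the initialized process group. A condensed sketch of the resulting GPU path, wrapped in a helper function for illustration (the function name is ours; the calls and arguments are the ones in the diff):

    from mindspore import context
    from mindspore.communication.management import init, get_group_size
    from mindspore.train.model import ParallelMode

    def setup_gpu_data_parallel():
        # Graph-mode execution on GPU, as in the script's module-level setup.
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU", save_graphs=False)
        init("nccl")  # join the NCCL process group started by the launcher
        # Data-parallel training across the whole group; mirror_mean averages
        # the mirrored gradients instead of summing them.
        context.set_auto_parallel_context(device_num=get_group_size(),
                                          parallel_mode=ParallelMode.DATA_PARALLEL,
                                          mirror_mean=True)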
@@ -155,12 +162,8 @@ class Monitor(Callback):
 if __name__ == '__main__':
     if args_opt.platform == "GPU":
         # train on gpu
-        print("train args: ", args_opt, "\ncfg: ", config_gpu)
-        init('nccl')
-        context.set_auto_parallel_context(parallel_mode="data_parallel",
-                                          mirror_mean=True,
-                                          device_num=get_group_size())
+        print("train args: ", args_opt)
+        print("cfg: ", config_gpu)

         # define net
         net = mobilenet_v2(num_classes=config_gpu.num_classes, platform="GPU")
@@ -201,13 +204,13 @@ if __name__ == '__main__':
                                  loss_scale_manager=loss_scale)
         cb = [Monitor(lr_init=lr.asnumpy())]
+        ckpt_save_dir = config_gpu.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"
         if config_gpu.save_checkpoint:
             config_ck = CheckpointConfig(save_checkpoint_steps=config_gpu.save_checkpoint_epochs * step_size,
                                          keep_checkpoint_max=config_gpu.keep_checkpoint_max)
-            ckpt_cb = ModelCheckpoint(prefix="mobilenetV2", directory=config_gpu.save_checkpoint_path, config=config_ck)
+            ckpt_cb = ModelCheckpoint(prefix="mobilenetV2", directory=ckpt_save_dir, config=config_ck)
             cb += [ckpt_cb]
-        # begine train
+        # begin train
         model.train(epoch_size, dataset, callbacks=cb)
     elif args_opt.platform == "Ascend":
         # train on ascend
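The new `ckpt_save_dir` is the counterpart of the `get_rank` import added above: in data-parallel training every process runs this checkpoint code, and with a shared `directory` all ranks would write `mobilenetV2*` files into the same place, overwriting or interleaving one another. Appending the rank id gives each worker a private directory. A small self-contained sketch of the pattern (the helper name and the `os.makedirs` call are ours, added for illustration):

    import os

    def rank_checkpoint_dir(base_path, rank):
        # rank 0 -> base_path/ckpt_0/, rank 1 -> base_path/ckpt_1/, ...
        path = os.path.join(base_path, "ckpt_" + str(rank)) + "/"
        os.makedirs(path, exist_ok=True)
        return path

    for r in range(4):  # e.g. a 4-GPU job
        print(rank_checkpoint_dir("./checkpoints", r))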
model_zoo/mobilenetv3/train.py

@@ -18,6 +18,7 @@ import time
 import argparse
+import random
 import numpy as np
 from mindspore import context
 from mindspore import Tensor
 from mindspore import nn
@@ -33,7 +34,8 @@ from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, Callback
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 import mindspore.dataset.engine as de
-from mindspore.communication.management import init, get_group_size
+from mindspore.communication.management import init, get_group_size, get_rank
 from src.dataset import create_dataset
 from src.lr_generator import get_lr
 from src.config import config_gpu, config_ascend
@@ -57,10 +59,16 @@ if args_opt.platform == "Ascend":
     device_id = int(os.getenv('DEVICE_ID'))
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
+                        device_id=device_id, save_graphs=False)
 elif args_opt.platform == "GPU":
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
+    context.set_context(mode=context.GRAPH_MODE,
+                        device_target="GPU", save_graphs=False)
+    init("nccl")
+    context.set_auto_parallel_context(device_num=get_group_size(),
+                                      parallel_mode=ParallelMode.DATA_PARALLEL,
+                                      mirror_mean=True)
 else:
     raise ValueError("Unsupport platform.")
@@ -155,12 +163,8 @@ class Monitor(Callback):
 if __name__ == '__main__':
     if args_opt.platform == "GPU":
         # train on gpu
-        print("train args: ", args_opt, "\ncfg: ", config_gpu)
-        init('nccl')
-        context.set_auto_parallel_context(parallel_mode="data_parallel",
-                                          mirror_mean=True,
-                                          device_num=get_group_size())
+        print("train args: ", args_opt)
+        print("cfg: ", config_gpu)

         # define net
         net = mobilenet_v3_large(num_classes=config_gpu.num_classes)
@@ -201,11 +205,11 @@ if __name__ == '__main__':
                                  loss_scale_manager=loss_scale)
         cb = [Monitor(lr_init=lr.asnumpy())]
+        ckpt_save_dir = config_gpu.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"
         if config_gpu.save_checkpoint:
             config_ck = CheckpointConfig(save_checkpoint_steps=config_gpu.save_checkpoint_epochs * step_size,
                                          keep_checkpoint_max=config_gpu.keep_checkpoint_max)
-            ckpt_cb = ModelCheckpoint(prefix="mobilenetV3", directory=config_gpu.save_checkpoint_path, config=config_ck)
+            ckpt_cb = ModelCheckpoint(prefix="mobilenetV3", directory=ckpt_save_dir, config=config_ck)
             cb += [ckpt_cb]
         # begine train
         model.train(epoch_size, dataset, callbacks=cb)
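Taken together, both training scripts now assume they start under a multi-process launcher in the GPU path, since `init("nccl")` runs at import time. A hypothetical eight-worker launch, assuming an OpenMPI-style environment and an argparse flag spelled `--platform` (inferred from `args_opt.platform`; the flag definition is outside this diff):

    mpirun -n 8 python train.py --platform GPU

Each worker joins the NCCL group, sees `get_group_size() == 8`, and checkpoints into its own `ckpt_<rank>/` subdirectory, so no two processes contend for the same files.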