Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MindSpore
mindinsight
提交
63ddd22e
M
mindinsight
项目概览
MindSpore
/
mindinsight
通知
7
Star
3
Fork
2
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindinsight
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
63ddd22e
编写于
8月 21, 2020
作者:
M
moran
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Fix wizard template module to fit changed operator API
上级
4d482320
变更
9
显示空白变更内容
内联
并排
Showing
9 changed file
with
81 addition
and
43 deletion
+81
-43
mindinsight/wizard/conf/templates/network/alexnet/eval.py-tpl
...insight/wizard/conf/templates/network/alexnet/eval.py-tpl
+1
-1
mindinsight/wizard/conf/templates/network/alexnet/train.py-tpl
...nsight/wizard/conf/templates/network/alexnet/train.py-tpl
+11
-27
mindinsight/wizard/conf/templates/network/lenet/eval.py-tpl
mindinsight/wizard/conf/templates/network/lenet/eval.py-tpl
+1
-1
mindinsight/wizard/conf/templates/network/lenet/train.py-tpl
mindinsight/wizard/conf/templates/network/lenet/train.py-tpl
+1
-1
mindinsight/wizard/conf/templates/network/resnet50/README.md-tpl
...ight/wizard/conf/templates/network/resnet50/README.md-tpl
+1
-0
mindinsight/wizard/conf/templates/network/resnet50/eval.py-tpl
...nsight/wizard/conf/templates/network/resnet50/eval.py-tpl
+3
-2
mindinsight/wizard/conf/templates/network/resnet50/src/CrossEntropySmooth.py-tpl
.../templates/network/resnet50/src/CrossEntropySmooth.py-tpl
+38
-0
mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl
...sight/wizard/conf/templates/network/resnet50/train.py-tpl
+9
-7
tests/st/func/wizard/test_resnet50.py
tests/st/func/wizard/test_resnet50.py
+16
-4
未找到文件。
mindinsight/wizard/conf/templates/network/alexnet/eval.py-tpl
浏览文件 @
63ddd22e
...
...
@@ -48,7 +48,7 @@ if __name__ == "__main__":
network
=
AlexNet
(
cfg
.
num_classes
)
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
net_loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
is_grad
=
False
,
sparse
=
True
,
reduction
=
"mean"
)
net_loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
"mean"
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
net_loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
{%
endif
%}
...
...
mindinsight/wizard/conf/templates/network/alexnet/train.py-tpl
浏览文件 @
63ddd22e
...
...
@@ -99,24 +99,8 @@ if __name__ == "__main__":
lr
=
Tensor
(
get_lr
(
0
,
cfg
.
lr
,
cfg
.
epoch_size
,
ds_train
.
get_dataset_size
()))
#
define
loss
,
model
if
target
==
"Ascend"
:
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
'mean'
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
{%
endif
%}
{%
if
optimizer
==
'Momentum'
%}
opt
=
nn
.
Momentum
(
filter
(
lambda
x
:
x
.
requires_grad
,
net
.
get_parameters
()),
learning_rate
=
lr
,
momentum
=
cfg
.
momentum
,
weight_decay
=
cfg
.
weight_decay
,
loss_scale
=
cfg
.
loss_scale
)
{%
else
%}
opt
=
nn
.{{
optimizer
}}(
net
.
trainable_params
(),
learning_rate
=
cfg
.
lr
)
{%
endif
%}
loss_scale
=
FixedLossScaleManager
(
cfg
.
loss_scale
,
drop_overflow_update
=
False
)
model
=
Model
(
net
,
loss_fn
=
loss
,
optimizer
=
opt
,
loss_scale_manager
=
loss_scale
,
metrics
={
'acc'
},
amp_level
=
"O2"
,
keep_batchnorm_fp32
=
False
)
else
:
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
is_grad
=
False
,
sparse
=
True
,
reduction
=
"mean"
)
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
"mean"
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
{%
endif
%}
...
...
mindinsight/wizard/conf/templates/network/lenet/eval.py-tpl
浏览文件 @
63ddd22e
...
...
@@ -48,7 +48,7 @@ if __name__ == "__main__":
network
=
LeNet5
(
cfg
.
num_classes
)
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
net_loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
is_grad
=
False
,
sparse
=
True
,
reduction
=
"mean"
)
net_loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
"mean"
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
net_loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
{%
endif
%}
...
...
mindinsight/wizard/conf/templates/network/lenet/train.py-tpl
浏览文件 @
63ddd22e
...
...
@@ -73,7 +73,7 @@ if __name__ == "__main__":
param_dict
=
load_checkpoint
(
args
.
pre_trained
)
load_param_into_net
(
network
,
param_dict
)
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
net_loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
is_grad
=
False
,
sparse
=
True
,
reduction
=
"mean"
)
net_loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
"mean"
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
net_loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
{%
endif
%}
...
...
mindinsight/wizard/conf/templates/network/resnet50/README.md-tpl
浏览文件 @
63ddd22e
...
...
@@ -52,6 +52,7 @@ ImageNet
├── config.py # parameter configuration
├── dataset.py # data preprocessing
├── lr_generator.py # generate learning rate for each step
├── CrossEntropySmooth.py # define the cross entropy loss function with smoothed labels
└── resnet50.py # resNet50 network definition
├── eval.py # eval net
└── train.py # train net
...
...
mindinsight/wizard/conf/templates/network/resnet50/eval.py-tpl
浏览文件 @
63ddd22e
...
...
@@ -22,6 +22,7 @@ from mindspore import context
from
mindspore
import
dataset
as
de
from
mindspore
.
train
.
model
import
Model
from
mindspore
.
train
.
serialization
import
load_checkpoint
,
load_param_into_net
from
src
.
CrossEntropySmooth
import
CrossEntropySmooth
parser
=
argparse
.
ArgumentParser
(
description
=
'Image classification'
)
...
...
@@ -69,7 +70,7 @@ if __name__ == '__main__':
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
if
not
cfg
.
use_label_smooth
:
cfg
.
label_smooth_factor
=
0.0
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
'mean'
,
loss
=
CrossEntropySmooth
(
sparse
=
True
,
reduction
=
'mean'
,
smooth_factor
=
cfg
.
label_smooth_factor
,
num_classes
=
cfg
.
num_classes
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
...
...
mindinsight/wizard/conf/templates/network/resnet50/src/CrossEntropySmooth.py-tpl
0 → 100644
浏览文件 @
63ddd22e
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""define loss function for network"""
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn.loss.loss import _Loss
from mindspore.ops import functional as F
from mindspore.ops import operations as P
class CrossEntropySmooth(_Loss):
    """Cross-entropy loss with label smoothing.

    Turns sparse integer labels into smoothed one-hot targets and then
    delegates to ``nn.SoftmaxCrossEntropyWithLogits``.
    """

    def __init__(self, sparse=True, reduction='mean', smooth_factor=0., num_classes=1000):
        super(CrossEntropySmooth, self).__init__()
        self.sparse = sparse
        # Smoothed target distribution: the true class receives
        # 1 - smooth_factor; the remaining mass is spread evenly
        # over the other num_classes - 1 classes.
        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
        self.onehot = P.OneHot()
        self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction)

    def construct(self, logit, label):
        """Return the smoothed cross-entropy between ``logit`` and ``label``."""
        if self.sparse:
            # Class count is inferred from the logit's second dimension.
            label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
        return self.ce(logit, label)
mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl
浏览文件 @
63ddd22e
...
...
@@ -30,6 +30,7 @@ from mindspore.communication.management import init, get_rank, get_group_size
import
mindspore
.
nn
as
nn
import
mindspore
.
common
.
initializer
as
weight_init
from
src
.
lr_generator
import
get_lr
,
warmup_cosine_annealing_lr
from
src
.
CrossEntropySmooth
import
CrossEntropySmooth
parser
=
argparse
.
ArgumentParser
(
description
=
'Image classification'
)
parser
.
add_argument
(
'--run_distribute'
,
type
=
bool
,
default
=
False
,
help
=
'Run distribute'
)
...
...
@@ -105,8 +106,9 @@ if __name__ == '__main__':
warmup_epochs
=
cfg
.
warmup_epochs
,
total_epochs
=
cfg
.
epoch_size
,
steps_per_epoch
=
step_size
,
lr_decay_mode
=
'poly'
)
{%
else
%}
lr
=
get_lr
(
lr_init
=
cfg
.
lr_init
,
lr_end
=
0.0
,
lr_max
=
cfg
.
lr_max
,
warmup_epochs
=
cfg
.
warmup_epochs
,
total_epochs
=
cfg
.
epoch_size
,
steps_per_epoch
=
step_size
,
lr_decay_mode
=
'cosine'
)
lr
=
get_lr
(
lr_init
=
cfg
.
lr_init
,
lr_end
=
0.0
,
lr_max
=
cfg
.
lr_max
,
warmup_epochs
=
cfg
.
warmup_epochs
,
total_epochs
=
cfg
.
epoch_size
,
steps_per_epoch
=
step_size
,
lr_decay_mode
=
'cosine'
)
{%
endif
%}
lr
=
Tensor
(
lr
)
...
...
@@ -125,7 +127,7 @@ if __name__ == '__main__':
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
if
not
cfg
.
use_label_smooth
:
cfg
.
label_smooth_factor
=
0.0
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
'mean'
,
loss
=
CrossEntropySmooth
(
sparse
=
True
,
reduction
=
'mean'
,
smooth_factor
=
cfg
.
label_smooth_factor
,
num_classes
=
cfg
.
num_classes
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
...
...
@@ -146,14 +148,14 @@ if __name__ == '__main__':
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
if
not
cfg
.
use_label_smooth
:
cfg
.
label_smooth_factor
=
0.0
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
is_grad
=
Fals
e
,
reduction
=
'mean'
,
loss
=
CrossEntropySmooth
(
sparse
=
Tru
e
,
reduction
=
'mean'
,
smooth_factor
=
cfg
.
label_smooth_factor
,
num_classes
=
cfg
.
num_classes
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
{%
endif
%}
{%
else
%}
{%
if
loss
==
'SoftmaxCrossEntropyWithLogits'
%}
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
is_grad
=
False
,
reduction
=
'mean'
)
loss
=
nn
.
SoftmaxCrossEntropyWithLogits
(
sparse
=
True
,
reduction
=
'mean'
)
{%
elif
loss
==
'SoftmaxCrossEntropyExpand'
%}
loss
=
nn
.
SoftmaxCrossEntropyExpand
(
sparse
=
True
)
{%
endif
%}
...
...
tests/st/func/wizard/test_resnet50.py
浏览文件 @
63ddd22e
...
...
@@ -117,6 +117,7 @@ class TestResNet50:
config_dataset_is_right
=
False
config_optimizer_is_right
=
False
network_is_right
=
False
cross_entorpy_smooth_is_right
=
False
generator_lr_is_right
=
False
for
source_file
in
self
.
source_files
:
if
source_file
.
file_relative_path
==
'src/dataset.py'
:
...
...
@@ -124,6 +125,8 @@ class TestResNet50:
dataset_is_right
=
True
if
source_file
.
file_relative_path
==
os
.
path
.
join
(
'src'
,
NETWORK_NAME
.
lower
()
+
'.py'
):
network_is_right
=
True
if
source_file
.
file_relative_path
==
'src/CrossEntropySmooth.py'
:
cross_entorpy_smooth_is_right
=
True
if
source_file
.
file_relative_path
==
'src/lr_generator.py'
:
generator_lr_is_right
=
True
if
source_file
.
file_relative_path
==
'src/config.py'
:
...
...
@@ -136,6 +139,7 @@ class TestResNet50:
assert
config_dataset_is_right
assert
config_optimizer_is_right
assert
network_is_right
assert
cross_entorpy_smooth_is_right
assert
generator_lr_is_right
@
staticmethod
...
...
@@ -179,12 +183,20 @@ class TestResNet50:
for
source_file
in
self
.
source_files
:
if
source_file
.
file_relative_path
==
'train.py'
:
content
=
source_file
.
content
if
'resnet50'
in
content
and
loss_name
in
content
and
optimizer_name
in
content
:
if
'resnet50'
in
content
and
optimizer_name
in
content
:
if
dataset_name
==
'ImageNet'
and
loss_name
==
'SoftmaxCrossEntropyWithLogits'
\
and
'loss = CrossEntropySmooth'
in
content
:
train_is_right
=
True
elif
loss_name
in
content
:
train_is_right
=
True
if
source_file
.
file_relative_path
==
'eval.py'
:
content
=
source_file
.
content
if
'resnet50'
in
content
and
loss_name
in
content
:
if
'resnet50'
in
content
:
if
dataset_name
==
'ImageNet'
and
loss_name
==
'SoftmaxCrossEntropyWithLogits'
\
and
'loss = CrossEntropySmooth'
in
content
:
eval_is_right
=
True
elif
loss_name
in
content
:
eval_is_right
=
True
if
source_file
.
file_relative_path
==
'README.md'
:
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录