Commit 0040764d
Authored on May 16, 2020 by mindspore-ci-bot; committed by Gitee on May 16, 2020.
!1100 delete some context param(enable_loop_sink,enable_mem_reuse,enable_auto_mixed_precision)
Merge pull request !1100 from jinyaohui/context_opt_2
Parents: e4121ffe, 2907cf44
Showing 44 changed files with 61 additions and 149 deletions (+61 -149).
example/alexnet_cifar10/eval.py                               +1  -1
example/alexnet_cifar10/train.py                              +1  -1
example/bert_clue/README.md                                   +1  -4
example/bert_clue/finetune.py                                 +1  -2
example/bert_clue/run_distribute_pretrain.sh                  +0  -2
example/bert_clue/run_pretrain.py                             +0  -4
example/bert_clue/run_standalone_pretrain.sh                  +0  -2
example/googlenet_cifar10/eval.py                             +0  -1
example/googlenet_cifar10/train.py                            +0  -2
example/lenet_mnist/eval.py                                   +1  -1
example/lenet_mnist/train.py                                  +1  -1
example/mobilenetv2_imagenet2012/eval.py                      +0  -2
example/mobilenetv2_imagenet2012/train.py                     +0  -2
example/resnet101_imagenet2012/eval.py                        +0  -2
example/resnet101_imagenet2012/train.py                       +0  -2
example/resnet50_cifar10/eval.py                              +0  -2
example/resnet50_cifar10/train.py                             +0  -2
example/resnet50_imagenet2012/eval.py                         +1  -3
example/resnet50_imagenet2012/train.py                        +1  -3
example/ssd_coco2017/eval.py                                  +0  -1
example/ssd_coco2017/train.py                                 +0  -1
example/vgg16_cifar10/eval.py                                 +0  -1
example/vgg16_cifar10/train.py                                +0  -2
example/yolov3_coco2017/eval.py                               +0  -1
example/yolov3_coco2017/train.py                              +0  -1
mindspore/ccsrc/debug/e2e_dump.cc                             +4  -0
mindspore/ccsrc/pipeline/init.cc                              +0  -8
mindspore/ccsrc/utils/context/ms_context.h                    +0  -6
mindspore/context.py                                          +13 -52
mindspore/train/dataset_helper.py                             +17 -12
mindspore/train/model.py                                      +2  -5
tests/st/auto_parallel/onehot_model_parallel.py               +0  -1
tests/st/auto_parallel/soft_entropy_loss_expand_parallel.py   +0  -1
tests/st/auto_parallel/test_resnet50_expand_loss_2p.py        +0  -1
tests/st/mem_reuse/resnet_cifar_memreuse.py                   +0  -2
tests/st/mem_reuse/resnet_cifar_normal.py                     +0  -2
tests/st/networks/models/bert/bert_tdt_lossscale.py           +0  -2
tests/st/networks/test_gpu_lenet.py                           +1  -1
tests/st/tbe_networks/export_geir.py                          +1  -1
tests/st/tbe_networks/resnet_cifar.py                         +0  -2
tests/st/tbe_networks/test_resnet_cifar_1p.py                 +0  -4
tests/st/tbe_networks/test_resnet_cifar_8p.py                 +0  -2
tests/ut/python/parallel/test_auto_parallel_resnet.py         +0  -1
tests/ut/python/pynative_mode/test_context.py                 +15 -0
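For users tracking this change, the practical effect is that `context.set_context` no longer accepts the three deleted keyword arguments; loop sink and memory reuse are handled internally (keyed off `device_target`, as the diffs below show) instead of being set explicitly. A minimal before/after sketch, assuming an Ascend target:

```python
from mindspore import context

# Before this commit, sink behaviour was configured by hand:
# context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
#                     enable_loop_sink=True, enable_mem_reuse=True)

# After this commit, only the mode and device target are passed; the
# deleted parameters are no longer recognized by set_context.
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
```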
example/alexnet_cifar10/eval.py

@@ -39,7 +39,7 @@ if __name__ == "__main__":
     parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True')
     args = parser.parse_args()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
     network = AlexNet(cfg.num_classes)
     loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")

example/alexnet_cifar10/train.py

@@ -39,7 +39,7 @@ if __name__ == "__main__":
     parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True')
     args = parser.parse_args()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
     network = AlexNet(cfg.num_classes)
     loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")

example/bert_clue/README.md

@@ -46,8 +46,7 @@ This example implements pre-training, fine-tuning and evaluation of [BERT-base](
 ### Pre-Training
 ```
 usage: run_pretrain.py [--distribute DISTRIBUTE] [--epoch_size N] [----device_num N] [--device_id N]
-                        [--enable_task_sink ENABLE_TASK_SINK] [--enable_loop_sink ENABLE_LOOP_SINK]
-                        [--enable_mem_reuse ENABLE_MEM_REUSE] [--enable_save_ckpt ENABLE_SAVE_CKPT]
+                        [--enable_save_ckpt ENABLE_SAVE_CKPT]
                         [--enable_lossscale ENABLE_LOSSSCALE] [--do_shuffle DO_SHUFFLE]
                         [--enable_data_sink ENABLE_DATA_SINK] [--data_sink_steps N] [--checkpoint_path CHECKPOINT_PATH]
                         [--save_checkpoint_steps N] [--save_checkpoint_num N]

@@ -58,8 +57,6 @@ options:
     --epoch_size             epoch size: N, default is 1
     --device_num             number of used devices: N, default is 1
     --device_id              device id: N, default is 0
-    --enable_loop_sink       enable loop sink: "true" | "false", default is "true"
-    --enable_mem_reuse       enable memory reuse: "true" | "false", default is "true"
     --enable_save_ckpt       enable save checkpoint: "true" | "false", default is "true"
     --enable_lossscale       enable lossscale: "true" | "false", default is "true"
     --do_shuffle             enable shuffle: "true" | "false", default is "true"

example/bert_clue/finetune.py

@@ -83,8 +83,7 @@ def test_train():
     pytest -s finetune.py::test_train
     '''
     devid = int(os.getenv('DEVICE_ID'))
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=devid,
-                        enable_mem_reuse=True, enable_task_sink=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=devid)
     #BertCLSTrain for classification
     #BertNERTrain for sequence labeling
     if cfg.task == 'NER':

example/bert_clue/run_distribute_pretrain.sh

@@ -50,8 +50,6 @@ do
         --epoch_size=$EPOCH_SIZE \
         --device_id=$DEVICE_ID \
         --device_num=$RANK_SIZE \
-        --enable_loop_sink="true" \
-        --enable_mem_reuse="true" \
         --enable_save_ckpt="true" \
         --enable_lossscale="true" \
         --do_shuffle="true" \

example/bert_clue/run_pretrain.py

@@ -59,8 +59,6 @@ def run_pretrain():
     parser.add_argument("--epoch_size", type=int, default="1", help="Epoch size, default is 1.")
     parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
     parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
-    parser.add_argument("--enable_loop_sink", type=str, default="true", help="Enable loop sink, default is true.")
-    parser.add_argument("--enable_mem_reuse", type=str, default="true", help="Enable mem reuse, default is true.")
     parser.add_argument("--enable_save_ckpt", type=str, default="true", help="Enable save checkpoint, default is true.")
     parser.add_argument("--enable_lossscale", type=str, default="true", help="Use lossscale or not, default is not.")
     parser.add_argument("--do_shuffle", type=str, default="true", help="Enable shuffle for dataset, default is true.")

@@ -75,8 +73,6 @@ def run_pretrain():
     args_opt = parser.parse_args()
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
-    context.set_context(enable_loop_sink=(args_opt.enable_loop_sink == "true"),
-                        enable_mem_reuse=(args_opt.enable_mem_reuse == "true"))
     context.set_context(reserve_class_name_in_scope=False)
 
     if args_opt.distribute == "true":

example/bert_clue/run_standalone_pretrain.sh

@@ -29,8 +29,6 @@ python run_pretrain.py \
     --distribute="false" \
     --epoch_size=$EPOCH_SIZE \
     --device_id=$DEVICE_ID \
-    --enable_loop_sink="true" \
-    --enable_mem_reuse="true" \
     --enable_save_ckpt="true" \
     --enable_lossscale="true" \
     --do_shuffle="true" \

example/googlenet_cifar10/eval.py

@@ -40,7 +40,6 @@ if __name__ == '__main__':
     context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
     context.set_context(device_id=args_opt.device_id)
-    context.set_context(enable_mem_reuse=True)
     net = GooGLeNet(num_classes=cfg.num_classes)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, cfg.momentum,

example/googlenet_cifar10/train.py

@@ -70,8 +70,6 @@ if __name__ == '__main__':
     context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
     context.set_context(device_id=args_opt.device_id)
-    context.set_context(enable_loop_sink=True)
-    context.set_context(enable_mem_reuse=True)
     device_num = int(os.environ.get("DEVICE_NUM", 1))
     if device_num > 1:

example/lenet_mnist/eval.py

@@ -43,7 +43,7 @@ if __name__ == "__main__":
     args = parser.parse_args()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
     network = LeNet5(cfg.num_classes)
     net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")

example/lenet_mnist/train.py

@@ -40,7 +40,7 @@ if __name__ == "__main__":
     args = parser.parse_args()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
     network = LeNet5(cfg.num_classes)
     net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")

example/mobilenetv2_imagenet2012/eval.py

@@ -34,8 +34,6 @@ args_opt = parser.parse_args()
 device_id = int(os.getenv('DEVICE_ID'))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 if __name__ == '__main__':
     loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')

example/mobilenetv2_imagenet2012/train.py

@@ -56,8 +56,6 @@ rank_size = int(os.getenv('RANK_SIZE'))
 run_distribute = rank_size > 1
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 class CrossEntropyWithLabelSmooth(_Loss):
     """

example/resnet101_imagenet2012/eval.py

@@ -46,8 +46,6 @@ args_opt = parser.parse_args()
 device_id = int(os.getenv('DEVICE_ID'))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False, device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 if __name__ == '__main__':
     if not args_opt.do_eval and args_opt.run_distribute:

example/resnet101_imagenet2012/train.py

@@ -49,8 +49,6 @@ args_opt = parser.parse_args()
 device_id = int(os.getenv('DEVICE_ID'))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False, device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 if __name__ == '__main__':
     if not args_opt.do_eval and args_opt.run_distribute:

example/resnet50_cifar10/eval.py

@@ -40,8 +40,6 @@ device_id = int(os.getenv('DEVICE_ID'))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
 context.set_context(device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 if __name__ == '__main__':
     if not args_opt.do_eval and args_opt.run_distribute:

example/resnet50_cifar10/train.py

@@ -43,8 +43,6 @@ device_id = int(os.getenv('DEVICE_ID'))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
 context.set_context(device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 if __name__ == '__main__':
     if not args_opt.do_eval and args_opt.run_distribute:

example/resnet50_imagenet2012/eval.py

@@ -37,9 +37,7 @@ args_opt = parser.parse_args()
 device_id = int(os.getenv('DEVICE_ID'))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
-context.set_context(enable_task_sink=True, device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
+context.set_context(device_id=device_id)
 
 if __name__ == '__main__':

example/resnet50_imagenet2012/train.py

@@ -44,9 +44,7 @@ args_opt = parser.parse_args()
 device_id = int(os.getenv('DEVICE_ID'))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
-context.set_context(enable_task_sink=True, device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
+context.set_context(device_id=device_id)
 
 if __name__ == '__main__':
     if not args_opt.do_eval and args_opt.run_distribute:

example/ssd_coco2017/eval.py

@@ -71,7 +71,6 @@ if __name__ == '__main__':
     args_opt = parser.parse_args()
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
-    context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
     config = ConfigSSD()
     prefix = "ssd_eval.mindrecord"

example/ssd_coco2017/train.py

@@ -93,7 +93,6 @@ def main():
     args_opt = parser.parse_args()
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
-    context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
     if args_opt.distribute:
         device_num = args_opt.device_num

example/vgg16_cifar10/eval.py

@@ -37,7 +37,6 @@ if __name__ == '__main__':
     context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
     context.set_context(device_id=args_opt.device_id)
-    context.set_context(enable_mem_reuse=True)
     net = vgg16(num_classes=cfg.num_classes)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, cfg.momentum,

example/vgg16_cifar10/train.py

@@ -64,8 +64,6 @@ if __name__ == '__main__':
     context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
     context.set_context(device_id=args_opt.device_id)
-    context.set_context(enable_loop_sink=True)
-    context.set_context(enable_mem_reuse=True)
     device_num = int(os.environ.get("DEVICE_NUM", 1))
     if device_num > 1:

example/yolov3_coco2017/eval.py

@@ -82,7 +82,6 @@ if __name__ == '__main__':
     args_opt = parser.parse_args()
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
-    context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
     # It will generate mindrecord file in args_opt.mindrecord_dir,
     # and the file name is yolo.mindrecord0, 1, ... file_num.

example/yolov3_coco2017/train.py

@@ -84,7 +84,6 @@ def main():
     args_opt = parser.parse_args()
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
-    context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
     if args_opt.distribute:
         device_num = args_opt.device_num
         context.reset_auto_parallel_context()

mindspore/ccsrc/debug/e2e_dump.cc

@@ -107,6 +107,10 @@ bool Dump::IsConfigValid(const nlohmann::json &dumpSettings) {
   }
   dump_enable_ = enable;
+  auto context_ptr = MsContext::GetInstance();
+  MS_EXCEPTION_IF_NULL(context_ptr);
+  // dump_enable_ is true, close mem reuse
+  context_ptr->set_enable_mem_reuse(!dump_enable_);
   trans_flag_ = trans_flag;
   dump_mode_ = mode;
   dump_path_ = path;

mindspore/ccsrc/pipeline/init.cc

@@ -117,20 +117,12 @@ PYBIND11_MODULE(_c_expression, m) {
     .def("close_tsd", &mindspore::MsContext::CloseTsd, "Close tdt dataset client.")
     .def("get_save_graphs_flag", &mindspore::MsContext::save_graphs_flag, "Get whether to save graphs.")
     .def("set_save_graphs_flag", &mindspore::MsContext::set_save_graphs_flag, "Set whether to save graphs.")
-    .def("get_auto_mixed_precision_flag", &mindspore::MsContext::auto_mixed_precision_flag,
-         "Get whether to enable auto mixed precision.")
-    .def("set_auto_mixed_precision_flag", &mindspore::MsContext::set_auto_mixed_precision_flag,
-         "Set whether to enable auto mixed precision.")
     .def("get_enable_reduce_precision_flag", &mindspore::MsContext::enable_reduce_precision,
          "Get whether to enable reduce precision.")
     .def("set_enable_reduce_precision_flag", &mindspore::MsContext::set_enable_reduce_precision,
          "Set whether to enable reduce precision.")
     .def("get_save_graphs_path", &mindspore::MsContext::save_graphs_path, "Get save graphs path.")
     .def("set_save_graphs_path", &mindspore::MsContext::set_save_graphs_path, "Set save graphs path.")
-    .def("get_loop_sink_flag", &mindspore::MsContext::loop_sink_flag, "Get whether to enable loop sink.")
-    .def("set_loop_sink_flag", &mindspore::MsContext::set_loop_sink_flag, "Set whether to enable loop sink.")
-    .def("get_enable_mem_reuse", &mindspore::MsContext::enable_mem_reuse, "Get whether to enable mem reuse.")
-    .def("set_enable_mem_reuse", &mindspore::MsContext::set_enable_mem_reuse, "Set whether to enable mem reuse.")
    .def("get_save_ms_model_flag", &mindspore::MsContext::save_ms_model_flag, "Get whether to save ms model.")
     .def("set_save_ms_model_flag", &mindspore::MsContext::set_save_ms_model_flag, "Set whether to save ms model.")
     .def("get_save_ms_model_path", &mindspore::MsContext::save_ms_model_path, "Get path to save ms model.")

mindspore/ccsrc/utils/context/ms_context.h

@@ -91,7 +91,6 @@ class MsContext {
   bool ir_fusion_flag() const { return ir_fusion_flag_; }
 
-  void set_loop_sink_flag(bool loop_sink_flag) { enable_loop_sink_ = loop_sink_flag; }
   bool loop_sink_flag() const { return enable_loop_sink_; }
 
   void set_enable_mem_reuse(bool enable_mem_reuse) { enable_mem_reuse_ = enable_mem_reuse; }

@@ -106,11 +105,6 @@ class MsContext {
   void set_enable_gpu_summary(bool enable_gpu_summary) { enable_gpu_summary_ = enable_gpu_summary; }
   bool enable_gpu_summary() const { return enable_gpu_summary_; }
 
-  void set_auto_mixed_precision_flag(bool auto_mixed_precision_flag) {
-    auto_mixed_precision_flag_ = auto_mixed_precision_flag;
-  }
-  bool auto_mixed_precision_flag() const { return auto_mixed_precision_flag_; }
-
   void set_enable_reduce_precision(bool flag) { enable_reduce_precision_ = flag; }
   bool enable_reduce_precision() const { return enable_reduce_precision_; }

mindspore/context.py

@@ -31,6 +31,8 @@ __all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_aut
 GRAPH_MODE = 0
 PYNATIVE_MODE = 1
+# The max memory size of graph plus variable.
+_DEVICE_APP_MEMORY_SIZE = 31
 
 
 def _make_directory(path):

@@ -215,22 +217,6 @@ class _Context:
         if not success:
             raise RuntimeError("Device id set failed!!!")
 
-    @property
-    def enable_loop_sink(self):
-        return self._context_handle.get_loop_sink_flag()
-
-    @enable_loop_sink.setter
-    def enable_loop_sink(self, enable_loop_sink):
-        self._context_handle.set_loop_sink_flag(enable_loop_sink)
-
-    @property
-    def enable_mem_reuse(self):
-        return self._context_handle.get_enable_mem_reuse()
-
-    @enable_mem_reuse.setter
-    def enable_mem_reuse(self, enable_mem_reuse):
-        self._context_handle.set_enable_mem_reuse(enable_mem_reuse)
-
     @property
     def save_ms_model(self):
         return self._context_handle.get_save_ms_model_flag()

@@ -247,14 +233,6 @@ class _Context:
     def save_ms_model_path(self, save_ms_model_path):
         self._context_handle.set_save_ms_model_path(save_ms_model_path)
 
-    @property
-    def enable_auto_mixed_precision(self):
-        return self._context_handle.get_auto_mixed_precision_flag()
-
-    @enable_auto_mixed_precision.setter
-    def enable_auto_mixed_precision(self, enable_auto_mixed_precision):
-        self._context_handle.set_auto_mixed_precision_flag(enable_auto_mixed_precision)
-
     @property
     def enable_reduce_precision(self):
         return self._context_handle.get_enable_reduce_precision_flag()

@@ -309,29 +287,21 @@ class _Context:
         """Sets whether to save the network class name in the scope."""
         self._thread_local_info.reserve_class_name_in_scope = reserve_class_name_in_scope
 
-    @property
-    def graph_memory_max_size(self):
-        return None
-
-    @graph_memory_max_size.setter
-    def graph_memory_max_size(self, graph_memory_max_size):
-        if check_input_format(graph_memory_max_size):
-            graph_memory_max_size_ = graph_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
-            self._context_handle.set_graph_memory_max_size(graph_memory_max_size_)
-        else:
-            raise ValueError("Context param graph_memory_max_size should be in correct format! Such as \"26GB\"")
-
     @property
     def variable_memory_max_size(self):
         return None
 
     @variable_memory_max_size.setter
     def variable_memory_max_size(self, variable_memory_max_size):
-        if check_input_format(variable_memory_max_size):
-            variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
-            self._context_handle.set_variable_memory_max_size(variable_memory_max_size_)
-        else:
+        if not check_input_format(variable_memory_max_size):
             raise ValueError("Context param variable_memory_max_size should be in correct format! Such as \"5GB\"")
+        if int(variable_memory_max_size[:-2]) >= _DEVICE_APP_MEMORY_SIZE:
+            raise ValueError("Context param variable_memory_max_size should be less than 31GB.")
+        variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
+        graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
+        graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
+        self._context_handle.set_variable_memory_max_size(variable_memory_max_size_)
+        self._context_handle.set_graph_memory_max_size(graph_memory_max_size_)
 
     @property
     def enable_ge(self):

@@ -469,10 +439,9 @@ def reset_auto_parallel_context():
 @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
-                 save_graphs_path=str, enable_loop_sink=bool, enable_mem_reuse=bool, save_ms_model=bool,
-                 save_ms_model_path=str, enable_auto_mixed_precision=bool, enable_dump=bool, save_dump_path=str,
-                 enable_reduce_precision=bool, graph_memory_max_size=str, variable_memory_max_size=str,
-                 enable_profiling=bool, profiling_options=str)
+                 save_graphs_path=str, save_ms_model=bool, save_ms_model_path=str, enable_dump=bool,
+                 save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
+                 enable_profiling=bool, profiling_options=str)
 def set_context(**kwargs):
     """
     Sets context for running environment.

@@ -490,8 +459,6 @@ def set_context(**kwargs):
     Note:
         Attribute name is required for setting attributes.
-        If need to config graph max memory size and variable max memory size, one must make sure:
-        The sum of graph_memory_max_size and variable_memory_max_size should be less than total memory size of
-        a device, while the total memory is supposed to be no more than 256GB.
 
     Args:
         mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1). Default: PYNATIVE_MODE.

@@ -499,19 +466,15 @@ def set_context(**kwargs):
         device_id (int): Id of target device, the value must be in [0, device_num_per_host-1],
             while device_num_per_host should no more than 4096. Default: 0.
         save_graphs (bool): Whether to save graphs. Default: False.
-        enable_loop_sink (bool): Whether to enable loop sink. Default: True.
-        enable_mem_reuse (bool): Whether to enable memory reuse. Default: True.
         save_ms_model (bool): Whether to save lite model converted by graph. Default: False.
         save_ms_model_path (str): Path to save converted lite model. Default: "."
         save_graphs_path (str): Path to save graphs. Default: "."
-        enable_auto_mixed_precision (bool): Whether to enable auto mixed precision. Default: True.
         reserve_class_name_in_scope (bool) : Whether to save the network class name in the scope. Default: True.
         enable_reduce_precision (bool): Whether to enable precision reduction. Default: True.
         enable_dump (bool): Whether to enable dump. Default: False.
         save_dump_path (str): When the program is executed on Ascend, operators can dump data here.
             The root dump path is configured in /home/HwHiAiUser/ide_daemon/ide_daemon.cfg.
             So the real dump path is "{configured root dump path}/{`save_dump_path`}". Default: ".".
-        graph_memory_max_size (str): Sets graph memory max size. Default: "26GB".
         variable_memory_max_size (str): Sets variable memory max size. Default: "5GB".
         enable_profiling (bool): Whether to open profiling. Default: False.
         profiling_options (str): Sets profiling collection options, operators can profiling data here.

@@ -538,12 +501,10 @@ def set_context(**kwargs):
         >>> context.set_context(device_target="Ascend")
         >>> context.set_context(device_id=0)
         >>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
-        >>> context.set_context(enable_mem_reuse=True)
         >>> context.set_context(enable_reduce_precision=True)
         >>> context.set_context(save_ms_model=True, save_ms_model_path=".")
         >>> context.set_context(enable_dump=True, save_dump_path=".")
         >>> context.set_context(reserve_class_name_in_scope=True)
-        >>> context.set_context(graph_memory_max_size="25GB")
         >>> context.set_context(variable_memory_max_size="6GB")
         >>> context.set_context(mode=context.GRAPH_MODE,
         >>>                     device_target="Ascend",device_id=0, save_graphs=True,

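The rewritten `variable_memory_max_size` setter above encodes a fixed 31 GB device budget (`_DEVICE_APP_MEMORY_SIZE`): whatever is not reserved for variable memory is handed to graph memory, which is why the standalone `graph_memory_max_size` property could be deleted. A sketch of that arithmetic with the context-handle calls stubbed out (`split_memory_budget` is a hypothetical helper, not part of the API):

```python
_DEVICE_APP_MEMORY_SIZE = 31  # max memory size of graph plus variable, in GB


def split_memory_budget(variable_memory_max_size):
    """Mirror the setter's arithmetic: "5GB" -> (variable expression, graph expression)."""
    gigabytes = int(variable_memory_max_size[:-2])  # strip the "GB" suffix
    if gigabytes >= _DEVICE_APP_MEMORY_SIZE:
        raise ValueError("Context param variable_memory_max_size should be less than 31GB.")
    variable_memory_max_size_ = f"{gigabytes} * 1024 * 1024 * 1024"
    graph_memory_max_size_ = f"{_DEVICE_APP_MEMORY_SIZE - gigabytes} * 1024 * 1024 * 1024"
    return variable_memory_max_size_, graph_memory_max_size_


# split_memory_budget("5GB") -> ("5 * 1024 * 1024 * 1024", "26 * 1024 * 1024 * 1024")
```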
mindspore/train/dataset_helper.py

@@ -44,15 +44,18 @@ class DatasetHelper:
     def __init__(self, dataset, dataset_sink_mode=True):
         check_bool(dataset_sink_mode)
 
-        iterclass = _DatasetIterGE
-        if not dataset_sink_mode:
-            iterclass = _DatasetIterFeed
-        elif not context.get_context("enable_ge"):
-            if context.get_context("enable_loop_sink"):
-                iterclass = _DatasetIterMSLoopSink
-            else:
-                iterclass = _DatasetIterMS
+        if dataset_sink_mode:
+            if context.get_context("enable_ge"):
+                iterclass = _DatasetIterGE
+            else:
+                if context.get_context("device_target") == "Ascend":
+                    iterclass = _DatasetIterMSLoopSink
+                elif context.get_context("device_target") == "GPU":
+                    iterclass = _DatasetIterMS
+                elif context.get_context("device_target") == "CPU":
+                    raise RuntimeError("Currently dataset sink mode is not supported when the device target is CPU.")
+        else:
+            iterclass = _DatasetIterFeed
         self.iter = iterclass(dataset)
 
     def __iter__(self):

@@ -104,12 +107,12 @@ class _DatasetIter:
         if dataset.get_dataset_size() % loop_size != 0:
             raise ValueError(f'Dataset size {dataset.get_dataset_size()} and '
                              f'loop_size {loop_size} are not matched.')
         loop_count = int(dataset.get_dataset_size() / loop_size)
         return loop_count
 
 
 class _DatasetIterMSLoopSink(_DatasetIter):
-    """Iter for context (enable_loop_sink=True)"""
+    """Iter for context (device_target=Ascend)"""
     def __init__(self, dataset):
         super(_DatasetIterMSLoopSink, self).__init__(dataset)
         self.loop_count = self.get_loop_count(dataset)

@@ -122,11 +125,12 @@ class _DatasetIterMSLoopSink(_DatasetIter):
         def op():
             return tuple()
 
         self.op = op
 
 
 class _DatasetIterMS(_DatasetIter):
-    """Iter for context (enable_loop_sink=False)"""
+    """Iter for context (device_target=GPU)"""
     def __init__(self, dataset):
         super(_DatasetIterMS, self).__init__(dataset)
         self.loop_count = dataset.get_dataset_size()

@@ -149,11 +153,12 @@ class _DatasetIterGE(_DatasetIter):
         def op():
             return tensor_list_run
 
         self.op = op
 
 
 class _DatasetIterFeed:
-    """Iter for feed data"""
+    """Iter for normal(non sink) mode, feed the data from host."""
     def __init__(self, dataset):
         self.dataset = dataset
         self.device_num = _get_device_num()

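With `enable_loop_sink` gone, `DatasetHelper` now picks its iterator class from the device target, as the first hunk above shows. A condensed, self-contained sketch of that dispatch (`select_iter_class` is illustrative only; the real code assigns the iterator classes themselves, not strings):

```python
def select_iter_class(dataset_sink_mode, enable_ge, device_target):
    """Hypothetical standalone version of the dispatch in DatasetHelper.__init__."""
    if not dataset_sink_mode:
        return "_DatasetIterFeed"        # normal (non-sink) mode: feed from host
    if enable_ge:
        return "_DatasetIterGE"
    if device_target == "Ascend":
        return "_DatasetIterMSLoopSink"  # loop sink is implied on Ascend
    if device_target == "GPU":
        return "_DatasetIterMS"
    raise RuntimeError("Currently dataset sink mode is not supported when the device target is CPU.")


assert select_iter_class(True, False, "Ascend") == "_DatasetIterMSLoopSink"
assert select_iter_class(False, False, "CPU") == "_DatasetIterFeed"
```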
mindspore/train/model.py

@@ -279,7 +279,7 @@ class Model:
         """
         # remove later to deal with loop sink
         need_wrap = False
-        if not hasattr(train_dataset, '__ME_INITED__') and context.get_context("enable_loop_sink") \
+        if not hasattr(train_dataset, '__ME_INITED__') and context.get_context("device_target") == "Ascend" \
                 and not context.get_context("enable_ge"):
             need_wrap = True

@@ -420,9 +420,6 @@ class Model:
         _device_number_check(self._parallel_mode, self._device_number)
         _parameter_broadcast_check(self._parallel_mode, self._parameter_broadcast)
 
-        if context.get_context("device_target") in ["CPU", "GPU"] and context.get_context("enable_loop_sink"):
-            raise ValueError("CPU and GPU can't support loop sink, please set enable_loop_sink=False.")
-
         self._train(epoch,
                     train_dataset,
                     callbacks=callbacks,

@@ -446,7 +443,7 @@ class Model:
         # remove later to deal with loop sink
         need_wrap = False
-        if not hasattr(valid_dataset, '__ME_INITED__') and context.get_context("enable_loop_sink") \
+        if not hasattr(valid_dataset, '__ME_INITED__') and context.get_context("device_target") == "Ascend" \
                 and not context.get_context("enable_ge"):
             need_wrap = True

tests/st/auto_parallel/onehot_model_parallel.py

@@ -34,7 +34,6 @@ def setup_module():
     np.random.seed(0)
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     context.set_context(device_id=device_id)
-    context.set_context(enable_loop_sink=False)
     distributedTool.init()
     device_num = distributedTool.get_group_size()
     rank_id = distributedTool.get_rank()

tests/st/auto_parallel/soft_entropy_loss_expand_parallel.py

@@ -47,7 +47,6 @@ def setup_module():
     np.random.seed(0)
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     context.set_context(device_id=device_id)
-    context.set_context(enable_loop_sink=False)
     distributedTool.init()
     rank_id = distributedTool.get_rank()
     device_num = distributedTool.get_group_size()

tests/st/auto_parallel/test_resnet50_expand_loss_2p.py

@@ -32,7 +32,6 @@ from mindspore.parallel import set_algo_parameters
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(device_id=int(os.getenv('DEVICE_ID')))
-context.set_context(enable_loop_sink=False)
 init()
 context.set_auto_parallel_context(mirror_mean=True, parallel_mode=ParallelMode.AUTO_PARALLEL)

tests/st/mem_reuse/resnet_cifar_memreuse.py

@@ -54,8 +54,6 @@ data_home = args_opt.dataset_path
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 
 def create_dataset(repeat_num=1, training=True):

tests/st/mem_reuse/resnet_cifar_normal.py

@@ -54,8 +54,6 @@ data_home = args_opt.dataset_path
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=False)
 
 
 def create_dataset(repeat_num=1, training=True):

tests/st/networks/models/bert/bert_tdt_lossscale.py

@@ -127,8 +127,6 @@ class ModelCallback(Callback):
 def test_bert_tdt():
     """test bert tdt"""
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", reserve_class_name_in_scope=False)
-    context.set_context(enable_loop_sink=True)
-    context.set_context(enable_mem_reuse=True)
     ds = me_de_train_dataset()
     version = os.getenv('VERSION', 'large')
     batch_size = int(os.getenv('BATCH_SIZE', '16'))

tests/st/networks/test_gpu_lenet.py

@@ -141,7 +141,7 @@ def create_dataset(data_path, batch_size=32, repeat_size=1,
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_train_and_eval_lenet():
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", enable_mem_reuse=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     network = LeNet5(10)
     net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)

tests/st/tbe_networks/export_geir.py

@@ -20,7 +20,7 @@ from mindspore import Tensor
 from mindspore.train.serialization import save, load, _check_filedir_or_create, _chg_model_file_name_if_same_exist, \
     _read_file_last_line, context, export
 
-context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", enable_loop_sink=True)
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 
 
 def test_resnet50_export(batch_size=1, num_classes=5):

tests/st/tbe_networks/resnet_cifar.py

@@ -55,8 +55,6 @@ data_home = args_opt.dataset_path
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(device_id=device_id)
-context.set_context(enable_loop_sink=True)
-context.set_context(enable_mem_reuse=True)
 
 
 def create_dataset(repeat_num=1, training=True):

tests/st/tbe_networks/test_resnet_cifar_1p.py

@@ -138,8 +138,6 @@ def train_process(device_id, epoch_size, num_classes, device_num, batch_size):
     os.chdir(str(device_id))
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     context.set_context(device_id=device_id)
-    context.set_context(enable_loop_sink=True)
-    context.set_context(enable_mem_reuse=True)
     context.set_context(mode=context.GRAPH_MODE)
     net = resnet50(batch_size, num_classes)
     loss = CrossEntropyLoss()

@@ -160,8 +158,6 @@ def train_process(device_id, epoch_size, num_classes, device_num, batch_size):
 def eval(batch_size, num_classes):
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     context.set_context(device_id=0)
-    context.set_context(enable_loop_sink=True)
-    context.set_context(enable_mem_reuse=True)
     net = resnet50(batch_size, num_classes)
     loss = CrossEntropyLoss()

tests/st/tbe_networks/test_resnet_cifar_8p.py

@@ -148,8 +148,6 @@ def train_process(q, device_id, epoch_size, num_classes, device_num, batch_size,
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
     context.set_context(device_id=device_id)
-    context.set_context(enable_loop_sink=True)
-    context.set_context(enable_mem_reuse=True)
     os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
     os.environ['RANK_ID'] = str(device_id)
     os.environ['RANK_SIZE'] = str(device_num)

tests/ut/python/parallel/test_auto_parallel_resnet.py

@@ -32,7 +32,6 @@ from mindspore.parallel import _cost_model_context as cost_model_context
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(device_id=0)
-context.set_context(enable_loop_sink=False)
 init()

tests/ut/python/pynative_mode/test_context.py

@@ -102,6 +102,21 @@ def test_profiling_options():
     assert context.get_context("profiling_options") == "training_trace:task_trace"
 
 
+def test_variable_memory_max_size():
+    """test_variable_memory_max_size"""
+    with pytest.raises(TypeError):
+        context.set_context(variable_memory_max_size=True)
+    with pytest.raises(TypeError):
+        context.set_context(variable_memory_max_size=1)
+    with pytest.raises(ValueError):
+        context.set_context(variable_memory_max_size="")
+    with pytest.raises(ValueError):
+        context.set_context(variable_memory_max_size="1G")
+    with pytest.raises(ValueError):
+        context.set_context(variable_memory_max_size="31GB")
+    context.set_context(variable_memory_max_size="3GB")
+
+
 def test_set_context():
     """ test_set_context """
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",