Commit 3ad3a71f
Repository: magicwindyyd/mindspore (fork of MindSpore/mindspore)
Authored on June 18, 2020 by hongxing
Parent: ea87b6c4

change interface

Showing 2 changed files with 29 additions and 7 deletions:

    mindspore/context.py                              +8   -2
    mindspore/parallel/_auto_parallel_context.py      +21  -5
mindspore/context.py

@@ -381,8 +381,8 @@ def _context():
 @args_type_check(device_num=int, global_rank=int, mirror_mean=bool, cast_before_mirror=bool, parallel_mode=str,
-                 parameter_broadcast=bool, strategy_ckpt_load_file=str, strategy_ckpt_save_file=str,
-                 full_batch=bool)
+                 auto_parallel_search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
+                 strategy_ckpt_save_file=str, full_batch=bool)
 def set_auto_parallel_context(**kwargs):
     """
     Set auto parallel context.

@@ -414,6 +414,12 @@ def set_auto_parallel_context(**kwargs):
                      setting parallel strategies.
          - auto_parallel: Achieving parallelism automatically.
+        auto_parallel_search_mode (str): There are two kinds of search modes, "recursive_programming"
+                     and "dynamic_programming".
+
+                     - recursive_programming: Recursive programming search mode.
+
+                     - dynamic_programming: Dynamic programming search mode.
         parameter_broadcast (bool): Indicating whether to broadcast parameters before training.
                      "stand_alone", "semi_auto_parallel" and "auto_parallel" do not support parameter
                      broadcast. Default: False.
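For reference, a minimal sketch of how the new keyword is meant to be exercised through the public context API, following the parameter names documented in the hunk above; it assumes a MindSpore build that contains this commit, and the chosen values are only illustrative.

    from mindspore import context

    # Enable automatic parallelism and pick the strategy search mode added here;
    # "dynamic_programming" is the other documented value.
    context.set_auto_parallel_context(parallel_mode="auto_parallel",
                                      auto_parallel_search_mode="recursive_programming")

    # Read the value back through the matching public getter.
    print(context.get_auto_parallel_context("auto_parallel_search_mode"))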
mindspore/parallel/_auto_parallel_context.py

@@ -185,13 +185,20 @@ class _AutoParallelContext:
         self.check_context_handle()
         return self._context_handle.get_parallel_mode()

-    def set_strategy_search_mode(self, strategy_search_mode):
+    def set_strategy_search_mode(self, auto_parallel_search_mode):
+        """
+        Set search mode of strategy.
+
+        Args:
+            auto_parallel_search_mode (str): The search mode of strategy.
+        """
         self.check_context_handle()
-        ret = self._context_handle.set_strategy_search_mode(strategy_search_mode)
+        ret = self._context_handle.set_strategy_search_mode(auto_parallel_search_mode)
         if ret is False:
-            raise ValueError("Strategy search mode does not support {}".format(strategy_search_mode))
+            raise ValueError("Strategy search mode does not support {}".format(auto_parallel_search_mode))

     def get_strategy_search_mode(self):
         """Get search mode of strategy."""
         self.check_context_handle()
         return self._context_handle.get_strategy_search_mode()
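Below is a hedged sketch of how the renamed setter behaves, using the auto_parallel_context() accessor from the same module; the rejected value is purely illustrative, and the error text comes from the ValueError shown above.

    from mindspore.parallel._auto_parallel_context import auto_parallel_context

    # The method name is unchanged; only its parameter is now auto_parallel_search_mode.
    auto_parallel_context().set_strategy_search_mode("dynamic_programming")
    assert auto_parallel_context().get_strategy_search_mode() == "dynamic_programming"

    # A mode the backend does not recognize surfaces as ValueError.
    try:
        auto_parallel_context().set_strategy_search_mode("breadth_first")  # illustrative bad value
    except ValueError as err:
        print(err)  # "Strategy search mode does not support breadth_first"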
@@ -422,6 +429,7 @@ _set_auto_parallel_context_func_map = {
     "cast_before_mirror": auto_parallel_context().set_cast_before_mirror,
     "loss_repeated_mean": auto_parallel_context().set_loss_repeated_mean,
     "parallel_mode": auto_parallel_context().set_parallel_mode,
+    "auto_parallel_search_mode": auto_parallel_context().set_strategy_search_mode,
     "parameter_broadcast": auto_parallel_context().set_parameter_broadcast,
     "strategy_ckpt_load_file": auto_parallel_context().set_strategy_ckpt_load_file,
     "strategy_ckpt_save_file": auto_parallel_context().set_strategy_ckpt_save_file,
@@ -435,6 +443,7 @@ _get_auto_parallel_context_func_map = {
     "cast_before_mirror": auto_parallel_context().get_cast_before_mirror,
     "loss_repeated_mean": auto_parallel_context().get_loss_repeated_mean,
     "parallel_mode": auto_parallel_context().get_parallel_mode,
+    "auto_parallel_search_mode": auto_parallel_context().get_strategy_search_mode,
     "parameter_broadcast": auto_parallel_context().get_parameter_broadcast,
     "strategy_ckpt_load_file": auto_parallel_context().get_strategy_ckpt_load_file,
     "strategy_ckpt_save_file": auto_parallel_context().get_strategy_ckpt_save_file,
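These two dispatch tables are what wire the new "auto_parallel_search_mode" keyword to its setter and getter. A hedged sketch of the lookup pattern such tables are consumed with (the kwargs dict and the error message are illustrative, not taken from this commit):

    from mindspore.parallel._auto_parallel_context import (
        _set_auto_parallel_context_func_map,
        _get_auto_parallel_context_func_map,
    )

    kwargs = {"parallel_mode": "auto_parallel",
              "auto_parallel_search_mode": "recursive_programming"}

    # Route each keyword to its registered setter, e.g. set_strategy_search_mode.
    for key, value in kwargs.items():
        if key not in _set_auto_parallel_context_func_map:
            raise ValueError("Unknown auto parallel context key: {}".format(key))
        _set_auto_parallel_context_func_map[key](value)

    # The get map mirrors the set map, so the new key reads back the same way.
    print(_get_auto_parallel_context_func_map["auto_parallel_search_mode"]())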
@@ -442,8 +451,9 @@ _get_auto_parallel_context_func_map = {
 @args_type_check(device_num=int, global_rank=int, mirror_mean=bool, cast_before_mirror=bool,
-                 loss_repeated_mean=bool, parallel_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
-                 strategy_ckpt_save_file=str, full_batch=bool)
+                 loss_repeated_mean=bool, parallel_mode=str, auto_parallel_search_mode=str,
+                 parameter_broadcast=bool, strategy_ckpt_load_file=str, strategy_ckpt_save_file=str,
+                 full_batch=bool)
 def _set_auto_parallel_context(**kwargs):
     """
     Set auto parallel context.

@@ -471,6 +481,12 @@ def _set_auto_parallel_context(**kwargs):
                      setting parallel strategies.
          - auto_parallel: Achieving parallelism automatically.
+        auto_parallel_search_mode (str): There are two kinds of search modes, "recursive_programming"
+                     and "dynamic_programming".
+
+                     - recursive_programming: Recursive programming search mode.
+
+                     - dynamic_programming: Dynamic programming search mode.
         parameter_broadcast (bool): Indicating whether to broadcast parameters before training.
                      "stand_alone", "semi_auto_parallel" and "auto_parallel" do not support parameter
                      broadcast. Default: False.
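Finally, a brief sketch of the round trip through the private helpers documented above. _set_auto_parallel_context appears in this diff; the companion _get_auto_parallel_context is inferred from the _get_auto_parallel_context_func_map table and should be treated as an assumption, as are the argument values.

    from mindspore.parallel._auto_parallel_context import (
        _set_auto_parallel_context,
        _get_auto_parallel_context,  # assumed counterpart of the get func map
    )

    _set_auto_parallel_context(parallel_mode="semi_auto_parallel",
                               auto_parallel_search_mode="dynamic_programming")

    # Expected to print "dynamic_programming" if the assumption above holds.
    print(_get_auto_parallel_context("auto_parallel_search_mode"))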