magicwindyyd / mindspore (forked from MindSpore / mindspore, in sync with the fork source)
Commit 7d4f4818
Authored on Aug 25, 2020 by mindspore-ci-bot; committed via Gitee on Aug 25, 2020

!5017 remove internal interface in wide&deep

Merge pull request !5017 from yao_yf/wide_and_deep_no_internal_interface

Parents: 9c9b721b, a9a8e323
Showing 7 changed files with 34 additions and 25 deletions (+34, -25).
mindspore/parallel/__init__.py  (+3, -1)
mindspore/parallel/_cost_model_context.py  (+15, -3)
model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py  (+6, -10)
model_zoo/official/recommend/wide_and_deep/train_and_eval_auto_parallel.py  (+2, -2)
model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py  (+4, -5)
tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py  (+2, -2)
tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py  (+2, -2)
mindspore/parallel/__init__.py

@@ -17,5 +17,7 @@ This interface is ONLY used in Auto-parallel procedure.
 """
 from .algo_parameter_config import get_algo_parameters, reset_algo_parameters, \
     set_algo_parameters
+from ._cost_model_context import set_multi_subgraphs, get_multi_subgraphs
-__all__ = ["get_algo_parameters", "reset_algo_parameters", "set_algo_parameters"]
+__all__ = ["set_multi_subgraphs", "get_multi_subgraphs", "get_algo_parameters", "reset_algo_parameters",
+           "set_algo_parameters"]
mindspore/parallel/_cost_model_context.py

@@ -479,7 +479,6 @@ set_cost_model_context_func_map = {
     "costmodel_communi_threshold": cost_model_context().set_costmodel_communi_threshold,
     "costmodel_communi_const": cost_model_context().set_costmodel_communi_const,
     "costmodel_communi_bias": cost_model_context().set_costmodel_communi_bias,
-    "multi_subgraphs": cost_model_context().set_multi_subgraphs,
     "run_phase": cost_model_context().set_run_phase,
     "costmodel_allreduce_fusion_algorithm": cost_model_context().set_costmodel_allreduce_fusion_algorithm,
     "costmodel_allreduce_fusion_times": cost_model_context().set_costmodel_allreduce_fusion_times,

@@ -501,7 +500,6 @@ get_cost_model_context_func_map = {
     "costmodel_communi_threshold": cost_model_context().get_costmodel_communi_threshold,
     "costmodel_communi_const": cost_model_context().get_costmodel_communi_const,
     "costmodel_communi_bias": cost_model_context().get_costmodel_communi_bias,
-    "multi_subgraphs": cost_model_context().get_multi_subgraphs,
     "run_phase": cost_model_context().get_run_phase,
     "costmodel_allreduce_fusion_algorithm": cost_model_context().get_costmodel_allreduce_fusion_algorithm,
     "costmodel_allreduce_fusion_times": cost_model_context().get_costmodel_allreduce_fusion_times,

@@ -538,7 +536,6 @@ def set_cost_model_context(**kwargs):
         costmodel_communi_threshold (float): A parameter used in adjusting communication calculation for practice.
         costmodel_communi_const (float): A parameter used in adjusting communication calculation for practice.
         costmodel_communi_bias (float): A parameter used in adjusting communication calculation for practice.
-        multi_subgraphs (bool): A parameter used in marking the flag of ANF graph containing multiple subgraphs.
         run_phase (int): A parameter indicating which phase is running: training (0) or inference (1). Default: 0.
         costmodel_allreduce_fusion_algorithm (int): The allreduce fusion algorithm.
             0: bypass allreduce fusion;

@@ -591,3 +588,18 @@ def get_cost_model_context(attr_key):
 def reset_cost_model_context():
     """Reset cost model context attributes."""
     cost_model_context().reset_cost_model()
+
+def set_multi_subgraphs(multi_subgraph=True):
+    """
+    Set the flag of ANF graph containing multiple subgraphs.
+
+    Args:
+        multi_subgraph (bool): A parameter used in marking the multi-subgraphs flag.
+    """
+    cost_model_context().set_multi_subgraphs(multi_subgraph)
+
+def get_multi_subgraphs():
+    """
+    Get the flag of ANF graph containing multiple subgraphs.
+    """
+    cost_model_context().get_multi_subgraphs()
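Taken together with the __init__.py change above, the multi-subgraphs flag moves from an internal kwarg of set_cost_model_context to a pair of public helpers. A minimal before/after sketch of the call-site change, using only names that appear in these diffs:

    # Before this commit: set through the internal cost-model context module.
    from mindspore.parallel import _cost_model_context as cost_model_context
    cost_model_context.set_cost_model_context(multi_subgraphs=True)

    # After this commit: the public helper wraps the same underlying context call.
    from mindspore.parallel import set_multi_subgraphs
    set_multi_subgraphs()  # defaults to multi_subgraph=True

Note that get_multi_subgraphs(), as added in this hunk, calls the underlying getter but does not return its value.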
model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py

@@ -14,7 +14,7 @@
 # ============================================================================
 """wide and deep model"""
 import numpy as np
-from mindspore import nn
+from mindspore import nn, context
 from mindspore import Parameter, ParameterTuple
 import mindspore.common.dtype as mstype
 from mindspore.ops import functional as F

@@ -22,10 +22,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.nn import Dropout
 from mindspore.nn.optim import Adam, FTRL, LazyAdam
-# from mindspore.nn.metrics import Metric
 from mindspore.common.initializer import Uniform, initializer
-# from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
-from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean
 from mindspore.train.parallel_utils import ParallelMode
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.communication.management import get_group_size

@@ -142,7 +139,7 @@ class WideDeepModel(nn.Cell):
         self.batch_size = config.batch_size
         host_device_mix = bool(config.host_device_mix)
         parameter_server = bool(config.parameter_server)
-        parallel_mode = _get_parallel_mode()
+        parallel_mode = context.get_auto_parallel_context("parallel_mode")
         is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
         if is_auto_parallel:
             self.batch_size = self.batch_size * get_group_size()

@@ -259,7 +256,7 @@ class NetWithLossClass(nn.Cell):
         super(NetWithLossClass, self).__init__(auto_prefix=False)
         host_device_mix = bool(config.host_device_mix)
         parameter_server = bool(config.parameter_server)
-        parallel_mode = _get_parallel_mode()
+        parallel_mode = context.get_auto_parallel_context("parallel_mode")
         is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
         self.no_l2loss = (is_auto_parallel if host_device_mix else parameter_server)
         self.network = network

@@ -312,7 +309,7 @@ class TrainStepWrap(nn.Cell):
     def __init__(self, network, sens=1024.0, host_device_mix=False, parameter_server=False):
         super(TrainStepWrap, self).__init__()
-        parallel_mode = _get_parallel_mode()
+        parallel_mode = context.get_auto_parallel_context("parallel_mode")
         is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
         self.network = network
         self.network.set_train()

@@ -351,12 +348,11 @@ class TrainStepWrap(nn.Cell):
         self.reducer_flag = False
         self.grad_reducer_w = None
         self.grad_reducer_d = None
-        parallel_mode = _get_parallel_mode()
         self.reducer_flag = parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
         if self.reducer_flag:
-            mean = _get_mirror_mean()
-            degree = _get_device_num()
+            mean = context.get_auto_parallel_context("mirror_mean")
+            degree = context.get_auto_parallel_context("device_num")
             self.grad_reducer_w = DistributedGradReducer(self.optimizer_w.parameters, mean, degree)
             self.grad_reducer_d = DistributedGradReducer(self.optimizer_d.parameters, mean, degree)
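Throughout wide_and_deep.py the substitution is uniform: each private helper from mindspore.parallel._utils is replaced by the equivalent public auto-parallel context query. A condensed sketch of the pattern, using only identifiers that appear in the diff (a standalone snippet, not a complete training step):

    from mindspore import context
    from mindspore.train.parallel_utils import ParallelMode

    # Old (internal): parallel_mode = _get_parallel_mode()
    parallel_mode = context.get_auto_parallel_context("parallel_mode")
    is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)

    reducer_flag = parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
    if reducer_flag:
        # Old (internal): mean = _get_mirror_mean(); degree = _get_device_num()
        mean = context.get_auto_parallel_context("mirror_mean")
        degree = context.get_auto_parallel_context("device_num")
        # A DistributedGradReducer(optimizer.parameters, mean, degree) is then built as before.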
model_zoo/official/recommend/wide_and_deep/train_and_eval_auto_parallel.py

@@ -22,7 +22,7 @@ from mindspore import Model, context
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor
 from mindspore.train import ParallelMode
 from mindspore.communication.management import get_rank, get_group_size, init
-from mindspore.parallel import _cost_model_context as cost_model_context
+from mindspore.parallel import set_multi_subgraphs
 from mindspore.nn.wrap.cell_wrapper import VirtualDatasetCellTriple
 from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel

@@ -127,7 +127,7 @@ if __name__ == "__main__":
     context.set_context(mode=context.GRAPH_MODE, device_target=wide_deep_config.device_target, save_graphs=True)
     context.set_context(variable_memory_max_size="24GB")
     context.set_context(enable_sparse=True)
-    cost_model_context.set_cost_model_context(multi_subgraphs=True)
+    set_multi_subgraphs()
     if wide_deep_config.device_target == "Ascend":
         init("hccl")
     elif wide_deep_config.device_target == "GPU":
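In the auto-parallel training script only this one call changes; the graph-mode and memory configuration around it is untouched. A trimmed sketch of the updated setup block (device_target is hard-coded to "Ascend" here for brevity; the actual script reads it from wide_deep_config):

    from mindspore import context
    from mindspore.communication.management import init
    from mindspore.parallel import set_multi_subgraphs

    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True)
    context.set_context(variable_memory_max_size="24GB")
    context.set_context(enable_sparse=True)
    set_multi_subgraphs()  # was: cost_model_context.set_cost_model_context(multi_subgraphs=True)
    init("hccl")           # Ascend branch, as in the script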
model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py

@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore.common.dtype as mstype
-from mindspore import nn
+from mindspore import nn, context
 from mindspore import Tensor, Parameter, ParameterTuple
 from mindspore.ops import functional as F
 from mindspore.ops import composite as C

@@ -24,7 +24,6 @@ from mindspore.ops import operations as P
 from mindspore.nn import Dropout, Flatten
 from mindspore.nn.optim import Adam, FTRL
 from mindspore.common.initializer import Uniform, initializer
-from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean
 from mindspore.train.parallel_utils import ParallelMode
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer

@@ -552,13 +551,13 @@ class TrainStepWrap(nn.Cell):
         self.reducer_flag = False
         self.grad_reducer_w = None
         self.grad_reducer_d = None
-        parallel_mode = _get_parallel_mode()
+        parallel_mode = context.get_auto_parallel_context("parallel_mode")
         if parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
             self.reducer_flag = True
         if self.reducer_flag:
-            mean = _get_mirror_mean()
-            degree = _get_device_num()
+            mean = context.get_auto_parallel_context("mirror_mean")
+            degree = context.get_auto_parallel_context("device_num")
             self.grad_reducer_w = DistributedGradReducer(self.optimizer_w.parameters, mean, degree)
             self.grad_reducer_d = DistributedGradReducer(
tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py

@@ -21,7 +21,7 @@ from mindspore import Model, context
 from mindspore.train.callback import TimeMonitor
 from mindspore.train import ParallelMode
 from mindspore.communication.management import get_rank, get_group_size, init
-from mindspore.parallel import _cost_model_context as cost_model_context
+from mindspore.parallel import set_multi_subgraphs
 from mindspore.nn.wrap.cell_wrapper import VirtualDatasetCellTriple
 from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel

@@ -33,7 +33,7 @@ from src.config import WideDeepConfig
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True)
 context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, mirror_mean=True)
-cost_model_context.set_cost_model_context(multi_subgraphs=True)
+set_multi_subgraphs()
 init()
tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py

@@ -23,7 +23,7 @@ from mindspore.nn.optim import Adam, FTRL
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
-from mindspore.parallel import _cost_model_context as cost_model_context
+from mindspore.parallel import set_multi_subgraphs
 from mindspore.parallel._utils import _reset_op_id as reset_op_id

@@ -103,7 +103,7 @@ class TrainStepWarp(nn.Cell):
 def test_double_subgraphs():
-    cost_model_context.set_cost_model_context(multi_subgraphs=True)
+    set_multi_subgraphs()
     context.set_context(save_graphs=True)
     context.set_auto_parallel_context(device_num=8, global_rank=0)
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
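The unit test keeps its auto-parallel configuration and only swaps in the public helper. A trimmed sketch of the updated entry point (network construction and compilation elided, as they fall outside the hunk shown above):

    from mindspore import context
    from mindspore.parallel import set_multi_subgraphs

    def test_double_subgraphs():
        set_multi_subgraphs()  # was: cost_model_context.set_cost_model_context(multi_subgraphs=True)
        context.set_context(save_graphs=True)
        context.set_auto_parallel_context(device_num=8, global_rank=0)
        context.set_auto_parallel_context(parallel_mode="auto_parallel")
        # ... build the double-subgraph network and compile it as in the original test ...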