Commit 14c77c9f
Authored on Aug 13, 2020 by yangzhenzhang

update field split

Parent: 04056cf8

Showing 2 changed files with 199 additions and 0 deletions
mindspore/ccsrc/frontend/parallel/ops_info/gather_v2_p_info.cc    +4    -0
tests/ut/python/parallel/test_manual_embedding_lookup.py        +195    -0
mindspore/ccsrc/frontend/parallel/ops_info/gather_v2_p_info.cc

@@ -550,6 +550,10 @@ RankList GetRankFromGroup(const Group &group) {
}

Status GatherV2PInfo::InferForwardCommunication() {
  if (manual_split_) {
    return SUCCESS;
  }
  forward_op_.clear();
  auto param_strategy = strategy_->GetInputDim().at(0);
  // don't split axis or target is not CPU, no need forward communication
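The early return above is the functional core of this commit: when the embedding table is split manually (a manual_split attribute is present on the primitive), GatherV2PInfo skips forward-communication inference instead of deriving a communication op from the sharding strategy. A minimal sketch of how that attribute is attached from the Python side, mirroring the new test below (the strategy and split values are illustrative, not the only valid ones):

from mindspore.ops import operations as P

# Split an 8-row table into two 4-row sub-tables, one per device.
lookup = P.EmbeddingLookup().set_strategy(((2, 1), (1, 2)))
lookup.add_prim_attr("manual_split", (4, 4))      # rows per sub-table
lookup.add_prim_attr("primitive_target", "CPU")   # run the lookup on CPU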
tests/ut/python/parallel/test_manual_embedding_lookup.py
new file (mode 100644)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import Cell, TrainOneStepCell, LazyAdam
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer

context.set_context(enable_sparse=True)


class Net(Cell):
    def __init__(self, strategy1=None, strategy2=None, strategy3=None, axis=0, init_flag=True,
                 split_tuple=(4, 4), split_string="manual_split", param_shape=(8, 8)):
        super().__init__()
        self.gatherv2 = P.EmbeddingLookup().set_strategy(strategy1)
        self.gatherv2.add_prim_attr(split_string, split_tuple)
        self.gatherv2.add_prim_attr("primitive_target", "CPU")
        self.mul = P.Mul().set_strategy(strategy2)
        self.reshape = P.Reshape()
        self.matmul = P.MatMul().set_strategy(strategy3)
        self.matmul.add_prim_attr("forward_reduce_scatter", True)
        if init_flag:
            self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
        else:
            self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
        self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
        self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
        self.axis = axis

    def construct(self, x, b):
        out = self.gatherv2(self.param, x, self.axis)
        out = self.mul(out, b)
        return out


_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([8, 8, 8]), dtype=ms.float32)


def compile_net(net):
    context.set_context(save_graphs=True)
    optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1)
    optimizer.sparse_opt.add_prim_attr("primitive_target", "CPU")
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    _executor.compile(train_net, _x, _b, auto_parallel_mode=True)
    context.reset_auto_parallel_context()


def test_normal_split():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    compile_net(net)


def test_normal_split2():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
    strategy1 = ((4, 1), (1, 4))
    strategy2 = ((1, 4, 1), (1, 4, 1))
    strategy3 = ((1, 4), (4, 1))
    net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
    compile_net(net)


def test_normal_split3():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
    strategy1 = ((4, 8), (1, 4))
    strategy2 = ((1, 4, 8), (1, 4, 8))
    strategy3 = ((1, 32), (32, 1))
    net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
    compile_net(net)


def test_normal_split_with_offset():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset",
              split_tuple=((4, 0), (4, 4)))
    compile_net(net)


def test_auto_parallel_error():
    context.set_context(save_graphs=True)
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
    net = Net()
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_axis_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, axis=1)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((4, 1), (8, 1))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error2():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((4, 1), (1, 8))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error3():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error4():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 8), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error5():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
    strategy1 = ((4, 1), (1, 4))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_split_tuple_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_parameter_use_tensor_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, init_flag=False)
    with pytest.raises(RuntimeError):
        compile_net(net)
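The split tuple appears in two forms across these cases. Reading the test inputs (the exact semantics are not documented in this commit, so treat this as an inference): "manual_split" takes plain row counts per sub-table, while "manual_split_with_offset" takes (row_count, start_offset) pairs; the error case ((5, 0), (5, 5)) overruns the 8-row parameter, which is presumably why compilation raises. Under that reading, the two forms below describe the same partition of an 8-row table across 2 devices:

# Plain form: row counts only; offsets are implied by the running sum.
split_plain = (4, 4)                 # device 0: rows 0-3, device 1: rows 4-7
# Offset form: explicit (row_count, start_offset) per sub-table.
split_offset = ((4, 0), (4, 4))      # same partition, offsets spelled out

# Consistency check (hypothetical helper logic, not a MindSpore API):
derived = tuple((n, sum(split_plain[:i])) for i, n in enumerate(split_plain))
assert derived == split_offset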