Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
cda08f6a
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
cda08f6a
编写于
8月 19, 2020
作者:
Y
yangzhenzhang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
concat 3 tensors in auto parallel mode
上级
fb4afb45
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
48 additions
and
6 deletions
+48
-6
mindspore/ccsrc/frontend/parallel/ops_info/concat_info.cc
mindspore/ccsrc/frontend/parallel/ops_info/concat_info.cc
+20
-5
mindspore/ccsrc/frontend/parallel/ops_info/strided_slice_info.cc
...re/ccsrc/frontend/parallel/ops_info/strided_slice_info.cc
+0
-1
tests/ut/python/parallel/test_concat.py
tests/ut/python/parallel/test_concat.py
+28
-0
未找到文件。
mindspore/ccsrc/frontend/parallel/ops_info/concat_info.cc
浏览文件 @
cda08f6a
...
...
@@ -223,17 +223,32 @@ Status ConcatInfo::GenerateStrategies(int32_t stage_id) {
input_split
.
push_back
(
1
);
}
}
Shapes
splittable_inputs
;
for
(
size_t
i
=
0
;
i
<
inputs_shape_
.
size
();
++
i
)
{
splittable_inputs
.
push_back
(
input_split
)
;
}
// to generate the first input's strategy
Shapes
splittable_input
=
{
input_split
}
;
Shapes
tmp_inputs_shape
=
{
inputs_shape_
[
0
]};
std
::
vector
<
StrategyPtr
>
sp_vector
;
is_auto_parallel_
=
true
;
if
(
GenerateStrategiesWithBroadcast
(
stage_id
,
inputs_shape_
,
splittable_inputs
,
&
sp_vector
)
!=
SUCCESS
)
{
if
(
GenerateStrategiesForIndependentInputs
(
stage_id
,
tmp_inputs_shape
,
splittable_input
,
&
sp_vector
)
!=
SUCCESS
)
{
MS_LOG
(
ERROR
)
<<
name_
<<
": Generate strategies failed"
;
return
FAILED
;
}
// the others strategies are equal to the first input's strategy
for
(
auto
&
sp
:
sp_vector
)
{
if
((
sp
==
nullptr
)
||
sp
->
GetInputDim
().
empty
())
{
MS_LOG
(
ERROR
)
<<
name_
<<
": The strategy is null or empty"
;
return
FAILED
;
}
Strategys
tmp_strategy
;
Dimensions
first_input_strategy
=
sp
->
GetInputDim
()[
0
];
for
(
size_t
i
=
0
;
i
<
inputs_shape_
.
size
();
++
i
)
{
tmp_strategy
.
push_back
(
first_input_strategy
);
}
sp
->
ResetInputs
(
tmp_strategy
);
}
size_t
success
=
0
;
for
(
auto
&
sp
:
sp_vector
)
{
PrintStrategy
(
sp
);
...
...
mindspore/ccsrc/frontend/parallel/ops_info/strided_slice_info.cc
浏览文件 @
cda08f6a
...
...
@@ -111,7 +111,6 @@ Status StridedSliceInfo::CheckStrategy(const StrategyPtr &strategy) {
Dimensions
strategy_value
=
stra
[
0
];
bool
has_split
=
std
::
any_of
(
strategy_value
.
begin
(),
strategy_value
.
end
(),
[](
int32_t
v
)
{
return
v
>
1
;
});
if
(
has_split
&&
has_mask_
)
{
MS_LOG
(
ERROR
)
<<
name_
<<
": When there is a mask, the input is not supported to be split"
;
return
FAILED
;
...
...
tests/ut/python/parallel/test_concat.py
浏览文件 @
cda08f6a
...
...
@@ -50,12 +50,34 @@ class Net2(Cell):
return
out
class
Net3
(
Cell
):
def
__init__
(
self
,
weight
,
weight2
,
weight3
,
strategy1
=
None
,
strategy2
=
None
,
is_parameter
=
True
):
super
().
__init__
()
self
.
concat
=
P
.
Concat
(
axis
=
0
).
set_strategy
(
strategy1
)
if
is_parameter
:
self
.
weight
=
Parameter
(
weight
,
"w1"
)
else
:
self
.
weight
=
weight
self
.
mul
=
P
.
Mul
().
set_strategy
(
strategy2
)
self
.
weight2
=
Parameter
(
weight2
,
"w2"
)
self
.
weight3
=
Parameter
(
weight3
,
"w3"
)
def
construct
(
self
,
x
,
b
):
out
=
self
.
concat
((
self
.
weight
,
self
.
weight2
,
self
.
weight3
))
out
=
self
.
mul
(
x
,
out
)
return
out
_x
=
Tensor
(
np
.
ones
([
128
,
64
,
32
]),
dtype
=
ms
.
float32
)
_w1
=
Tensor
(
np
.
ones
([
96
,
64
,
32
]),
dtype
=
ms
.
float32
)
_w2
=
Tensor
(
np
.
ones
([
32
,
64
,
32
]),
dtype
=
ms
.
float32
)
_w3
=
Tensor
(
np
.
ones
([
128
,
16
,
32
]),
dtype
=
ms
.
float32
)
_b
=
Tensor
(
np
.
ones
([
128
,
64
,
32
]),
dtype
=
ms
.
float32
)
w1
=
Tensor
(
np
.
ones
([
48
,
64
,
32
]),
dtype
=
ms
.
float32
)
w2
=
Tensor
(
np
.
ones
([
16
,
64
,
32
]),
dtype
=
ms
.
float32
)
w3
=
Tensor
(
np
.
ones
([
64
,
64
,
32
]),
dtype
=
ms
.
float32
)
def
compile_net
(
net
):
context
.
set_context
(
save_graphs
=
True
)
...
...
@@ -126,3 +148,9 @@ def test_concat_auto_parallel2():
strategy2
=
None
net
=
Net2
(
_w3
,
strategy1
,
strategy2
,
axis
=
1
)
compile_net
(
net
)
def
test_concat_auto_parallel_3_tensor
():
context
.
set_auto_parallel_context
(
parallel_mode
=
"auto_parallel"
,
device_num
=
8
,
global_rank
=
0
)
net
=
Net3
(
w1
,
w2
,
w3
)
compile_net
(
net
)
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录