magicwindyyd / mindspore (forked from MindSpore / mindspore) · commit 512d8e85
Commit 512d8e85
Authored June 29, 2020 by mindspore-ci-bot; committed by Gitee on June 29, 2020
!2687 [CT][MS][Auto-Parallel] Double recursion does not support the GatherV2 operator

Merge pull request !2687 from Chong/zc
Parents: 25ff2242, 9febf7fd
Showing 6 changed files with 43 additions and 16 deletions (+43 / -16).

This merge replaces the placeholder GatherV2 strategy in the recursive (double-recursion) strategy generator with one derived from the operator itself: the gather axis is read from the operator's third input, normalized if negative, and excluded from splitting, while the indices input gets an all-ones strategy. Alongside this, several helper signatures are tightened from by-value copies to const references.
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.cc  +28 -3
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.h   +2 -1
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.cc        +2 -2
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.h         +2 -2
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.cc          +5 -4
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.h           +4 -4
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.cc

@@ -164,9 +164,34 @@ std::vector<std::vector<int32_t>> PrepareOneHot(const std::shared_ptr<Graph> &gr
   return strategies;
 }
 
-std::vector<std::vector<int32_t>> PrepareGatherV2(const std::shared_ptr<std::vector<int32_t>> &s) {
+std::vector<std::vector<int32_t>> PrepareGatherV2(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
+                                                  const size_t iter_ops, std::vector<int32_t> s) {
   std::vector<std::vector<int32_t>> strategies;
-  strategies.push_back(*s);
+
+  int32_t axis = 0;
+  auto axis_input = GetValue<int>(ops[iter_ops]->input_value().at(2));
+  if (axis_input < 0) {
+    axis_input += SizeToInt(ops[iter_ops]->inputs_tensor_info()[0].shape().size());
+  }
+  axis = axis_input;
+  if (axis >= SizeToInt(s.size())) {
+    MS_LOG(EXCEPTION) << "Failure: GatherV2' axis out of range.";
+  }
+  s[axis] = 1;
+  strategies.push_back(s);
+
+  auto pos = ops[iter_ops]->name().find("Info");
+  auto name = ops[iter_ops]->name().substr(0, pos);
+  if (name == "GatherV2") {
+    return strategies;
+  }
+
+  std::vector<int32_t> s_indices;
+  for (size_t i = 0; i < ops[iter_ops]->inputs_tensor_info()[1].shape().size(); i++) {
+    s_indices.push_back(1);
+  }
+  strategies.push_back(s_indices);
+
   return strategies;
 }

@@ -607,7 +632,7 @@ std::vector<std::vector<int32_t>> GenerateStrategiesFromStrategy(const std::vect
     return PrepareBiasAdd(s_ptr);
   }
   if (ops[iter_ops]->type() == GATHERV2) {
-    return PrepareGatherV2(s_ptr);
+    return PrepareGatherV2(ops, iter_ops, basic_stra);
   }
   if (ops[iter_ops]->type() == L2_NORMALIZE) {
     return PrepareL2Normalize(ops, iter_ops, basic_stra);
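Condensed outside the MindSpore tree, the new strategy derivation amounts to three steps: normalize a possibly negative gather axis against the parameter rank, refuse to split the gathered dimension, and replicate the indices input. The following is a minimal, self-contained sketch of that logic, not MindSpore's API; SketchGatherV2Strategy, param_strategy, and indices_rank are hypothetical stand-ins for the operator queries (input_value(), inputs_tensor_info()) used in the diff above.

// Minimal sketch of the GatherV2 strategy derivation (illustrative names only).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

std::vector<std::vector<int32_t>> SketchGatherV2Strategy(std::vector<int32_t> param_strategy,
                                                         int32_t axis, size_t indices_rank) {
  // Negative axes count from the back, as in GatherV2 itself.
  if (axis < 0) {
    axis += static_cast<int32_t>(param_strategy.size());
  }
  if (axis < 0 || axis >= static_cast<int32_t>(param_strategy.size())) {
    throw std::out_of_range("GatherV2 axis out of range");
  }
  // The gathered dimension cannot be sharded: force its split count to 1.
  param_strategy[axis] = 1;

  std::vector<std::vector<int32_t>> strategies;
  strategies.push_back(param_strategy);
  // The indices input is replicated: an all-ones strategy, one entry per dimension.
  strategies.push_back(std::vector<int32_t>(indices_rank, 1));
  return strategies;
}

int main() {
  // 2-D parameter split 4x2, gathering along axis -2 (== axis 0), 1-D indices.
  auto strategies = SketchGatherV2Strategy({4, 2}, -2, 1);
  for (const auto &s : strategies) {
    for (int32_t v : s) std::cout << v << ' ';
    std::cout << '\n';  // prints "1 2" then "1"
  }
  return 0;
}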
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.h

@@ -38,7 +38,8 @@ std::vector<std::vector<int32_t>> PrepareBiasAdd(const std::shared_ptr<std::vect
 std::vector<std::vector<int32_t>> PrepareOneHot(const std::shared_ptr<Graph> &graph,
                                                 const std::vector<std::shared_ptr<OperatorInfo>> &ops,
                                                 const size_t iter_graph, const size_t iter_ops);
-std::vector<std::vector<int32_t>> PrepareGatherV2(const std::shared_ptr<std::vector<int32_t>> &s);
+std::vector<std::vector<int32_t>> PrepareGatherV2(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
+                                                  const size_t iter_ops, std::vector<int32_t> s);
 std::vector<std::vector<int32_t>> PrepareL2Normalize(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
                                                      const size_t iter_ops, std::vector<int32_t> s);
 std::vector<std::vector<int32_t>> MakeRecSearchStrategy(const std::shared_ptr<Graph> &graph,
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.cc

@@ -40,7 +40,7 @@ const TensorParam MakeTensor(int n, int c, int h, int w) {
   return tensor;
 }
 
-Graph::NodeType MakeNewOperator(std::vector<std::shared_ptr<OperatorInfo>> ops, size_t iter_ops) {
+Graph::NodeType MakeNewOperator(const std::vector<std::shared_ptr<OperatorInfo>> &ops, size_t iter_ops) {
   Graph::NodeType NewOp;
   NewOp.name = ops[iter_ops]->name();
   NewOp.info = InfoType::kApplication;

@@ -140,7 +140,7 @@ std::shared_ptr<Graph> ParseGraph(const std::vector<std::shared_ptr<OperatorInfo
   return graph;
 }
 
-void MakeEdge(const std::vector<std::vector<std::string>> &input_tensor_names, std::shared_ptr<Graph> graph) {
+void MakeEdge(const std::vector<std::vector<std::string>> &input_tensor_names, const std::shared_ptr<Graph> &graph) {
   for (size_t iter_i = 0; iter_i < input_tensor_names.size(); iter_i++) {
     for (size_t iter_j = 1; iter_j < input_tensor_names[iter_i].size(); iter_j++) {
       size_t head_node_index = GetIndexInInputTensorNames(input_tensor_names, input_tensor_names[iter_i][iter_j]);
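Every other change in this merge follows one mechanical pattern: containers of shared_ptr, and shared_ptr<Graph> itself, move from pass-by-value to pass-by-const-reference. A generic C++ sketch of what the by-value form costs; OperatorStub, ByValue, and ByConstRef are hypothetical names, not MindSpore code:

// Passing a std::vector of shared_ptr by value copies the container and
// atomically bumps every element's reference count; a const reference
// observes the same objects with no copies and no refcount traffic.
#include <iostream>
#include <memory>
#include <vector>

struct OperatorStub {};

void ByValue(std::vector<std::shared_ptr<OperatorStub>> ops) {
  // Each element's use_count was incremented to build this copy.
  std::cout << "by value:     use_count = " << ops[0].use_count() << '\n';  // 2
}

void ByConstRef(const std::vector<std::shared_ptr<OperatorStub>> &ops) {
  // No copy; the caller's ownership is unchanged.
  std::cout << "by const ref: use_count = " << ops[0].use_count() << '\n';  // 1
}

int main() {
  std::vector<std::shared_ptr<OperatorStub>> ops{std::make_shared<OperatorStub>()};
  ByValue(ops);
  ByConstRef(ops);
  return 0;
}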
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.h

@@ -111,7 +111,7 @@ const std::map<std::string, OperatorType> DictOpType{
 const TensorParam MakeTensor(int n, int c, int h, int w);
 
-Graph::NodeType MakeNewOperator(std::vector<std::shared_ptr<OperatorInfo>> ops, size_t iter_ops);
+Graph::NodeType MakeNewOperator(const std::vector<std::shared_ptr<OperatorInfo>> &ops, size_t iter_ops);
 
 OperatorRec CompleteOperatorInputs(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops,
                                    Graph::NodeType NewTensor);

@@ -122,7 +122,7 @@ TensorParam Complete2DInputs(const std::vector<std::shared_ptr<OperatorInfo>> &o
 std::shared_ptr<Graph> ParseGraph(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
                                   const std::vector<std::vector<std::string>> &input_tensor_names);
 
-void MakeEdge(const std::vector<std::vector<std::string>> &input_tensor_names, std::shared_ptr<Graph> graph);
+void MakeEdge(const std::vector<std::vector<std::string>> &input_tensor_names, const std::shared_ptr<Graph> &graph);
 
 size_t GetIndexInInputTensorNames(const std::vector<std::vector<std::string>> &input_tensor_names,
                                   const std::string &input_name);
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.cc

@@ -93,7 +93,7 @@ double GetWeights(const Graph::NodeType &node) {
 }
 
 // Sort all the nodes by their weights
-std::vector<size_t> SortByWeight(const std::shared_ptr<Graph> graph) {
+std::vector<size_t> SortByWeight(const std::shared_ptr<Graph> &graph) {
   MS_EXCEPTION_IF_NULL(graph);
 
   std::vector<std::pair<double, size_t>> weight_to_node_index;

@@ -124,7 +124,7 @@ std::vector<size_t> SortByWeight(const std::shared_ptr<Graph> graph) {
 // Get optimal strategy to partition the target node
 StrategyRec PartitionNode(const Graph::NodeType &node,
                           const std::vector<std::pair<std::string, StrategyRec>> &node_name_to_strategy,
-                          std::shared_ptr<Graph> graph) {
+                          const std::shared_ptr<Graph> &graph) {
   bool enable_conv_chw_partition = false;
   MS_EXCEPTION_IF_NULL(graph);

@@ -191,7 +191,8 @@ StrategyRec PartitionNode(const Graph::NodeType &node,
 }
 
 // Parttion graph into all devices.
-Status PartitionForAllDevices(const size_t num_device, const double device_memory, std::shared_ptr<Graph> graph) {
+Status PartitionForAllDevices(const size_t num_device, const double device_memory,
+                              const std::shared_ptr<Graph> &graph) {
   if (num_device < 1) {
     MS_LOG(EXCEPTION) << "ERROR: Number of devices can't be " << num_device << ".";
   }

@@ -261,7 +262,7 @@ Graph::NodeType ApplyStrToTensor(Graph::NodeType Node) {
   return Node;
 }
 
-Status DevicesMemoryControl(const size_t num_device, const double device_memory, std::shared_ptr<Graph> graph) {
+Status DevicesMemoryControl(const size_t num_device, const double device_memory, const std::shared_ptr<Graph> &graph) {
   MS_EXCEPTION_IF_NULL(graph);
 
   if (num_device == 0) {
     MS_LOG(EXCEPTION) << "Failure: device number is 0.";
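One detail worth noting above: the MS_EXCEPTION_IF_NULL(graph) guards survive the refactor, and must, since a const std::shared_ptr<Graph> & can still wrap a shared_ptr that holds nullptr. A tiny generic illustration (Graph and HasGraph here are hypothetical stand-ins, not the MindSpore types):

// A const reference to a shared_ptr avoids refcount churn but proves nothing
// about nullness; callees still need their own null checks.
#include <cassert>
#include <memory>

struct Graph {};

bool HasGraph(const std::shared_ptr<Graph> &graph) { return graph != nullptr; }

int main() {
  std::shared_ptr<Graph> empty;  // default-constructed: holds nullptr
  assert(!HasGraph(empty));
  assert(HasGraph(std::make_shared<Graph>()));
  return 0;
}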
mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.h

@@ -32,19 +32,19 @@
 namespace mindspore {
 namespace parallel {
-std::vector<size_t> SortByWeight(const std::shared_ptr<Graph> graph);
+std::vector<size_t> SortByWeight(const std::shared_ptr<Graph> &graph);
 
 double GetWeights(const Graph::NodeType &node);
 
 StrategyRec PartitionNode(const Graph::NodeType &node,
                           const std::vector<std::pair<std::string, StrategyRec>> &node_name_to_strategy,
-                          std::shared_ptr<Graph> graph);
+                          const std::shared_ptr<Graph> &graph);
 
-Status PartitionForAllDevices(const size_t num_device, const double device_memory, std::shared_ptr<Graph> graph);
+Status PartitionForAllDevices(const size_t num_device, const double device_memory, const std::shared_ptr<Graph> &graph);
 
 Graph::NodeType ApplyStrToTensor(Graph::NodeType Node);
 
-Status DevicesMemoryControl(const size_t num_device, const double device_memory, std::shared_ptr<Graph> graph);
+Status DevicesMemoryControl(const size_t num_device, const double device_memory, const std::shared_ptr<Graph> &graph);
 
 size_t GetDataTypeSize(const TensorType &type);
 }  // namespace parallel