magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit 87658105
Authored April 15, 2020 by c00425699
Parent: 9c9c7091

fix_coding_style_check_warning

Showing 38 changed files with 15 additions and 59 deletions (+15 −59)

mindspore/ccsrc/parallel/device_manager.cc  +0 −1
mindspore/ccsrc/parallel/device_matrix.cc  +0 −1
mindspore/ccsrc/parallel/dynamic_creator.h  +0 −1
mindspore/ccsrc/parallel/ops_info/activation_info.cc  +2 −1
mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc  +0 −1
mindspore/ccsrc/parallel/ops_info/loss_info.cc  +2 −1
mindspore/ccsrc/parallel/ops_info/operator_info.cc  +0 −1
mindspore/ccsrc/parallel/ops_info/prelu_info.cc  +4 −2
mindspore/ccsrc/parallel/ops_info/reshape_info.cc  +3 −2
mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc  +3 −3
mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h  +0 −1
mindspore/ccsrc/parallel/status.h  +0 −1
mindspore/ccsrc/parallel/step_auto_parallel.cc  +0 −2
mindspore/ccsrc/parallel/step_auto_parallel.h  +0 −1
mindspore/ccsrc/parallel/step_parallel.cc  +0 −2
mindspore/ccsrc/parallel/strategy.h  +0 −1
mindspore/ccsrc/parallel/tensor_layout/arrangement.cc  +0 −1
mindspore/ccsrc/parallel/tensor_layout/arrangement.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/array.cc  +0 −1
mindspore/ccsrc/parallel/tensor_layout/array.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/construct_operator.h  +0 −1
mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc  +0 −2
mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/map.cc  +0 −1
mindspore/ccsrc/parallel/tensor_layout/map.h  +0 −1
mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc  +0 −2
mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc  +0 −2
mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc  +0 −1
mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/shape_util.cc  +0 −2
mindspore/ccsrc/parallel/tensor_layout/shape_util.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/tensor_info.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc  +1 −2
mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h  +0 −2
mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc  +0 −1
mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h  +0 −2

mindspore/ccsrc/parallel/device_manager.cc
@@ -370,6 +370,5 @@ void DeviceManager::Clear() {
   stage_devices_.clear();
   gm_.Clear();
 }
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/device_matrix.cc
@@ -29,7 +29,6 @@
 namespace mindspore {
 namespace parallel {
-
 DeviceMatrix::DeviceMatrix(int32_t rank, RankList dev_list, Shape dev_shape)
     : rank_(rank), dev_list_(std::move(dev_list)), dev_shape_(std::move(dev_shape)) {
   if (!std::any_of(dev_list_.begin(), dev_list_.end(), [rank](int32_t a) { return a == rank; })) {
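
The constructor above validates that the given rank actually appears in the device list. A minimal self-contained sketch of that std::any_of membership check (RankList is a stand-in alias here, matching the std::vector<int32_t> aliases used elsewhere in this commit):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using RankList = std::vector<int32_t>;  // illustrative alias

    // Returns true when `rank` occurs in `dev_list`, mirroring the guard in
    // the DeviceMatrix constructor.
    bool RankInList(int32_t rank, const RankList& dev_list) {
      return std::any_of(dev_list.begin(), dev_list.end(), [rank](int32_t a) { return a == rank; });
    }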

mindspore/ccsrc/parallel/dynamic_creator.h
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 #define REGISTER(className)                                                                                  \
   OperatorInfoPtr objectCreator##className(std::string name, Shapes in, Shapes out, PrimitiveAttrs& attrs) { \
     return std::make_shared<className>(name, in, out, attrs);                                                \
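
The REGISTER macro stamps out one creator function per operator class by pasting the class name into the function name with the ## operator. A minimal sketch of the pattern with simplified stand-in types (the real macro takes Shapes and PrimitiveAttrs& parameters):

    #include <memory>
    #include <string>

    struct OperatorInfo {  // stand-in for the real base class
      virtual ~OperatorInfo() = default;
    };
    using OperatorInfoPtr = std::shared_ptr<OperatorInfo>;

    struct SoftmaxInfo : OperatorInfo {  // hypothetical operator class
      SoftmaxInfo(std::string /*name*/, int /*in*/, int /*out*/) {}
    };

    // Same shape as the macro above: `##` pastes the class name into a
    // uniquely named factory function returning a shared_ptr to that class.
    #define REGISTER(className)                                                     \
      OperatorInfoPtr objectCreator##className(std::string name, int in, int out) { \
        return std::make_shared<className>(name, in, out);                          \
      }

    REGISTER(SoftmaxInfo)  // defines objectCreatorSoftmaxInfo(...)

    int main() {
      OperatorInfoPtr op = objectCreatorSoftmaxInfo("Softmax", 0, 0);
      return op ? 0 : 1;
    }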

mindspore/ccsrc/parallel/ops_info/activation_info.cc
@@ -229,7 +229,8 @@ Status Softmax::GenerateStrategies(int32_t stage_id) {
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   for (auto& element : axis_) {
     int32_t axis_index = element;
     if (element < 0) {
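
The replacement is behavior-preserving: both forms build a ones-filled vector whose length equals the input rank, and the (void) cast explicitly discards the iterator that insert returns. A minimal sketch, assuming Shape is std::vector<int32_t> (consistent with the aliases elsewhere in this commit):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    using Shape = std::vector<int32_t>;  // assumption, matching e.g. Dimensions in strategy.h

    int main() {
      const size_t rank = 4;               // stands in for inputs_shape_[0].size()
      Shape a(rank, 1);                    // old form: fill constructor, four ones
      Shape b;                             // new form: default-construct, then fill
      (void)b.insert(b.begin(), rank, 1);  // (void) discards the returned iterator
      assert(a == b);                      // identical contents either way
      return 0;
    }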

mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status BatchParallelInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {

mindspore/ccsrc/parallel/ops_info/loss_info.cc
@@ -194,7 +194,8 @@ Status SoftmaxCrossEntropyWithLogitsInfo::GenerateStrategies(int32_t stage_id) {
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   input0_split[IntToSize(axis_index)] = 0;
   Shapes splittable_inputs = {input0_split, input0_split};
   std::vector<StrategyPtr> sp_vector;

mindspore/ccsrc/parallel/ops_info/operator_info.cc
@@ -1255,6 +1255,5 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra
 double OperatorInfo::GetForwardMemoryCostFromCNode() {
   return operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0);
 }
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/ops_info/prelu_info.cc
@@ -212,8 +212,10 @@ Status PReLUInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
-  input0_split[1] = 0;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  input0_split.emplace_back(0);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 2, 1);
   Shape input1_split(inputs_shape_[1].size(), 0);
   Shapes splittable_inputs = {input0_split, input1_split};
   std::vector<StrategyPtr> sp_vector;

mindspore/ccsrc/parallel/ops_info/reshape_info.cc
@@ -413,8 +413,9 @@ Status ReshapeInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 0);
-  input0_split[0] = 1;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 1, 0);
   Shapes splittable_inputs = {input0_split};
   std::vector<StrategyPtr> sp_vector;
   if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) {

mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status VirtualDatasetInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {

@@ -225,8 +224,9 @@ Status VirtualDatasetInfo::GenerateStrategies(int32_t stage_id) {
   StrategyPtr sp;
   std::vector<Dimensions> strategy;
   for (auto& shape : inputs_shape_) {
-    Shape temp(shape.size(), 1);
-    temp[0] = SizeToInt(total_dev_num);
+    Shape temp;
+    temp.emplace_back(SizeToInt(total_dev_num));
+    (void)temp.insert(temp.end(), shape.size() - 1, 1);
     strategy.push_back(temp);
   }
   sp = std::make_shared<Strategy>(stage_id, strategy);
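
The rewritten loop builds, for every input, a data-parallel strategy that splits only the first dimension across all devices. A self-contained sketch of that construction (stand-in aliases; assumes every shape is non-empty, as the original loop does):

    #include <cstdint>
    #include <vector>

    using Shape = std::vector<int32_t>;
    using Dimensions = std::vector<int32_t>;

    // For each input shape, emit [dev_num, 1, 1, ...]: dimension 0 is split
    // across all devices and the remaining dimensions stay unsplit.
    std::vector<Dimensions> BuildDataParallelStrategy(const std::vector<Shape>& inputs_shape,
                                                      int32_t total_dev_num) {
      std::vector<Dimensions> strategy;
      for (const auto& shape : inputs_shape) {
        Shape temp;
        temp.emplace_back(total_dev_num);
        (void)temp.insert(temp.end(), shape.size() - 1, 1);
        strategy.push_back(temp);
      }
      return strategy;
    }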

mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h
@@ -51,7 +51,6 @@ class VirtualDatasetInfo : public OperatorInfo {
   Status GetAttrs() override;
   Status InferAsLossDivisor() override;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/status.h
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 enum Status {
   SUCCESS = 0,
   FAILED,

mindspore/ccsrc/parallel/step_auto_parallel.cc
@@ -487,7 +487,6 @@ Status ConstructCostGraphNodes(const std::vector<AnfNodePtr> &all_nodes, const F
   bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                        (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                        (current_op_ptr->name().find(prim->name()) == std::string::npos);
   if (is_find_wrong) {
     MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                       << " does not match the Prim: " << prim->name();

@@ -947,7 +946,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
   graph = EliminateGraph(graph, eli_list, index_list);
   size_t num_device = g_device_manager->DeviceNum();
   if (PartitionForAllDevices(num_device, graph) == SUCCESS) {
     MS_LOG(INFO) << "Partition Success With " << num_device << " devices.";
   } else {
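
The is_find_wrong test above declares a mismatch when the selected OperatorInfo name contains neither of the expected markers nor the primitive's own name. A sketch of that check (the two marker string values are assumptions; the real ones come from the VIRTUAL_DATA_SET_INFO and BATCH_PARALLEL constants):

    #include <string>

    bool OperatorMatchesPrim(const std::string& op_name, const std::string& prim_name) {
      const std::string kVirtualDatasetInfo = "VirtualDatasetInfo";  // assumed marker value
      const std::string kBatchParallel = "BatchParallelInfo";        // assumed marker value
      bool is_find_wrong = (op_name.find(kVirtualDatasetInfo) == std::string::npos) &&
                           (op_name.find(kBatchParallel) == std::string::npos) &&
                           (op_name.find(prim_name) == std::string::npos);
      return !is_find_wrong;
    }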

mindspore/ccsrc/parallel/step_auto_parallel.h
@@ -55,7 +55,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
 std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator& it, std::vector<std::vector<std::string>> input_tensor_names);
-
 }  // namespace parallel
 }  // namespace mindspore
 #endif  // PARALLEL_STEP_AUTO_PARALLEL_H_

mindspore/ccsrc/parallel/step_parallel.cc
@@ -2094,7 +2094,6 @@ CNodePtr FindLossCNodeFromRoot(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
   if (func_graph == nullptr) {
     return FindLossCNode(root);
   } else {

@@ -2109,7 +2108,6 @@ FuncGraphPtr ForwardGraph(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
   if (func_graph != nullptr) {
     forward_graph = func_graph;
   }

mindspore/ccsrc/parallel/strategy.h
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 #define MIN_SLICE_NUM 1
 using Dimensions = std::vector<int32_t>;

mindspore/ccsrc/parallel/tensor_layout/arrangement.cc
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status Arrangement::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {

mindspore/ccsrc/parallel/tensor_layout/arrangement.h
@@ -28,7 +28,6 @@
 namespace mindspore {
 namespace parallel {
-
 class Arrangement : public Array {
  public:
   Arrangement() : size_(1) {}

@@ -53,7 +52,6 @@ class Arrangement : public Array {
   void ComputeSize();
   int32_t size_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/array.cc
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string Array::ToString() const {
   std::ostringstream buffer;
   buffer << "[ ";

mindspore/ccsrc/parallel/tensor_layout/array.h
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 class Array {
  public:
   Array() = default;

@@ -43,7 +42,6 @@ class Array {
  protected:
   std::vector<int32_t> array_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/construct_operator.h
@@ -52,7 +52,6 @@ class ConstructOperator {
   Shape dev_matrix_shape_;
   Status CreateGroupByDim(size_t axis, std::vector<Group>* group);
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc
@@ -20,7 +20,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string LayoutTransfer::ToString() const {
   std::ostringstream buffer;
   buffer << std::endl << std::string("from_in_ tensor layout:" + from_in_.ToString());

@@ -37,6 +36,5 @@ Status LayoutTransfer::Init(const TensorLayout& from_in, const TensorLayout& to_
   Status status = CheckValidTransfer();
   return status;
 }
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h
@@ -23,7 +23,6 @@
 namespace mindspore {
 namespace parallel {
-
 class LayoutTransfer {
  public:
   LayoutTransfer() = default;

@@ -43,7 +42,6 @@ class LayoutTransfer {
  private:
   virtual Status CheckValidTransfer() = 0;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/map.cc
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status Map::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {

mindspore/ccsrc/parallel/tensor_layout/map.h
@@ -46,7 +46,6 @@ class Map : public Array {
  private:
   bool IsValidMap();
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status RedistributionLayoutTransfer::CheckValidTransfer() { return Status::SUCCESS; }
 /*

@@ -66,6 +65,5 @@ std::shared_ptr<ReshapeLayoutTransfer> RedistributionLayoutTransfer::UnifyDevice
   }
   return unified_device_arrangement_ptr->UnifyDeviceArrangementAndTensorShape();
 }
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h
@@ -24,7 +24,6 @@
 namespace mindspore {
 namespace parallel {
-
 class RedistributionLayoutTransfer : public LayoutTransfer {
  public:
   RedistributionLayoutTransfer() = default;

@@ -35,7 +34,6 @@ class RedistributionLayoutTransfer : public LayoutTransfer {
   Status CheckValidTransfer() override;
   std::shared_ptr<ReshapeLayoutTransfer> UnifyDeviceArrangement() const;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc
@@ -22,7 +22,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status RedistributionOperatorInfer::Init(const TensorLayout& tensor_layout, const Map& out_tensor_map, RankList dev_list) {
   in_tensor_map_ = tensor_layout.tensor_map();

@@ -273,6 +272,5 @@ Status RedistributionOperatorInfer::TransferConcatByAxis(Args args) {
   }
   return Status::SUCCESS;
 }
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h
@@ -28,7 +28,6 @@
 #include "utils/convert_utils.h"
 namespace mindspore {
 namespace parallel {
-
 using DeviceArrangement = std::vector<int32_t>;
 using TensorMap = std::vector<int32_t>;
 using TensorShape = std::vector<int32_t>;

@@ -69,7 +68,6 @@ class RedistributionOperatorInfer {
   RankList dev_list_;
   bool construct_op_flag_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc
@@ -20,7 +20,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status ReshapeLayoutTransfer::CheckValidTransfer() {
   if (!IsSameDeviceArrangement()) {
     return Status::FAILED;

mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h
@@ -23,7 +23,6 @@
 namespace mindspore {
 namespace parallel {
-
 class ReshapeLayoutTransfer : public LayoutTransfer {
  public:
   ReshapeLayoutTransfer() = default;

@@ -43,7 +42,6 @@ class ReshapeLayoutTransfer : public LayoutTransfer {
   bool FromTensorShapeCanBeExpandByTo() const;
   bool ToTensorShapeCanBeExpandByFrom() const;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/shape_util.cc
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 /*
  * example:
  * shape = [2, 8, 32]

@@ -260,6 +259,5 @@ Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& e
   }
   return status;
 }
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/shape_util.h
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 /*
  * compute the accumulating product of all the values in shape from left to right,
  * the accumulating results are saved in shape_accum from left to right

@@ -167,7 +166,6 @@ Status ExpandAccumulateProduct(const std::vector<int64_t>& in_accum_reverse,
  * out = [2, 4, 2, 4, 8]
  */
 Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& expand, std::vector<int32_t>* out);
-
 }  // namespace parallel
 }  // namespace mindspore
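
The comment block above describes a left-to-right accumulating product over a shape, e.g. [2, 8, 32] -> [2, 16, 512]. A minimal sketch of that computation (function name and signature are illustrative, not the library's API):

    #include <cstdint>
    #include <vector>

    std::vector<int64_t> AccumulateProduct(const std::vector<int32_t>& shape) {
      std::vector<int64_t> shape_accum;
      int64_t prod = 1;
      for (int32_t v : shape) {
        prod *= v;                    // running product from the left
        shape_accum.push_back(prod);  // [2, 8, 32] -> [2, 16, 512]
      }
      return shape_accum;
    }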

mindspore/ccsrc/parallel/tensor_layout/tensor_info.h
@@ -28,7 +28,6 @@
 namespace mindspore {
 namespace parallel {
-
 using Shapes = std::vector<Shape>;
 class TensorInfo {

@@ -55,7 +54,6 @@ class TensorInfo {
   // reduce method's reduce dim
   std::vector<int32_t> reduce_dim_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string TensorLayout::ToString() const { return StandardToString() + OriginToString(); }
 std::string TensorLayout::StandardToString() const {

@@ -337,7 +336,7 @@ Status TensorLayout::UpdateTensorMap(uint32_t index, int32_t value) {
     MS_LOG(ERROR) << "Index is out of the size of the tensor map!";
     return Status::FAILED;
   }
-  Shape shape = tensor_map_.array();
+  auto shape = tensor_map_.array();
   shape[index] = value;
   if (tensor_map_.Init(shape) == Status::FAILED) {
     MS_LOG(ERROR) << "Update tensor map failed!";
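
UpdateTensorMap follows a copy-patch-reinitialize pattern: copy the map's underlying array, change one entry, then re-run Init so the map's invariants are checked again; the style fix only swaps the explicit Shape type for auto. A sketch with simplified stand-in types (the real Map::Init performs validation):

    #include <cstdint>
    #include <vector>

    enum class Status { SUCCESS = 0, FAILED };

    struct Map {  // stand-in for parallel::Map
      std::vector<int32_t> array_;
      const std::vector<int32_t>& array() const { return array_; }
      Status Init(const std::vector<int32_t>& a) {  // real Init validates the map
        array_ = a;
        return Status::SUCCESS;
      }
    };

    Status UpdateTensorMap(Map* tensor_map, uint32_t index, int32_t value) {
      if (index >= tensor_map->array().size()) {
        return Status::FAILED;  // "Index is out of the size of the tensor map!"
      }
      auto shape = tensor_map->array();  // auto deduces std::vector<int32_t>; this copies the array
      shape[index] = value;
      return tensor_map->Init(shape);
    }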

mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h
@@ -30,7 +30,6 @@
 namespace mindspore {
 namespace parallel {
-
 class TensorLayout {
  public:
   TensorLayout() = default;

@@ -94,7 +93,6 @@ class TensorLayout {
   Map tensor_map_;
   Arrangement tensor_shape_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore

mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc
@@ -24,7 +24,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status TensorRedistribution::Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list) {
   from_origin_ = from;
   to_origin_ = to;

mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h
@@ -33,7 +33,6 @@
 namespace mindspore {
 namespace parallel {
-
 class TensorRedistribution {
  public:
   explicit TensorRedistribution(bool construct_op_flag = true, bool keep_reshape = false)

@@ -83,7 +82,6 @@ class TensorRedistribution {
   bool construct_op_flag_;
   bool keep_reshape_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore