magicwindyyd / mindspore (forked from MindSpore / mindspore, in sync with the upstream project)
c4b03e85
编写于
3月 31, 2020
作者:
C
c00425699
提交者:
高东海
4月 08, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
use std::vector instead of std::list to promote performance for parallel module
上级
d735d479
变更
73
隐藏空白更改
内联
并排
Showing
73 changed file
with
141 addition
and
160 deletion
+141
-160
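The commit title claims a performance win from moving the parallel module's rank and device containers from std::list to std::vector. For background only (this sketch is not part of the commit), the property it relies on: std::vector stores elements contiguously, so traversal is cache-friendly and push_back is amortized O(1) without a per-element allocation, whereas std::list allocates one node per element and follows a pointer on every step. A minimal, self-contained micro-benchmark along those lines, with an arbitrary element count chosen purely for illustration:

// Illustrative only; not code from this repository.
// Compares traversal of std::vector<int32_t> and std::list<int32_t>,
// the element type used for RankList in the parallel module.
#include <chrono>
#include <cstdint>
#include <iostream>
#include <list>
#include <numeric>
#include <vector>

template <typename Container>
int64_t SumWithTiming(const Container& ranks, const char* label) {
  auto start = std::chrono::steady_clock::now();
  int64_t sum = std::accumulate(ranks.begin(), ranks.end(), int64_t{0});
  auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now() - start)
                .count();
  std::cout << label << ": " << us << " us" << std::endl;
  return sum;
}

int main() {
  constexpr int32_t kDevices = 1 << 20;  // arbitrary size, for illustration only
  std::vector<int32_t> vec(kDevices);
  std::iota(vec.begin(), vec.end(), 0);
  std::list<int32_t> lst(vec.begin(), vec.end());
  // The vector walk touches contiguous memory; the list walk chases
  // per-node pointers, which is the overhead this commit removes.
  int64_t v = SumWithTiming(vec, "vector traversal");
  int64_t l = SumWithTiming(lst, "list traversal");
  return v == l ? 0 : 1;
}

On most hardware the contiguous traversal is markedly faster; the exact ratio depends on the machine.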
Showing 73 changed files with 141 additions and 160 deletions (+141, -160):

mindspore/ccsrc/parallel/context.cc  +0 -1
mindspore/ccsrc/parallel/context.h  +0 -1
mindspore/ccsrc/parallel/device.h  +0 -1
mindspore/ccsrc/parallel/device_manager.cc  +10 -9
mindspore/ccsrc/parallel/device_manager.h  +10 -10
mindspore/ccsrc/parallel/device_matrix.cc  +3 -3
mindspore/ccsrc/parallel/device_matrix.h  +4 -5
mindspore/ccsrc/parallel/graph_util/generate_graph.cc  +0 -1
mindspore/ccsrc/parallel/graph_util/generate_graph.h  +0 -1
mindspore/ccsrc/parallel/group_manager.cc  +3 -3
mindspore/ccsrc/parallel/group_manager.h  +5 -5
mindspore/ccsrc/parallel/ops_info/activation_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/arithmetic_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/bias_add_info.h  +1 -1
mindspore/ccsrc/parallel/ops_info/comparison_function_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/elementary_function_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/generator_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/get_next_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/loss_info.h  +1 -1
mindspore/ccsrc/parallel/ops_info/matmul_info.cc  +1 -1
mindspore/ccsrc/parallel/ops_info/matmul_info.h  +1 -1
mindspore/ccsrc/parallel/ops_info/onehot_info.h  +1 -1
mindspore/ccsrc/parallel/ops_info/operator_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/prelu_info.h  +1 -1
mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc  +1 -1
mindspore/ccsrc/parallel/ops_info/reduce_method_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/reshape_info.h  +0 -1
mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h  +1 -0
mindspore/ccsrc/parallel/ops_info/transpose_info.h  +1 -1
mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h  +1 -1
mindspore/ccsrc/parallel/status.h  +0 -1
mindspore/ccsrc/parallel/step_parallel.cc  +1 -1
mindspore/ccsrc/parallel/step_parallel.h  +1 -1
mindspore/ccsrc/parallel/strategy.h  +0 -1
mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h  +0 -1
tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc  +2 -2
tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc  +2 -2
tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc  +2 -2
tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc  +6 -6
tests/ut/cpp/parallel/device_manager_test.cc  +8 -8
tests/ut/cpp/parallel/device_matrix_test.cc  +8 -8
tests/ut/cpp/parallel/group_manager_test.cc  +11 -11
tests/ut/cpp/parallel/ops_info/activation_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/activation_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/gelu_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/generate_strategy_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/generator_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/get_next_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/matmul_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/onehot_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc  +2 -2
tests/ut/cpp/parallel/ops_info/pow_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/prelu_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/reduce_method_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/reshape_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/softmax_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/tanh_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc  +2 -2
tests/ut/cpp/parallel/ops_info/transpose_test.cc  +2 -2
tests/ut/cpp/parallel/step_auto_parallel_test.cc  +2 -2
tests/ut/cpp/parallel/step_parallel_test.cc  +2 -2
tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc  +2 -2
tests/ut/cpp/parallel/tensor_layout/redistribution_operator_infer_test.cc  +2 -2
tests/ut/cpp/parallel/tensor_layout/tensor_redistribution_test.cc  +1 -1
tests/ut/cpp/parallel/virtual_dataset_test.cc  +2 -2
mindspore/ccsrc/parallel/context.cc
@@ -21,7 +21,6 @@
 #include <utility>
 #include <numeric>
 #include <functional>
-#include <list>
 #include <memory>
 #include "parallel/device_manager.h"

mindspore/ccsrc/parallel/context.h
@@ -20,7 +20,6 @@
 #include <cstdint>
 #include <string>
 #include <vector>
-#include <list>
 #include <memory>
 #include "parallel/status.h"

mindspore/ccsrc/parallel/device.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_DEVICE_H_
 #include <cstdint>
-#include <list>
 #include <string>
 #include <utility>
mindspore/ccsrc/parallel/device_manager.cc
@@ -30,7 +30,7 @@ namespace mindspore {
 namespace parallel {
 DeviceManagerPtr g_device_manager = nullptr;
-Stage::Stage(const std::list<mindspore::parallel::Device>& devices, int num, int rank)
+Stage::Stage(const std::vector<mindspore::parallel::Device>& devices, int num, int rank)
     : devices_(devices), number_(num), rank_(rank) {
   gm_ = GroupManager();
 }

@@ -104,7 +104,7 @@ int32_t GetListMemberByIndex(size_t index, const RankList& devices) {
   return result;
 }
-std::shared_ptr<Device> GetListMemberByIndex(size_t index, const std::list<std::shared_ptr<Device>>& device_list) {
+std::shared_ptr<Device> GetListMemberByIndex(size_t index, const std::vector<std::shared_ptr<Device>>& device_list) {
   size_t i = 0;
   std::shared_ptr<Device> result;
   if ((device_list.empty()) || (index >= device_list.size())) {

@@ -178,7 +178,7 @@ Status DeviceManager::Init(const RankList& devices, int32_t global_device_rank,
     MS_LOG(ERROR) << "The number of 'devices' in a stage must be positive";
     return Status::FAILED;
   }
-  std::list<Device> curr_dev_list;
+  std::vector<Device> curr_dev_list;
   for (int i = 0; i < num_device; ++i) {
     curr_dev_list.push_back(*GetListMemberByIndex(global_index, devices_));
     global_index++;

@@ -278,8 +278,8 @@ RankList DeviceManager::global_device_list(int32_t stage_id, int32_t rank, int32
 Device DeviceManager::CreateNewDeviceByRank(int32_t rank) const { return Device(rank); }
-std::list<Device> DeviceManager::CreateDeviceListByRankList(RankList ranks) {
-  std::list<Device> dev_list;
+std::vector<Device> DeviceManager::CreateDeviceListByRankList(RankList ranks) {
+  std::vector<Device> dev_list;
   for (auto& rank : ranks) {
     Device one = CreateNewDeviceByRank(rank);
     dev_list.push_back(one);

@@ -312,8 +312,8 @@ std::string HashName(const std::string& origin_name) { return std::to_string(std
 // is '0-1-3-5-7'.
 std::string DeviceManager::GenerateGroupNameByRanks(RankList ranks) {
   std::string rank_list_name;
-  std::list<int32_t>::iterator it;
-  ranks.sort();  // sorted in increasing order
+  std::vector<int32_t>::iterator it;
+  std::sort(ranks.begin(), ranks.end());  // sorted in increasing order
   for (it = ranks.begin(); it != ranks.end(); ++it) {
     if (it == ranks.begin()) {
       rank_list_name = std::to_string(*it);

@@ -343,7 +343,8 @@ std::string DeviceManager::GenerateGroupNameByRanks(RankList ranks) {
 // Create the group with the given devices and the given name. The GroupManager
 // gm_ will create a new group only if there does not exit a group with the same
 // name. Otherwise, let the pointer g point to that group.
-Group DeviceManager::CreateGroup(const std::string& group_name, const std::list<mindspore::parallel::Device>& devices) {
+Group DeviceManager::CreateGroup(const std::string& group_name,
+                                 const std::vector<mindspore::parallel::Device>& devices) {
   if ((world_group() == NCCL_WORLD_GROUP) && (devices.size() != devices_.size())) {
     MS_LOG(EXCEPTION) << "Do not support sub group for nccl";
   }

@@ -360,7 +361,7 @@ Group DeviceManager::CreateGroup(const RankList& dev_ranks) {
   }
   std::string group_name = GenerateGroupNameByRanks(dev_ranks);
-  std::list<Device> dev_list = CreateDeviceListByRankList(dev_ranks);
+  auto dev_list = CreateDeviceListByRankList(dev_ranks);
   return CreateGroup(group_name, dev_list);
 }
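Note that one hunk above is more than a type substitution: std::list has a member sort(), while std::vector does not, so ranks.sort() becomes std::sort(ranks.begin(), ranks.end()) in GenerateGroupNameByRanks. A minimal sketch of the two forms (illustrative only, assuming <algorithm> is available in the translation unit):

// Sketch only; shows why the member sort() call had to change shape.
#include <algorithm>
#include <cstdint>
#include <list>
#include <vector>

void SortRanks(std::list<int32_t>* ranks) {
  ranks->sort();  // std::list supplies its own (stable) merge sort
}

void SortRanks(std::vector<int32_t>* ranks) {
  // std::vector has no member sort(); std::sort requires random-access
  // iterators, which vector provides and list does not.
  std::sort(ranks->begin(), ranks->end());
}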
mindspore/ccsrc/parallel/device_manager.h
@@ -19,7 +19,7 @@
 #include <cstdint>
 #include <cstring>
-#include <list>
+#include <vector>
 #include <map>
 #include <memory>
 #include <string>

@@ -50,19 +50,19 @@ class Stage {
   // This class is used in pipeline-parallelization. Available devices are partitioned into multiple stages.
   // Currently, the function of pipeline-parallelization and this class are NOT implemented.
  public:
-  explicit Stage(std::list<Device> devices) : devices_(std::move(devices)), number_(0), rank_(0) {
+  explicit Stage(std::vector<Device> devices) : devices_(std::move(devices)), number_(0), rank_(0) {
     gm_ = GroupManager();
   }
-  Stage(const std::list<mindspore::parallel::Device>& devices, int num, int rank);
+  Stage(const std::vector<mindspore::parallel::Device>& devices, int num, int rank);
   ~Stage() = default;
   int GetStageNum() const { return number_; }
   size_t GetDevicesNum() const { return devices_.size(); }
-  std::list<Device> GetDevicesList() { return devices_; }
+  std::vector<Device> GetDevicesList() { return devices_; }
   int global_rank(Group* g) const;
  private:
-  std::list<Device> devices_;
+  std::vector<Device> devices_;
   int number_;
   int32_t rank_;
   GroupManager gm_;

@@ -89,10 +89,10 @@ class DeviceManager {
   RankList global_device_list(int32_t stage_id, int32_t rank, int32_t split_num) const;
   Device CreateNewDeviceByRank(int32_t rank) const;
-  std::list<Device> CreateDeviceListByRankList(RankList ranks);
+  std::vector<Device> CreateDeviceListByRankList(RankList ranks);
   std::string GenerateGroupNameByRanks(RankList dev_ranks);
-  Group CreateGroup(const std::string& group_name, const std::list<Device>& devices);
+  Group CreateGroup(const std::string& group_name, const std::vector<Device>& devices);
   Group CreateGroup(const RankList& dev_ranks);
   std::shared_ptr<Stage> GetStageById(int32_t stage_id);

@@ -108,11 +108,11 @@ class DeviceManager {
   std::string FindRankListNameByHashName(const std::string& hash_name);
  private:
-  std::list<std::shared_ptr<Device>> devices_;
+  std::vector<std::shared_ptr<Device>> devices_;
   // each stage has a list of devices
-  std::list<std::list<int32_t>> stage_devices_;
+  std::vector<std::vector<int32_t>> stage_devices_;
   std::shared_ptr<Device> device_;
-  std::list<std::shared_ptr<Stage>> stages_;
+  std::vector<std::shared_ptr<Stage>> stages_;
   GroupManager gm_;
   std::string backend_;
mindspore/ccsrc/parallel/device_matrix.cc
@@ -21,7 +21,7 @@
 #include <utility>
 #include <numeric>
 #include <functional>
-#include <list>
+#include <vector>
 #include "parallel/status.h"
 #include "parallel/ops_info/operator_info.h"

@@ -64,7 +64,7 @@ Status DeviceMatrix::GetDevicesAlongDim(const uint32_t& dim, RankList* devices)
   }
   RankList group;
-  std::list<RankList> local_group_list;
+  std::vector<RankList> local_group_list;
   // lower than dim
   int32_t step = 1;

@@ -160,7 +160,7 @@ std::string ShapeToString(const Shape& shape) {
   return str + "]";
 }
-std::string ListToString(const std::list<int32_t>& list) {
+std::string ListToString(const std::vector<int32_t>& list) {
   std::string str = "[";
   for (auto& element : list) {
     str += std::to_string(element) + ", ";
mindspore/ccsrc/parallel/device_matrix.h
@@ -20,7 +20,6 @@
 #include <cstdint>
 #include <string>
 #include <vector>
-#include <list>
 #include "parallel/status.h"
 #include "utils/convert_utils.h"

@@ -28,7 +27,7 @@
 namespace mindspore {
 namespace parallel {
-using RankList = std::list<int32_t>;
+using RankList = std::vector<int32_t>;
 using Shape = std::vector<int32_t>;
 class DeviceMatrix {

@@ -36,7 +35,7 @@ class DeviceMatrix {
   DeviceMatrix(int32_t rank, RankList devices, Shape dev_shape);
   DeviceMatrix() = default;
   ~DeviceMatrix() = default;
-  std::list<RankList> group_list() const { return group_list_; }
+  std::vector<RankList> group_list() const { return group_list_; }
   Status CreateGroupList();
   Status GetDevicesByTensorMap(const Shape& tensor_map, RankList* rank_list);
   Status GetDevicesAlongDim(const uint32_t& dim, RankList* devices);

@@ -46,11 +45,11 @@ class DeviceMatrix {
   RankList dev_list_;
   // From low dim to high dim. eg: [D0 D1 D2 D3]
   Shape dev_shape_;
-  std::list<RankList> group_list_;
+  std::vector<RankList> group_list_;
 };
 std::string ShapeToString(const Shape& shape);
-std::string ListToString(const std::list<int32_t>& list);
+std::string ListToString(const std::vector<int32_t>& list);
 }  // namespace parallel
 }  // namespace mindspore
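The alias change in device_matrix.h is the pivot of the whole commit: RankList is used throughout the parallel module and its tests, so redefining it from std::list<int32_t> to std::vector<int32_t> is what drives the mechanical edits in the remaining files. A small illustrative sketch of what the new alias additionally allows (not code from the diff; the helper name is hypothetical):

// Illustrative use of the new alias; DeviceMatrix itself is not reproduced here.
#include <cstdint>
#include <vector>

using RankList = std::vector<int32_t>;  // was std::list<int32_t> before this commit

int32_t RankAt(const RankList& ranks, size_t index) {
  // Random access is now O(1); with std::list this would have needed
  // std::next(ranks.begin(), index), an O(n) walk.
  return index < ranks.size() ? ranks[index] : -1;
}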
mindspore/ccsrc/parallel/graph_util/generate_graph.cc
@@ -17,7 +17,6 @@
 #include "parallel/graph_util/generate_graph.h"
 #include <algorithm>
-#include <list>
 #include <memory>
 #include <string>
 #include <utility>

mindspore/ccsrc/parallel/graph_util/generate_graph.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_GRAPH_UTIL_GENERATE_GRAPH_H_
 #include <vector>
-#include <list>
 #include <memory>
 #include <unordered_map>
 #include <map>
mindspore/ccsrc/parallel/group_manager.cc
@@ -30,13 +30,13 @@ Group::Group() {
   devices_.clear();
 }
-Status Group::Init(const std::string &name, const std::list<Device> &devices) {
+Status Group::Init(const std::string &name, const std::vector<Device> &devices) {
   this->name_ = name;
   this->devices_ = devices;
   return Status::SUCCESS;
 }
-std::list<Device> Group::GetDevicesList() const { return devices_; }
+std::vector<Device> Group::GetDevicesList() const { return devices_; }
 bool Group::IsInThisGroup(int32_t device_rank) {
   for (auto &device : devices_) {

@@ -66,7 +66,7 @@ Status Group::GetIndex(size_t *index) {
 GroupManager::GroupManager() { groups_.clear(); }
-Status GroupManager::CreateGroup(const std::string &group_name, const std::list<Device> &devices,
+Status GroupManager::CreateGroup(const std::string &group_name, const std::vector<Device> &devices,
                                  mindspore::parallel::Group *const group) {
   // it is simple to use size to determine whether it is a world group
   uint32_t world_size = 0;
mindspore/ccsrc/parallel/group_manager.h
@@ -18,7 +18,7 @@
 #define MINDSPORE_CCSRC_PARALLEL_GROUP_MANAGER_H_
 #include <cstdint>
-#include <list>
+#include <vector>
 #include <map>
 #include <string>

@@ -37,8 +37,8 @@ class Group {
  public:
   Group();
   ~Group() = default;
-  Status Init(const std::string& name, const std::list<Device>& devices);
-  std::list<Device> GetDevicesList() const;
+  Status Init(const std::string& name, const std::vector<Device>& devices);
+  std::vector<Device> GetDevicesList() const;
   std::string name() const { return name_; }
   bool IsInThisGroup(int32_t device_rank);
   Status GetIndex(size_t* index);

@@ -46,7 +46,7 @@ class Group {
  private:
   std::string name_;
-  std::list<Device> devices_;
+  std::vector<Device> devices_;
 };
 class GroupManager {

@@ -54,7 +54,7 @@ class GroupManager {
   GroupManager();
   ~GroupManager() = default;
-  Status CreateGroup(const std::string& name, const std::list<Device>& devices, Group* group);
+  Status CreateGroup(const std::string& name, const std::vector<Device>& devices, Group* group);
   Status DestroyGroup(Group* group);
   Status DestroyAllGroups();
   Status GetRankID(const std::string& name, unsigned int* rank_id);
mindspore/ccsrc/parallel/ops_info/activation_info.h
@@ -19,7 +19,6 @@
 #include <ir/value.h>
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/arithmetic_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ARITHMETIC_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h
@@ -17,7 +17,6 @@
 #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_
-#include <list>
 #include <string>
 #include <unordered_map>
 #include <vector>

mindspore/ccsrc/parallel/ops_info/bias_add_info.h
@@ -18,7 +18,7 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BIAS_ADD_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/comparison_function_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_COMPARISON_FUNCTION_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include "ir/value.h"

mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_DROPOUT_DO_MASK_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/elementary_function_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ELEMENTARY_FUNCTION_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include "ir/value.h"

mindspore/ccsrc/parallel/ops_info/generator_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/get_next_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GETNEXT_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_L2_NORMALIZE_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/loss_info.h
@@ -18,10 +18,10 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LOSS_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>
 #include "ir/value.h"
 #include "parallel/ops_info/operator_info.h"
 #include "parallel/ops_info/activation_info.h"
mindspore/ccsrc/parallel/ops_info/matmul_info.cc
@@ -397,7 +397,7 @@ Status MatMulBase::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   CheckGlobalDeviceManager();
-  std::list<int32_t> dev_list = g_device_manager->GetDeviceListByStageId(stage_id);
+  std::vector<int32_t> dev_list = g_device_manager->GetDeviceListByStageId(stage_id);
   size_t dev_num = dev_list.size();
   Shape input0_shape = inputs_shape_[0], input1_shape = inputs_shape_[1];
   if (transpose_a_) {
mindspore/ccsrc/parallel/ops_info/matmul_info.h
@@ -18,10 +18,10 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_MATMUL_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>
 #include "ir/value.h"
 #include "parallel/ops_info/operator_info.h"
 #include "parallel/strategy.h"

mindspore/ccsrc/parallel/ops_info/onehot_info.h
@@ -18,10 +18,10 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ONEHOT_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>
 #include "ir/value.h"
 #include "parallel/ops_info/operator_info.h"
 #include "parallel/auto_parallel/operator_costmodel.h"

mindspore/ccsrc/parallel/ops_info/operator_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_OPERATOR_INFO_H_
 #include <cstdint>
-#include <list>
 #include <map>
 #include <memory>
 #include <string>

mindspore/ccsrc/parallel/ops_info/prelu_info.h
@@ -17,11 +17,11 @@
 #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_
-#include <list>
 #include <string>
 #include <unordered_map>
 #include <vector>
 #include <memory>
 #include "ir/value.h"
 #include "parallel/ops_info/operator_info.h"
 #include "parallel/strategy.h"
mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc
@@ -198,7 +198,7 @@ ForwardOp CreatReduceMeanForwardOp(const std::vector<Group> &forward_group, cons
   // Creat RealDiv op
   OperatorName operator1_name = REAL_DIV;
-  std::list<Device> device_list = forward_group[0].GetDevicesList();
+  std::vector<Device> device_list = forward_group[0].GetDevicesList();
   auto divisor = static_cast<float>(device_list.size());
   py::tuple tuple = py::make_tuple(divisor);
   mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(tuple, dtype);
mindspore/ccsrc/parallel/ops_info/reduce_method_info.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_REDUCE_SUM_INFO_H_
 #include <string>
-#include <list>
 #include <unordered_map>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/ops_info/reshape_info.h
@@ -19,7 +19,6 @@
 #include <ir/value.h>
-#include <list>
 #include <string>
 #include <unordered_map>
 #include <vector>

mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h
@@ -20,6 +20,7 @@
 #include <vector>
 #include <memory>
 #include <string>
 #include "parallel/ops_info/operator_info.h"
 #include "parallel/auto_parallel/operator_costmodel.h"
 #include "parallel/strategy.h"

mindspore/ccsrc/parallel/ops_info/transpose_info.h
@@ -17,11 +17,11 @@
 #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_
 #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_
-#include <list>
 #include <string>
 #include <unordered_map>
 #include <vector>
 #include <memory>
 #include "ir/value.h"
 #include "parallel/ops_info/operator_info.h"
 #include "parallel/strategy.h"

mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h
@@ -17,11 +17,11 @@
 #ifndef PARALLEL_OPS_INFO_DATASET_INFO_H_
 #define PARALLEL_OPS_INFO_DATASET_INFO_H_
-#include <list>
 #include <string>
 #include <unordered_map>
 #include <vector>
 #include <memory>
 #include "ir/value.h"
 #include "parallel/ops_info/operator_info.h"
 #include "parallel/strategy.h"

mindspore/ccsrc/parallel/status.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_STATUS_H_
 #include <cstdint>
-#include <list>
 namespace mindspore {
 namespace parallel {
mindspore/ccsrc/parallel/step_parallel.cc
@@ -19,7 +19,7 @@
 #include <inttypes.h>
 #include <sys/time.h>
 #include <algorithm>
-#include <list>
 #include <map>
 #include <memory>
 #include <string>

mindspore/ccsrc/parallel/step_parallel.h
@@ -18,7 +18,7 @@
 #define MINDSPORE_CCSRC_PARALLEL_STEP_PARALLEL_H_
 #include <vector>
-#include <list>
 #include <memory>
 #include <unordered_map>
 #include <map>

mindspore/ccsrc/parallel/strategy.h
@@ -18,7 +18,6 @@
 #define MINDSPORE_CCSRC_PARALLEL_STRATEGY_H_
 #include <cstdint>
-#include <list>
 #include <string>
 #include <vector>
 #include <memory>

mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h
@@ -22,7 +22,6 @@
 #include <string>
 #include <vector>
 #include <utility>
-#include <list>
 #include "parallel/tensor_layout/redistribution_layout_transfer.h"
 #include "parallel/tensor_layout/construct_operator.h"
tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc
@@ -154,13 +154,13 @@ class TestDPAlgo : public UT::Common {
 void TestDPAlgo::SetUp() {
   cost_graph = std::make_shared<CostGraph>();
   cost_graph->SetDeviceMemoryAndCostParameter();
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 10; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   stage_map.push_back(2);
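The remaining unit-test hunks all follow the same pattern as dp_algo_test.cc above: only the declared container type in the fixture's SetUp changes, while the push_back calls and loops stay untouched because both containers expose that part of the interface identically. A schematic fixture of that pattern (names here are hypothetical, not the real test classes, and the DeviceManager initialization the real fixtures perform is omitted):

// Hypothetical condensed form of the repeated test setup after the change.
#include <cstdint>
#include <vector>

struct ParallelTestSetupSketch {
  std::vector<int32_t> dev_list;   // was std::list<int32_t>
  std::vector<int32_t> stage_map;  // was std::list<int32_t>

  void SetUp(int32_t device_num, int32_t stage0, int32_t stage1) {
    for (int32_t i = 0; i < device_num; i++) {
      dev_list.push_back(i);  // unchanged: push_back exists on both containers
    }
    stage_map.push_back(stage0);
    stage_map.push_back(stage1);
  }
};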
tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc
@@ -42,13 +42,13 @@ class TestEdgeCostModel : public UT::Common {
 };
 void TestEdgeCostModel::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 10; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   stage_map.push_back(2);

tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc
@@ -53,13 +53,13 @@ class TestCostGraph : public UT::Common {
 void TestCostGraph::SetUp() {
   cost_graph.SetDeviceMemoryAndCostParameter();
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 10; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   stage_map.push_back(2);
tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc
@@ -33,13 +33,13 @@ class TestMatMulCost : public UT::Common {
 void TestMatMulCost::SetUp() {
   mmcost_ = MatMulCost();
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);

@@ -90,13 +90,13 @@ class TestActivationCost : public UT::Common {
 void TestActivationCost::SetUp() {
   ac_cost_ = ActivationCost();
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);

@@ -142,13 +142,13 @@ class TestPReLUCost : public UT::Common {
 void TestPReLUCost::SetUp() {
   prelu_cost_ = PReLUCost();
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);
tests/ut/cpp/parallel/device_manager_test.cc
@@ -69,8 +69,8 @@ void TestDeviceManager::TearDown() {
 }
 TEST_F(TestDeviceManager, test_dm_init_AND_get_device_list) {
-  std::list<int32_t> dev_list;
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> dev_list;
+  std::vector<int32_t> stage_map;
   int32_t local_dev = 0;
   dev_list.push_back(5);

@@ -85,12 +85,12 @@ TEST_F(TestDeviceManager, test_dm_init_AND_get_device_list) {
   ASSERT_EQ(dm_.DeviceNum(), 4);
   ASSERT_EQ(dm_.GetStageNum(), (int32_t)(2));
-  std::list<int32_t> dev_list_0 = dm_.GetDeviceListByStageId(0);
-  std::list<int32_t> dev_list_1 = dm_.GetDeviceListByStageId(1);
+  std::vector<int32_t> dev_list_0 = dm_.GetDeviceListByStageId(0);
+  std::vector<int32_t> dev_list_1 = dm_.GetDeviceListByStageId(1);
   ASSERT_EQ(dev_list_0.size(), 2);
   ASSERT_EQ(dev_list_1.size(), 2);
-  std::list<int32_t>::iterator it = dev_list_0.begin();
+  std::vector<int32_t>::iterator it = dev_list_0.begin();
   ASSERT_EQ((*it), int32_t(5));
   it++;
   ASSERT_EQ((*it), int32_t(3));

@@ -111,13 +111,13 @@ TEST_F(TestDeviceManager, test_CreateNewDeviceByRank) {
 }
 TEST_F(TestDeviceManager, test_CreateDeviceListByRankList) {
-  std::list<Device> dev_list;
-  std::list<int32_t> rlist;
+  std::vector<Device> dev_list;
+  std::vector<int32_t> rlist;
   rlist.push_back(int32_t(2));
   rlist.push_back(int32_t(1));
   dev_list = dm_.CreateDeviceListByRankList(rlist);
-  std::list<Device>::iterator it = dev_list.begin();
+  std::vector<Device>::iterator it = dev_list.begin();
   ASSERT_EQ(it->rank(), int32_t(2));
   it++;
   ASSERT_EQ(it->rank(), int32_t(1));
tests/ut/cpp/parallel/device_matrix_test.cc
@@ -35,9 +35,9 @@ TEST_F(TestDeviceMatrix, Test2Dgroup_list) {
   Shape shape = {2, 3};
   DeviceMatrix arr(0, dev_list, shape);
-  std::list<RankList> group_list;
+  std::vector<RankList> group_list;
   if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
-  std::list<RankList> group_list_expect = {{0, 3}, {0, 1, 2}};
+  std::vector<RankList> group_list_expect = {{0, 3}, {0, 1, 2}};
   ASSERT_EQ(group_list, group_list_expect);
 }

@@ -46,9 +46,9 @@ TEST_F(TestDeviceMatrix, Test3Dgroup_list) {
   Shape shape = {2, 2, 3};
   DeviceMatrix arr(5, dev_list, shape);
-  std::list<RankList> group_list;
+  std::vector<RankList> group_list;
   if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
-  std::list<RankList> group_list_expect = {{5, 11}, {2, 5}, {3, 4, 5}};
+  std::vector<RankList> group_list_expect = {{5, 11}, {2, 5}, {3, 4, 5}};
   ASSERT_EQ(group_list, group_list_expect);
 }

@@ -57,9 +57,9 @@ TEST_F(TestDeviceMatrix, Test4DGetAlongDim) {
   Shape shape = {2, 1, 4, 2};
   DeviceMatrix arr(5, dev_list, shape);
-  std::list<RankList> group_list;
+  std::vector<RankList> group_list;
   if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
-  std::list<RankList> group_list_expect = {{5, 13}, {5}, {1, 3, 5, 7}, {4, 5}};
+  std::vector<RankList> group_list_expect = {{5, 13}, {5}, {1, 3, 5, 7}, {4, 5}};
   ASSERT_EQ(group_list, group_list_expect);
 }

@@ -69,9 +69,9 @@ TEST_F(TestDeviceMatrix, Test5DGetAlongDim) {
   Shape shape = {3, 4, 2, 3, 2};
   DeviceMatrix arr(5, dev_list, shape);
-  std::list<RankList> group_list;
+  std::vector<RankList> group_list;
   if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
-  std::list<RankList> group_list_expect = {{5, 53, 101}, {5, 17, 29, 41}, {5, 11}, {1, 3, 5}, {4, 5}};
+  std::vector<RankList> group_list_expect = {{5, 53, 101}, {5, 17, 29, 41}, {5, 11}, {1, 3, 5}, {4, 5}};
   ASSERT_EQ(group_list, group_list_expect);
 }
tests/ut/cpp/parallel/group_manager_test.cc
@@ -42,7 +42,7 @@ void TestGroup::TearDown() {
 Status TestGroup::Init() {
   std::string gname = "1-2";
-  std::list<Device> dev_list;
+  std::vector<Device> dev_list;
   Device one = Device(int32_t(1));
   dev_list.push_back(one);
   Device two = Device(int32_t(2));

@@ -55,8 +55,8 @@ TEST_F(TestGroup, test_Init) { ASSERT_EQ(Init(), Status::SUCCESS); }
 TEST_F(TestGroup, test_GetDevicesList) {
   Init();
-  std::list<Device> res_dev_list = gp.GetDevicesList();
-  std::list<Device>::iterator it = res_dev_list.begin();
+  std::vector<Device> res_dev_list = gp.GetDevicesList();
+  std::vector<Device>::iterator it = res_dev_list.begin();
   ASSERT_EQ(it->rank(), int32_t(1));
   it++;
   ASSERT_EQ(it->rank(), int32_t(2));

@@ -88,7 +88,7 @@ void TestGroupManager::TearDown() {
 Status TestGroupManager::Init(Group** gp_ptr) {
   std::string gname = "1-2";
-  std::list<Device> dev_list;
+  std::vector<Device> dev_list;
   Device one = Device(int32_t(1));
   dev_list.push_back(one);
   Device two = Device(int32_t(2));

@@ -102,15 +102,15 @@ TEST_F(TestGroupManager, test_CreateGroup) {
   Group* gp_ptr = new Group();
   ASSERT_EQ(Init(&gp_ptr), Status::SUCCESS);
-  std::list<Device> res_dev_list = gp_ptr->GetDevicesList();
-  std::list<Device>::iterator it = res_dev_list.begin();
+  std::vector<Device> res_dev_list = gp_ptr->GetDevicesList();
+  std::vector<Device>::iterator it = res_dev_list.begin();
   ASSERT_EQ(it->rank(), int32_t(1));
   it++;
   ASSERT_EQ(it->rank(), int32_t(2));
   delete gp_ptr;
   // testing for creating a group with an existing group name
-  std::list<Device> dev_list2;
+  std::vector<Device> dev_list2;
   Device three = Device(int32_t(3));
   dev_list2.push_back(three);
   Device four = Device(int32_t(4));

@@ -119,8 +119,8 @@ TEST_F(TestGroupManager, test_CreateGroup) {
   ASSERT_EQ(gm.CreateGroup("1-2", dev_list2, gp_ptr), Status::SUCCESS);
   ASSERT_STREQ(gp_ptr->name().data(), "1-2");
-  std::list<Device> res_dev_list2 = gp_ptr->GetDevicesList();
-  std::list<Device>::iterator it2 = res_dev_list2.begin();
+  std::vector<Device> res_dev_list2 = gp_ptr->GetDevicesList();
+  std::vector<Device>::iterator it2 = res_dev_list2.begin();
   ASSERT_EQ(it2->rank(), int32_t(1));
   it2++;
   ASSERT_EQ(it2->rank(), int32_t(2));

@@ -136,8 +136,8 @@ TEST_F(TestGroupManager, test_FindGroup) {
   ASSERT_EQ(gm.FindGroup(gname, &gp_ptr2), Status::SUCCESS);
-  std::list<Device> res_dev_list = gp_ptr2->GetDevicesList();
-  std::list<Device>::iterator it = res_dev_list.begin();
+  std::vector<Device> res_dev_list = gp_ptr2->GetDevicesList();
+  std::vector<Device>::iterator it = res_dev_list.begin();
   ASSERT_EQ(it->rank(), int32_t(1));
   it++;
   ASSERT_EQ(it->rank(), int32_t(2));
tests/ut/cpp/parallel/ops_info/activation_info_test.cc
@@ -38,13 +38,13 @@ class TestActivationInfo : public UT::Common {
 };
 void TestActivationInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);

tests/ut/cpp/parallel/ops_info/activation_test.cc
@@ -40,13 +40,13 @@ class TestActivation : public UT::Common {
 };
 void TestActivation::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);

tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc
@@ -38,13 +38,13 @@ class TestDropoutDoMaskInfo : public UT::Common {
 };
 void TestDropoutDoMaskInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 34; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(32);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/gelu_info_test.cc
@@ -38,13 +38,13 @@ class TestGeluInfo : public UT::Common {
 };
 void TestGeluInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 130; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(128);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/generate_strategy_test.cc
@@ -34,13 +34,13 @@ class TestGenerateStrategy : public UT::Common {
 };
 void TestGenerateStrategy::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 10; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/generator_info_test.cc
@@ -38,13 +38,13 @@ class TestDropoutGenMaskInfo : public UT::Common {
 };
 void TestDropoutGenMaskInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 10; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/get_next_info_test.cc
@@ -38,13 +38,13 @@ class TestGetNextInfo : public UT::Common {
 };
 void TestGetNextInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 8; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   int32_t local_dev = 0;
   // create a new g_device_manager
tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc
@@ -38,13 +38,13 @@ class TestL2NormalizeInfo : public UT::Common {
 };
 void TestL2NormalizeInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 34; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(32);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc
@@ -38,13 +38,13 @@ class TestLogSoftmaxInfo : public UT::Common {
 };
 void TestLogSoftmaxInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 130; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(128);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/matmul_info_test.cc
@@ -42,13 +42,13 @@ class TestMatmulInfo : public UT::Common {
 };
 void TestMatmulInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);

tests/ut/cpp/parallel/ops_info/onehot_info_test.cc
@@ -38,13 +38,13 @@ class TestOneHotInfo : public UT::Common {
 };
 void TestOneHotInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 10; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc
@@ -38,13 +38,13 @@ class TestOneHotInfo2 : public UT::Common {
 };
 void TestOneHotInfo2::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 10; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(8);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/pow_info_test.cc
@@ -38,13 +38,13 @@ class TestPowInfo : public UT::Common {
 };
 void TestPowInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 66; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(64);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/prelu_test.cc
@@ -39,13 +39,13 @@ class TestPReLUInfo : public UT::Common {
 };
 void TestPReLUInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);
   int32_t local_dev = 0;
tests/ut/cpp/parallel/ops_info/reduce_method_test.cc
@@ -39,13 +39,13 @@ class TestReduceSumInfo : public UT::Common {
 void TestReduceSumInfo::SetUp() {
   UT::InitPythonPath();
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 34; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(32);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/reshape_test.cc
@@ -38,13 +38,13 @@ class TestReshapeInfo : public UT::Common {
 };
 void TestReshapeInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 34; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(32);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc
@@ -38,13 +38,13 @@ class TestSoftmaxLoss : public UT::Common {
 };
 void TestSoftmaxLoss::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 65; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(64);
   stage_map.push_back(1);

tests/ut/cpp/parallel/ops_info/softmax_info_test.cc
@@ -39,13 +39,13 @@ class TestSoftmaxInfo : public UT::Common {
 };
 void TestSoftmaxInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 130; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(128);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/tanh_info_test.cc
@@ -38,13 +38,13 @@ class TestTanhInfo : public UT::Common {
 };
 void TestTanhInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 130; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(128);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc
@@ -38,13 +38,13 @@ class TestTensorAddInfo : public UT::Common {
 };
 void TestTensorAddInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 34; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(32);
   stage_map.push_back(2);

tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc
@@ -38,13 +38,13 @@ class TestTmpIdentityInfo : public UT::Common {
 };
 void TestTmpIdentityInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);
tests/ut/cpp/parallel/ops_info/transpose_test.cc
@@ -38,13 +38,13 @@ class TestTransposeInfo : public UT::Common {
 };
 void TestTransposeInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 34; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(32);
   stage_map.push_back(2);

tests/ut/cpp/parallel/step_auto_parallel_test.cc
@@ -32,13 +32,13 @@ class TestStepAutoParallel : public UT::Common {
 };
 void TestStepAutoParallel::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 20; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(16);
   stage_map.push_back(4);

tests/ut/cpp/parallel/step_parallel_test.cc
@@ -34,13 +34,13 @@ class TestStepParallel : public UT::Common {
 void TestStepParallel::SetUp() { UT::InitPythonPath(); }
 void Init_Device_Manager() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 20; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(16);
   stage_map.push_back(4);

tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc
@@ -39,12 +39,12 @@ class TestConstructOperator : public UT::Common {
 };
 void TestConstructOperator::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 1050; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(1024);
   stage_map.push_back(26);

tests/ut/cpp/parallel/tensor_layout/redistribution_operator_infer_test.cc
@@ -28,13 +28,13 @@ class TestRedistributionOperatorInfer : public UT::Common {
   TestRedistributionOperatorInfer() {}
   void SetUp() {
-    std::list<int32_t> dev_list;
+    std::vector<int32_t> dev_list;
     for (int32_t i = 0; i < 1050; i++) {
       dev_list.push_back(i);
     }
-    std::list<int32_t> stage_map;
+    std::vector<int32_t> stage_map;
     stage_map.push_back(1024);
     stage_map.push_back(26);

tests/ut/cpp/parallel/tensor_layout/tensor_redistribution_test.cc
@@ -33,7 +33,7 @@ class TestTensorRedistribution : public UT::Common {
       dev_list.push_back(i);
     }
-    std::list<int32_t> stage_map;
+    std::vector<int32_t> stage_map;
     stage_map.push_back(16);
     stage_map.push_back(4);

tests/ut/cpp/parallel/virtual_dataset_test.cc
@@ -37,13 +37,13 @@ class TestVirtualDatasetInfo : public UT::Common {
 };
 void TestVirtualDatasetInfo::SetUp() {
-  std::list<int32_t> dev_list;
+  std::vector<int32_t> dev_list;
   for (int32_t i = 0; i < 130; i++) {
     dev_list.push_back(i);
   }
-  std::list<int32_t> stage_map;
+  std::vector<int32_t> stage_map;
   stage_map.push_back(16);
   stage_map.push_back(114);