Commit da447b8d authored by mindspore-ci-bot, committed by Gitee

!45 use std::vector instead of std::list to improve performance of the parallel module

Merge pull request !45 from chentingting/permote_auto_prallel_performance
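The whole patch is a mechanical container swap: RankList, device lists and group lists in the parallel module move from std::list to std::vector, so indexed lookups (the GetListMemberByIndex pattern) and sorting stop paying std::list's pointer-chasing cost. Below is a minimal standalone sketch of that cost difference; the device count and helper are illustrative only, not taken from the patch.

```cpp
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <list>
#include <numeric>
#include <vector>

// Mirrors the GetListMemberByIndex access pattern: fetch the element at `index`.
template <typename Container>
int32_t NthElement(const Container& c, size_t index) {
  auto it = c.begin();
  std::advance(it, index);  // O(index) hops for std::list, O(1) for std::vector
  return *it;
}

int main() {
  constexpr size_t kDevices = 4096;  // hypothetical device count
  std::list<int32_t> as_list(kDevices);
  std::vector<int32_t> as_vector(kDevices);
  std::iota(as_list.begin(), as_list.end(), 0);
  std::iota(as_vector.begin(), as_vector.end(), 0);

  auto time_lookups = [&](const char* label, auto&& fetch) {
    auto start = std::chrono::steady_clock::now();
    int64_t sum = 0;
    for (size_t i = 0; i < kDevices; ++i) sum += fetch(i);
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start).count();
    std::cout << label << us << " us (checksum " << sum << ")\n";
  };

  time_lookups("std::list lookups:   ", [&](size_t i) { return NthElement(as_list, i); });  // O(n^2) overall
  time_lookups("std::vector lookups: ", [&](size_t i) { return as_vector[i]; });            // O(n) overall
  return 0;
}
```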
......@@ -21,7 +21,6 @@
#include <utility>
#include <numeric>
#include <functional>
#include <list>
#include <memory>
#include "parallel/device_manager.h"
......
......@@ -20,7 +20,6 @@
#include <cstdint>
#include <string>
#include <vector>
#include <list>
#include <memory>
#include "parallel/status.h"
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_DEVICE_H_
#include <cstdint>
#include <list>
#include <string>
#include <utility>
......
......@@ -30,7 +30,7 @@ namespace mindspore {
namespace parallel {
DeviceManagerPtr g_device_manager = nullptr;
Stage::Stage(const std::list<mindspore::parallel::Device>& devices, int num, int rank)
Stage::Stage(const std::vector<mindspore::parallel::Device>& devices, int num, int rank)
: devices_(devices), number_(num), rank_(rank) {
gm_ = GroupManager();
}
......@@ -104,7 +104,7 @@ int32_t GetListMemberByIndex(size_t index, const RankList& devices) {
return result;
}
std::shared_ptr<Device> GetListMemberByIndex(size_t index, const std::list<std::shared_ptr<Device>>& device_list) {
std::shared_ptr<Device> GetListMemberByIndex(size_t index, const std::vector<std::shared_ptr<Device>>& device_list) {
size_t i = 0;
std::shared_ptr<Device> result;
if ((device_list.empty()) || (index >= device_list.size())) {
......@@ -178,7 +178,7 @@ Status DeviceManager::Init(const RankList& devices, int32_t global_device_rank,
MS_LOG(ERROR) << "The number of 'devices' in a stage must be positive";
return Status::FAILED;
}
std::list<Device> curr_dev_list;
std::vector<Device> curr_dev_list;
for (int i = 0; i < num_device; ++i) {
curr_dev_list.push_back(*GetListMemberByIndex(global_index, devices_));
global_index++;
......@@ -278,8 +278,8 @@ RankList DeviceManager::global_device_list(int32_t stage_id, int32_t rank, int32
Device DeviceManager::CreateNewDeviceByRank(int32_t rank) const { return Device(rank); }
std::list<Device> DeviceManager::CreateDeviceListByRankList(RankList ranks) {
std::list<Device> dev_list;
std::vector<Device> DeviceManager::CreateDeviceListByRankList(RankList ranks) {
std::vector<Device> dev_list;
for (auto& rank : ranks) {
Device one = CreateNewDeviceByRank(rank);
dev_list.push_back(one);
......@@ -312,8 +312,8 @@ std::string HashName(const std::string& origin_name) { return std::to_string(std
// is '0-1-3-5-7'.
std::string DeviceManager::GenerateGroupNameByRanks(RankList ranks) {
std::string rank_list_name;
std::list<int32_t>::iterator it;
ranks.sort(); // sorted in increasing order
std::vector<int32_t>::iterator it;
std::sort(ranks.begin(), ranks.end()); // sorted in increasing order
for (it = ranks.begin(); it != ranks.end(); ++it) {
if (it == ranks.begin()) {
rank_list_name = std::to_string(*it);
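For reference, a standalone sketch of the naming rule the hunk above implements, assuming the format implied by the '0-1-3-5-7' example: sort the ranks in increasing order and join them with '-'. Switching to std::vector is why the member ranks.sort() becomes std::sort here.

```cpp
// Sketch (not MindSpore code) of the group-naming rule described above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

std::string GroupNameFromRanks(std::vector<int32_t> ranks) {
  std::sort(ranks.begin(), ranks.end());  // std::vector has no member sort(), so use std::sort
  std::string name;
  for (size_t i = 0; i < ranks.size(); ++i) {
    if (i != 0) name += "-";
    name += std::to_string(ranks[i]);
  }
  return name;
}

int main() {
  std::cout << GroupNameFromRanks({7, 5, 3, 1, 0}) << '\n';  // prints "0-1-3-5-7"
  return 0;
}
```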
......@@ -343,7 +343,8 @@ std::string DeviceManager::GenerateGroupNameByRanks(RankList ranks) {
// Create the group with the given devices and the given name. The GroupManager
// gm_ will create a new group only if there does not exist a group with the same
// name. Otherwise, let the pointer g point to that group.
Group DeviceManager::CreateGroup(const std::string& group_name, const std::list<mindspore::parallel::Device>& devices) {
Group DeviceManager::CreateGroup(const std::string& group_name,
const std::vector<mindspore::parallel::Device>& devices) {
if ((world_group() == NCCL_WORLD_GROUP) && (devices.size() != devices_.size())) {
MS_LOG(EXCEPTION) << "Do not support sub group for nccl";
}
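As the comment above notes, gm_ only builds a new group when the name is unseen; otherwise the existing group is returned. A minimal sketch of that caching contract follows (hypothetical types, not the MindSpore GroupManager API); it matches what TestGroupManager.test_CreateGroup later in this diff asserts.

```cpp
// Sketch (not MindSpore code) of the create-or-reuse contract described above.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Group {
  std::string name;
  std::vector<int> ranks;
};

class GroupCache {
 public:
  // Returns the cached group for `name` if present; otherwise creates and caches it.
  const Group& CreateGroup(const std::string& name, const std::vector<int>& ranks) {
    auto it = groups_.find(name);
    if (it != groups_.end()) return it->second;  // reuse the existing group
    return groups_.emplace(name, Group{name, ranks}).first->second;
  }

 private:
  std::map<std::string, Group> groups_;
};

int main() {
  GroupCache gm;
  gm.CreateGroup("1-2", {1, 2});
  // Same name with different ranks: the original "1-2" group is returned,
  // mirroring TestGroupManager.test_CreateGroup below.
  const Group& g = gm.CreateGroup("1-2", {3, 4});
  std::cout << g.ranks[0] << '-' << g.ranks[1] << '\n';  // prints "1-2"
  return 0;
}
```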
......@@ -360,7 +361,7 @@ Group DeviceManager::CreateGroup(const RankList& dev_ranks) {
}
std::string group_name = GenerateGroupNameByRanks(dev_ranks);
std::list<Device> dev_list = CreateDeviceListByRankList(dev_ranks);
auto dev_list = CreateDeviceListByRankList(dev_ranks);
return CreateGroup(group_name, dev_list);
}
......
......@@ -19,7 +19,7 @@
#include <cstdint>
#include <cstring>
#include <list>
#include <vector>
#include <map>
#include <memory>
#include <string>
......@@ -50,19 +50,19 @@ class Stage {
// This class is used in pipeline-parallelization. Available devices are partitioned into multiple stages.
// Currently, the function of pipeline-parallelization and this class are NOT implemented.
public:
explicit Stage(std::list<Device> devices) : devices_(std::move(devices)), number_(0), rank_(0) {
explicit Stage(std::vector<Device> devices) : devices_(std::move(devices)), number_(0), rank_(0) {
gm_ = GroupManager();
}
Stage(const std::list<mindspore::parallel::Device>& devices, int num, int rank);
Stage(const std::vector<mindspore::parallel::Device>& devices, int num, int rank);
~Stage() = default;
int GetStageNum() const { return number_; }
size_t GetDevicesNum() const { return devices_.size(); }
std::list<Device> GetDevicesList() { return devices_; }
std::vector<Device> GetDevicesList() { return devices_; }
int global_rank(Group* g) const;
private:
std::list<Device> devices_;
std::vector<Device> devices_;
int number_;
int32_t rank_;
GroupManager gm_;
......@@ -89,10 +89,10 @@ class DeviceManager {
RankList global_device_list(int32_t stage_id, int32_t rank, int32_t split_num) const;
Device CreateNewDeviceByRank(int32_t rank) const;
std::list<Device> CreateDeviceListByRankList(RankList ranks);
std::vector<Device> CreateDeviceListByRankList(RankList ranks);
std::string GenerateGroupNameByRanks(RankList dev_ranks);
Group CreateGroup(const std::string& group_name, const std::list<Device>& devices);
Group CreateGroup(const std::string& group_name, const std::vector<Device>& devices);
Group CreateGroup(const RankList& dev_ranks);
std::shared_ptr<Stage> GetStageById(int32_t stage_id);
......@@ -108,11 +108,11 @@ class DeviceManager {
std::string FindRankListNameByHashName(const std::string& hash_name);
private:
std::list<std::shared_ptr<Device>> devices_;
std::vector<std::shared_ptr<Device>> devices_;
// each stage has a list of devices
std::list<std::list<int32_t>> stage_devices_;
std::vector<std::vector<int32_t>> stage_devices_;
std::shared_ptr<Device> device_;
std::list<std::shared_ptr<Stage>> stages_;
std::vector<std::shared_ptr<Stage>> stages_;
GroupManager gm_;
std::string backend_;
......
......@@ -21,7 +21,7 @@
#include <utility>
#include <numeric>
#include <functional>
#include <list>
#include <vector>
#include "parallel/status.h"
#include "parallel/ops_info/operator_info.h"
......@@ -64,7 +64,7 @@ Status DeviceMatrix::GetDevicesAlongDim(const uint32_t& dim, RankList* devices)
}
RankList group;
std::list<RankList> local_group_list;
std::vector<RankList> local_group_list;
// lower than dim
int32_t step = 1;
......@@ -160,7 +160,7 @@ std::string ShapeToString(const Shape& shape) {
return str + "]";
}
std::string ListToString(const std::list<int32_t>& list) {
std::string ListToString(const std::vector<int32_t>& list) {
std::string str = "[";
for (auto& element : list) {
str += std::to_string(element) + ", ";
......
......@@ -20,7 +20,6 @@
#include <cstdint>
#include <string>
#include <vector>
#include <list>
#include "parallel/status.h"
#include "utils/convert_utils.h"
......@@ -28,7 +27,7 @@
namespace mindspore {
namespace parallel {
using RankList = std::list<int32_t>;
using RankList = std::vector<int32_t>;
using Shape = std::vector<int32_t>;
class DeviceMatrix {
......@@ -36,7 +35,7 @@ class DeviceMatrix {
DeviceMatrix(int32_t rank, RankList devices, Shape dev_shape);
DeviceMatrix() = default;
~DeviceMatrix() = default;
std::list<RankList> group_list() const { return group_list_; }
std::vector<RankList> group_list() const { return group_list_; }
Status CreateGroupList();
Status GetDevicesByTensorMap(const Shape& tensor_map, RankList* rank_list);
Status GetDevicesAlongDim(const uint32_t& dim, RankList* devices);
......@@ -46,11 +45,11 @@ class DeviceMatrix {
RankList dev_list_;
// From low dim to high dim. eg: [D0 D1 D2 D3]
Shape dev_shape_;
std::list<RankList> group_list_;
std::vector<RankList> group_list_;
};
std::string ShapeToString(const Shape& shape);
std::string ListToString(const std::list<int32_t>& list);
std::string ListToString(const std::vector<int32_t>& list);
} // namespace parallel
} // namespace mindspore
......
......@@ -17,7 +17,6 @@
#include "parallel/graph_util/generate_graph.h"
#include <algorithm>
#include <list>
#include <memory>
#include <string>
#include <utility>
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_GRAPH_UTIL_GENERATE_GRAPH_H_
#include <vector>
#include <list>
#include <memory>
#include <unordered_map>
#include <map>
......
......@@ -30,13 +30,13 @@ Group::Group() {
devices_.clear();
}
Status Group::Init(const std::string &name, const std::list<Device> &devices) {
Status Group::Init(const std::string &name, const std::vector<Device> &devices) {
this->name_ = name;
this->devices_ = devices;
return Status::SUCCESS;
}
std::list<Device> Group::GetDevicesList() const { return devices_; }
std::vector<Device> Group::GetDevicesList() const { return devices_; }
bool Group::IsInThisGroup(int32_t device_rank) {
for (auto &device : devices_) {
......@@ -66,7 +66,7 @@ Status Group::GetIndex(size_t *index) {
GroupManager::GroupManager() { groups_.clear(); }
Status GroupManager::CreateGroup(const std::string &group_name, const std::list<Device> &devices,
Status GroupManager::CreateGroup(const std::string &group_name, const std::vector<Device> &devices,
mindspore::parallel::Group *const group) {
// it is simple to use size to determine whether it is a world group
uint32_t world_size = 0;
......
......@@ -18,7 +18,7 @@
#define MINDSPORE_CCSRC_PARALLEL_GROUP_MANAGER_H_
#include <cstdint>
#include <list>
#include <vector>
#include <map>
#include <string>
......@@ -37,8 +37,8 @@ class Group {
public:
Group();
~Group() = default;
Status Init(const std::string& name, const std::list<Device>& devices);
std::list<Device> GetDevicesList() const;
Status Init(const std::string& name, const std::vector<Device>& devices);
std::vector<Device> GetDevicesList() const;
std::string name() const { return name_; }
bool IsInThisGroup(int32_t device_rank);
Status GetIndex(size_t* index);
......@@ -46,7 +46,7 @@ class Group {
private:
std::string name_;
std::list<Device> devices_;
std::vector<Device> devices_;
};
class GroupManager {
......@@ -54,7 +54,7 @@ class GroupManager {
GroupManager();
~GroupManager() = default;
Status CreateGroup(const std::string& name, const std::list<Device>& devices, Group* group);
Status CreateGroup(const std::string& name, const std::vector<Device>& devices, Group* group);
Status DestroyGroup(Group* group);
Status DestroyAllGroups();
Status GetRankID(const std::string& name, unsigned int* rank_id);
......
......@@ -19,7 +19,6 @@
#include <ir/value.h>
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ARITHMETIC_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -17,7 +17,6 @@
#ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
......
......@@ -18,7 +18,7 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BIAS_ADD_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_COMPARISON_FUNCTION_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include "ir/value.h"
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_DROPOUT_DO_MASK_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ELEMENTARY_FUNCTION_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include "ir/value.h"
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GETNEXT_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_L2_NORMALIZE_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -18,10 +18,10 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LOSS_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
#include "ir/value.h"
#include "parallel/ops_info/operator_info.h"
#include "parallel/ops_info/activation_info.h"
......
......@@ -397,7 +397,7 @@ Status MatMulBase::GenerateStrategies(int32_t stage_id) {
return FAILED;
}
CheckGlobalDeviceManager();
std::list<int32_t> dev_list = g_device_manager->GetDeviceListByStageId(stage_id);
std::vector<int32_t> dev_list = g_device_manager->GetDeviceListByStageId(stage_id);
size_t dev_num = dev_list.size();
Shape input0_shape = inputs_shape_[0], input1_shape = inputs_shape_[1];
if (transpose_a_) {
......
......@@ -18,10 +18,10 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_MATMUL_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
#include "ir/value.h"
#include "parallel/ops_info/operator_info.h"
#include "parallel/strategy.h"
......
......@@ -18,10 +18,10 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ONEHOT_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
#include "ir/value.h"
#include "parallel/ops_info/operator_info.h"
#include "parallel/auto_parallel/operator_costmodel.h"
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_OPERATOR_INFO_H_
#include <cstdint>
#include <list>
#include <map>
#include <memory>
#include <string>
......
......@@ -17,11 +17,11 @@
#ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
#include <memory>
#include "ir/value.h"
#include "parallel/ops_info/operator_info.h"
#include "parallel/strategy.h"
......
......@@ -198,7 +198,7 @@ ForwardOp CreatReduceMeanForwardOp(const std::vector<Group> &forward_group, cons
// Create RealDiv op
OperatorName operator1_name = REAL_DIV;
std::list<Device> device_list = forward_group[0].GetDevicesList();
std::vector<Device> device_list = forward_group[0].GetDevicesList();
auto divisor = static_cast<float>(device_list.size());
py::tuple tuple = py::make_tuple(divisor);
mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(tuple, dtype);
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_REDUCE_SUM_INFO_H_
#include <string>
#include <list>
#include <unordered_map>
#include <vector>
#include <memory>
......
......@@ -19,7 +19,6 @@
#include <ir/value.h>
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
......
......@@ -20,6 +20,7 @@
#include <vector>
#include <memory>
#include <string>
#include "parallel/ops_info/operator_info.h"
#include "parallel/auto_parallel/operator_costmodel.h"
#include "parallel/strategy.h"
......
......@@ -17,11 +17,11 @@
#ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_
#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
#include <memory>
#include "ir/value.h"
#include "parallel/ops_info/operator_info.h"
#include "parallel/strategy.h"
......
......@@ -17,11 +17,11 @@
#ifndef PARALLEL_OPS_INFO_DATASET_INFO_H_
#define PARALLEL_OPS_INFO_DATASET_INFO_H_
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
#include <memory>
#include "ir/value.h"
#include "parallel/ops_info/operator_info.h"
#include "parallel/strategy.h"
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_STATUS_H_
#include <cstdint>
#include <list>
namespace mindspore {
namespace parallel {
......
......@@ -19,7 +19,7 @@
#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <list>
#include <map>
#include <memory>
#include <string>
......
......@@ -18,7 +18,7 @@
#define MINDSPORE_CCSRC_PARALLEL_STEP_PARALLEL_H_
#include <vector>
#include <list>
#include <memory>
#include <unordered_map>
#include <map>
......
......@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_PARALLEL_STRATEGY_H_
#include <cstdint>
#include <list>
#include <string>
#include <vector>
#include <memory>
......
......@@ -22,7 +22,6 @@
#include <string>
#include <vector>
#include <utility>
#include <list>
#include "parallel/tensor_layout/redistribution_layout_transfer.h"
#include "parallel/tensor_layout/construct_operator.h"
......
......@@ -154,13 +154,13 @@ class TestDPAlgo : public UT::Common {
void TestDPAlgo::SetUp() {
cost_graph = std::make_shared<CostGraph>();
cost_graph->SetDeviceMemoryAndCostParameter();
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 10; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
stage_map.push_back(2);
......
......@@ -42,13 +42,13 @@ class TestEdgeCostModel : public UT::Common {
};
void TestEdgeCostModel::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 10; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
stage_map.push_back(2);
......
......@@ -53,13 +53,13 @@ class TestCostGraph : public UT::Common {
void TestCostGraph::SetUp() {
cost_graph.SetDeviceMemoryAndCostParameter();
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 10; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
stage_map.push_back(2);
......
......@@ -33,13 +33,13 @@ class TestMatMulCost : public UT::Common {
void TestMatMulCost::SetUp() {
mmcost_ = MatMulCost();
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......@@ -90,13 +90,13 @@ class TestActivationCost : public UT::Common {
void TestActivationCost::SetUp() {
ac_cost_ = ActivationCost();
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......@@ -142,13 +142,13 @@ class TestPReLUCost : public UT::Common {
void TestPReLUCost::SetUp() {
prelu_cost_ = PReLUCost();
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......
......@@ -69,8 +69,8 @@ void TestDeviceManager::TearDown() {
}
TEST_F(TestDeviceManager, test_dm_init_AND_get_device_list) {
std::list<int32_t> dev_list;
std::list<int32_t> stage_map;
std::vector<int32_t> dev_list;
std::vector<int32_t> stage_map;
int32_t local_dev = 0;
dev_list.push_back(5);
......@@ -85,12 +85,12 @@ TEST_F(TestDeviceManager, test_dm_init_AND_get_device_list) {
ASSERT_EQ(dm_.DeviceNum(), 4);
ASSERT_EQ(dm_.GetStageNum(), (int32_t)(2));
std::list<int32_t> dev_list_0 = dm_.GetDeviceListByStageId(0);
std::list<int32_t> dev_list_1 = dm_.GetDeviceListByStageId(1);
std::vector<int32_t> dev_list_0 = dm_.GetDeviceListByStageId(0);
std::vector<int32_t> dev_list_1 = dm_.GetDeviceListByStageId(1);
ASSERT_EQ(dev_list_0.size(), 2);
ASSERT_EQ(dev_list_1.size(), 2);
std::list<int32_t>::iterator it = dev_list_0.begin();
std::vector<int32_t>::iterator it = dev_list_0.begin();
ASSERT_EQ((*it), int32_t(5));
it++;
ASSERT_EQ((*it), int32_t(3));
......@@ -111,13 +111,13 @@ TEST_F(TestDeviceManager, test_CreateNewDeviceByRank) {
}
TEST_F(TestDeviceManager, test_CreateDeviceListByRankList) {
std::list<Device> dev_list;
std::list<int32_t> rlist;
std::vector<Device> dev_list;
std::vector<int32_t> rlist;
rlist.push_back(int32_t(2));
rlist.push_back(int32_t(1));
dev_list = dm_.CreateDeviceListByRankList(rlist);
std::list<Device>::iterator it = dev_list.begin();
std::vector<Device>::iterator it = dev_list.begin();
ASSERT_EQ(it->rank(), int32_t(2));
it++;
ASSERT_EQ(it->rank(), int32_t(1));
......
......@@ -35,9 +35,9 @@ TEST_F(TestDeviceMatrix, Test2Dgroup_list) {
Shape shape = {2, 3};
DeviceMatrix arr(0, dev_list, shape);
std::list<RankList> group_list;
std::vector<RankList> group_list;
if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
std::list<RankList> group_list_expect = {{0, 3}, {0, 1, 2}};
std::vector<RankList> group_list_expect = {{0, 3}, {0, 1, 2}};
ASSERT_EQ(group_list, group_list_expect);
}
......@@ -46,9 +46,9 @@ TEST_F(TestDeviceMatrix, Test3Dgroup_list) {
Shape shape = {2, 2, 3};
DeviceMatrix arr(5, dev_list, shape);
std::list<RankList> group_list;
std::vector<RankList> group_list;
if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
std::list<RankList> group_list_expect = {{5, 11}, {2, 5}, {3, 4, 5}};
std::vector<RankList> group_list_expect = {{5, 11}, {2, 5}, {3, 4, 5}};
ASSERT_EQ(group_list, group_list_expect);
}
......@@ -57,9 +57,9 @@ TEST_F(TestDeviceMatrix, Test4DGetAlongDim) {
Shape shape = {2, 1, 4, 2};
DeviceMatrix arr(5, dev_list, shape);
std::list<RankList> group_list;
std::vector<RankList> group_list;
if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
std::list<RankList> group_list_expect = {{5, 13}, {5}, {1, 3, 5, 7}, {4, 5}};
std::vector<RankList> group_list_expect = {{5, 13}, {5}, {1, 3, 5, 7}, {4, 5}};
ASSERT_EQ(group_list, group_list_expect);
}
......@@ -69,9 +69,9 @@ TEST_F(TestDeviceMatrix, Test5DGetAlongDim) {
Shape shape = {3, 4, 2, 3, 2};
DeviceMatrix arr(5, dev_list, shape);
std::list<RankList> group_list;
std::vector<RankList> group_list;
if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list();
std::list<RankList> group_list_expect = {{5, 53, 101}, {5, 17, 29, 41}, {5, 11}, {1, 3, 5}, {4, 5}};
std::vector<RankList> group_list_expect = {{5, 53, 101}, {5, 17, 29, 41}, {5, 11}, {1, 3, 5}, {4, 5}};
ASSERT_EQ(group_list, group_list_expect);
}
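A standalone sketch consistent with these test expectations, assuming the device matrix is laid out row-major over consecutive ranks (last dimension varying fastest): the group along a dimension collects every rank that differs from the given rank only in that dimension's coordinate.

```cpp
// Sketch (not MindSpore code) of what the DeviceMatrix tests above expect.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int32_t> GroupAlongDim(int32_t rank, const std::vector<int32_t>& shape, size_t dim) {
  // Stride of `dim`: product of the dimensions to its right (last dim varies fastest).
  int32_t stride = 1;
  for (size_t i = dim + 1; i < shape.size(); ++i) stride *= shape[i];
  int32_t coord = (rank / stride) % shape[dim];  // this rank's coordinate in `dim`
  int32_t base = rank - coord * stride;          // same rank with that coordinate zeroed
  std::vector<int32_t> group;
  for (int32_t c = 0; c < shape[dim]; ++c) group.push_back(base + c * stride);
  return group;
}

int main() {
  // Reproduces the Test3Dgroup_list expectation: rank 5 in shape {2, 2, 3}
  // yields {5, 11}, {2, 5}, {3, 4, 5} along dims 0, 1 and 2.
  std::vector<int32_t> shape = {2, 2, 3};
  for (size_t dim = 0; dim < shape.size(); ++dim) {
    for (int32_t r : GroupAlongDim(5, shape, dim)) std::cout << r << ' ';
    std::cout << '\n';
  }
  return 0;
}
```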
......
......@@ -42,7 +42,7 @@ void TestGroup::TearDown() {
Status TestGroup::Init() {
std::string gname = "1-2";
std::list<Device> dev_list;
std::vector<Device> dev_list;
Device one = Device(int32_t(1));
dev_list.push_back(one);
Device two = Device(int32_t(2));
......@@ -55,8 +55,8 @@ TEST_F(TestGroup, test_Init) { ASSERT_EQ(Init(), Status::SUCCESS); }
TEST_F(TestGroup, test_GetDevicesList) {
Init();
std::list<Device> res_dev_list = gp.GetDevicesList();
std::list<Device>::iterator it = res_dev_list.begin();
std::vector<Device> res_dev_list = gp.GetDevicesList();
std::vector<Device>::iterator it = res_dev_list.begin();
ASSERT_EQ(it->rank(), int32_t(1));
it++;
ASSERT_EQ(it->rank(), int32_t(2));
......@@ -88,7 +88,7 @@ void TestGroupManager::TearDown() {
Status TestGroupManager::Init(Group** gp_ptr) {
std::string gname = "1-2";
std::list<Device> dev_list;
std::vector<Device> dev_list;
Device one = Device(int32_t(1));
dev_list.push_back(one);
Device two = Device(int32_t(2));
......@@ -102,15 +102,15 @@ TEST_F(TestGroupManager, test_CreateGroup) {
Group* gp_ptr = new Group();
ASSERT_EQ(Init(&gp_ptr), Status::SUCCESS);
std::list<Device> res_dev_list = gp_ptr->GetDevicesList();
std::list<Device>::iterator it = res_dev_list.begin();
std::vector<Device> res_dev_list = gp_ptr->GetDevicesList();
std::vector<Device>::iterator it = res_dev_list.begin();
ASSERT_EQ(it->rank(), int32_t(1));
it++;
ASSERT_EQ(it->rank(), int32_t(2));
delete gp_ptr;
// testing for creating a group with an existing group name
std::list<Device> dev_list2;
std::vector<Device> dev_list2;
Device three = Device(int32_t(3));
dev_list2.push_back(three);
Device four = Device(int32_t(4));
......@@ -119,8 +119,8 @@ TEST_F(TestGroupManager, test_CreateGroup) {
ASSERT_EQ(gm.CreateGroup("1-2", dev_list2, gp_ptr), Status::SUCCESS);
ASSERT_STREQ(gp_ptr->name().data(), "1-2");
std::list<Device> res_dev_list2 = gp_ptr->GetDevicesList();
std::list<Device>::iterator it2 = res_dev_list2.begin();
std::vector<Device> res_dev_list2 = gp_ptr->GetDevicesList();
std::vector<Device>::iterator it2 = res_dev_list2.begin();
ASSERT_EQ(it2->rank(), int32_t(1));
it2++;
ASSERT_EQ(it2->rank(), int32_t(2));
......@@ -136,8 +136,8 @@ TEST_F(TestGroupManager, test_FindGroup) {
ASSERT_EQ(gm.FindGroup(gname, &gp_ptr2), Status::SUCCESS);
std::list<Device> res_dev_list = gp_ptr2->GetDevicesList();
std::list<Device>::iterator it = res_dev_list.begin();
std::vector<Device> res_dev_list = gp_ptr2->GetDevicesList();
std::vector<Device>::iterator it = res_dev_list.begin();
ASSERT_EQ(it->rank(), int32_t(1));
it++;
ASSERT_EQ(it->rank(), int32_t(2));
......
......@@ -38,13 +38,13 @@ class TestActivationInfo : public UT::Common {
};
void TestActivationInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......
......@@ -40,13 +40,13 @@ class TestActivation : public UT::Common {
};
void TestActivation::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......
......@@ -38,13 +38,13 @@ class TestDropoutDoMaskInfo : public UT::Common {
};
void TestDropoutDoMaskInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 34; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(32);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestGeluInfo : public UT::Common {
};
void TestGeluInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 130; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(128);
stage_map.push_back(2);
......
......@@ -34,13 +34,13 @@ class TestGenerateStrategy : public UT::Common {
};
void TestGenerateStrategy::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 10; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestDropoutGenMaskInfo : public UT::Common {
};
void TestDropoutGenMaskInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 10; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestGetNextInfo : public UT::Common {
};
void TestGetNextInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 8; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
int32_t local_dev = 0;
// create a new g_device_manager
......
......@@ -38,13 +38,13 @@ class TestL2NormalizeInfo : public UT::Common {
};
void TestL2NormalizeInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 34; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(32);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestLogSoftmaxInfo : public UT::Common {
};
void TestLogSoftmaxInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 130; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(128);
stage_map.push_back(2);
......
......@@ -42,13 +42,13 @@ class TestMatmulInfo : public UT::Common {
};
void TestMatmulInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......
......@@ -38,13 +38,13 @@ class TestOneHotInfo : public UT::Common {
};
void TestOneHotInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 10; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestOneHotInfo2 : public UT::Common {
};
void TestOneHotInfo2::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 10; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(8);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestPowInfo : public UT::Common {
};
void TestPowInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 66; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(64);
stage_map.push_back(2);
......
......@@ -39,13 +39,13 @@ class TestPReLUInfo : public UT::Common {
};
void TestPReLUInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
int32_t local_dev = 0;
......
......@@ -39,13 +39,13 @@ class TestReduceSumInfo : public UT::Common {
void TestReduceSumInfo::SetUp() {
UT::InitPythonPath();
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 34; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(32);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestReshapeInfo : public UT::Common {
};
void TestReshapeInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 34; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(32);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestSoftmaxLoss : public UT::Common {
};
void TestSoftmaxLoss::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 65; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(64);
stage_map.push_back(1);
......
......@@ -39,13 +39,13 @@ class TestSoftmaxInfo : public UT::Common {
};
void TestSoftmaxInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 130; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(128);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestTanhInfo : public UT::Common {
};
void TestTanhInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 130; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(128);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestTensorAddInfo : public UT::Common {
};
void TestTensorAddInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 34; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(32);
stage_map.push_back(2);
......
......@@ -38,13 +38,13 @@ class TestTmpIdentityInfo : public UT::Common {
};
void TestTmpIdentityInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......
......@@ -38,13 +38,13 @@ class TestTransposeInfo : public UT::Common {
};
void TestTransposeInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 34; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(32);
stage_map.push_back(2);
......
......@@ -32,13 +32,13 @@ class TestStepAutoParallel : public UT::Common {
};
void TestStepAutoParallel::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 20; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(16);
stage_map.push_back(4);
......
......@@ -34,13 +34,13 @@ class TestStepParallel : public UT::Common {
void TestStepParallel::SetUp() { UT::InitPythonPath(); }
void Init_Device_Manager() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 20; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(16);
stage_map.push_back(4);
......
......@@ -39,12 +39,12 @@ class TestConstructOperator : public UT::Common {
};
void TestConstructOperator::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......
......@@ -28,13 +28,13 @@ class TestRedistributionOperatorInfer : public UT::Common {
TestRedistributionOperatorInfer() {}
void SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 1050; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(1024);
stage_map.push_back(26);
......
......@@ -33,7 +33,7 @@ class TestTensorRedistribution : public UT::Common {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(16);
stage_map.push_back(4);
......
......@@ -37,13 +37,13 @@ class TestVirtualDatasetInfo : public UT::Common {
};
void TestVirtualDatasetInfo::SetUp() {
std::list<int32_t> dev_list;
std::vector<int32_t> dev_list;
for (int32_t i = 0; i < 130; i++) {
dev_list.push_back(i);
}
std::list<int32_t> stage_map;
std::vector<int32_t> stage_map;
stage_map.push_back(16);
stage_map.push_back(114);
......