Commit 4f2c45ab
Authored Sep 09, 2019 by wxyu
MS-527 Update scheduler_test and enable it
Former-commit-id: 96fdfdc0c46a0e4cf1438ad3e30cbfb2c42b4416
Parent: 24bdbee0
Showing 2 changed files with 229 additions and 222 deletions.
- cpp/CHANGELOG.md (+1, -0)
- cpp/unittest/scheduler/scheduler_test.cpp (+228, -222)
cpp/CHANGELOG.md
```diff
@@ -105,6 +105,7 @@ Please mark all change in change log and use the ticket from JIRA.
 - MS-520 - Update resource_test in scheduler
 - MS-524 - Add some unittest in event_test and resource_test
 - MS-525 - Disable parallel reduce in SearchTask
+- MS-527 - Update scheduler_test and enable it

 ## New Feature
 - MS-343 - Implement ResourceMgr
```
cpp/unittest/scheduler/scheduler_test.cpp
```diff
@@ -6,6 +6,7 @@
 #include "scheduler/Scheduler.h"
 #include <gtest/gtest.h>
 #include <src/scheduler/tasklabel/DefaultLabel.h>
+#include <src/server/ServerConfig.h>
 #include "cache/DataObj.h"
 #include "cache/GpuCacheMgr.h"
 #include "scheduler/task/TestTask.h"
```
```diff
@@ -15,233 +16,238 @@
```

In the old version of the file, everything from here down was commented out with `//`; this commit deletes that commented-out block (the 222 removed lines) and lands the enabled code, with a few updates along the way (for example, the stub return types change from `server::KnowhereError` to `ErrorCode`). The new contents of the hunk:

```cpp
#include "wrapper/knowhere/vec_index.h"
#include "scheduler/tasklabel/SpecResLabel.h"

namespace zilliz {
namespace milvus {
namespace engine {
```
```cpp
class MockVecIndex : public engine::VecIndex {
public:
    virtual ErrorCode BuildAll(const long &nb,
                               const float *xb,
                               const long *ids,
                               const engine::Config &cfg,
                               const long &nt = 0,
                               const float *xt = nullptr) {
    }

    engine::VecIndexPtr Clone() override {
        return zilliz::milvus::engine::VecIndexPtr();
    }

    int64_t GetDeviceId() override {
        return 0;
    }

    engine::IndexType GetType() override {
        return engine::IndexType::INVALID;
    }

    virtual ErrorCode Add(const long &nb,
                          const float *xb,
                          const long *ids,
                          const engine::Config &cfg = engine::Config()) {
    }

    virtual ErrorCode Search(const long &nq,
                             const float *xq,
                             float *dist,
                             long *ids,
                             const engine::Config &cfg = engine::Config()) {
    }

    engine::VecIndexPtr CopyToGpu(const int64_t &device_id, const engine::Config &cfg) override {
    }

    engine::VecIndexPtr CopyToCpu(const engine::Config &cfg) override {
    }

    virtual int64_t Dimension() {
        return dimension_;
    }

    virtual int64_t Count() {
        return ntotal_;
    }

    virtual zilliz::knowhere::BinarySet Serialize() {
        zilliz::knowhere::BinarySet binset;
        return binset;
    }

    virtual ErrorCode Load(const zilliz::knowhere::BinarySet &index_binary) {
    }

public:
    int64_t dimension_ = 512;
    int64_t ntotal_ = 0;
};
```
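As a quick sanity check, here is a minimal sketch (not part of the commit; the test name and values are illustrative) of how the stub behaves:

```cpp
#include <gtest/gtest.h>

// Hypothetical smoke test for MockVecIndex; everything it touches is
// defined in the class above.
TEST(MockVecIndexSketch, ReportsConfiguredShape) {
    auto *mock = new MockVecIndex();
    mock->dimension_ = 128;  // override the 512 default
    mock->ntotal_ = 42;
    engine::VecIndexPtr index(mock);  // smart pointer takes ownership

    ASSERT_EQ(index->Dimension(), 128);
    ASSERT_EQ(index->Count(), 42);
    ASSERT_EQ(index->GetType(), engine::IndexType::INVALID);
}
```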
```cpp
class SchedulerTest : public testing::Test {
protected:
    void
    SetUp() override {
        server::ConfigNode &config = server::ServerConfig::GetInstance().GetConfig(server::CONFIG_CACHE);
        config.AddSequenceItem(server::CONFIG_GPU_IDS, "0");
        config.AddSequenceItem(server::CONFIG_GPU_IDS, "1");

        ResourcePtr cpu = ResourceFactory::Create("cpu", "CPU", 0, true, false);
        ResourcePtr gpu_0 = ResourceFactory::Create("gpu0", "GPU", 0);
        ResourcePtr gpu_1 = ResourceFactory::Create("gpu1", "GPU", 1);

        res_mgr_ = std::make_shared<ResourceMgr>();
        cpu_resource_ = res_mgr_->Add(std::move(cpu));
        gpu_resource_0_ = res_mgr_->Add(std::move(gpu_0));
        gpu_resource_1_ = res_mgr_->Add(std::move(gpu_1));

        auto PCIE = Connection("IO", 11000.0);
        res_mgr_->Connect("cpu", "gpu0", PCIE);
        res_mgr_->Connect("cpu", "gpu1", PCIE);

        scheduler_ = std::make_shared<Scheduler>(res_mgr_);

        res_mgr_->Start();
        scheduler_->Start();
    }

    void
    TearDown() override {
        scheduler_->Stop();
        res_mgr_->Stop();
    }

    ResourceWPtr cpu_resource_;
    ResourceWPtr gpu_resource_0_;
    ResourceWPtr gpu_resource_1_;

    ResourceMgrPtr res_mgr_;
    std::shared_ptr<Scheduler> scheduler_;
};
```
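If the unittest target doesn't already link `gtest_main`, the entry point for these fixtures would be the usual gtest boilerplate (a sketch; not shown in this diff):

```cpp
#include <gtest/gtest.h>

int main(int argc, char **argv) {
    ::testing::InitGoogleTest(&argc, argv);  // parse gtest flags
    return RUN_ALL_TESTS();                  // runs SchedulerTest / SchedulerTest2
}
```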
```cpp
void
insert_dummy_index_into_gpu_cache(uint64_t device_id) {
    MockVecIndex *mock_index = new MockVecIndex();
    mock_index->ntotal_ = 1000;
    engine::VecIndexPtr index(mock_index);

    cache::DataObjPtr obj = std::make_shared<cache::DataObj>(index);

    cache::GpuCacheMgr::GetInstance(device_id)->InsertItem("location", obj);
}
```
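This helper seeds the GPU cache of the given device with a dummy index under the key `"location"`. The tests below create file schemas with `location_ = "location"`, so in `OnLoadCompleted` the data already appears resident on gpu1 once tasks arrive there.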
```cpp
TEST_F(SchedulerTest, OnLoadCompleted) {
    const uint64_t NUM = 10;
    std::vector<std::shared_ptr<TestTask>> tasks;
    TableFileSchemaPtr dummy = std::make_shared<meta::TableFileSchema>();
    dummy->location_ = "location";

    insert_dummy_index_into_gpu_cache(1);

    for (uint64_t i = 0; i < NUM; ++i) {
        auto task = std::make_shared<TestTask>(dummy);
        task->label() = std::make_shared<DefaultLabel>();
        tasks.push_back(task);
        cpu_resource_.lock()->task_table().Put(task);
    }

    sleep(3);
    ASSERT_EQ(res_mgr_->GetResource(ResourceType::GPU, 1)->task_table().Size(), NUM);
}
```
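Two things worth noting. First, the old commented-out version of this test was named `OnCopyCompleted`; the enabled version renames it to `OnLoadCompleted`. Second, the fixed `sleep(3)` makes the assertion timing-dependent; a hedged alternative (a hypothetical `WaitFor` helper, not part of the commit) would poll the task table instead:

```cpp
#include <chrono>
#include <thread>

// Hypothetical helper: poll a predicate until it holds or a deadline passes,
// instead of sleeping for a fixed three seconds.
template <typename Pred>
bool WaitFor(Pred pred,
             std::chrono::milliseconds timeout = std::chrono::seconds(10)) {
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    while (std::chrono::steady_clock::now() < deadline) {
        if (pred()) {
            return true;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
    }
    return pred();  // final check at the deadline
}

// Usage sketch inside the test body:
//   ASSERT_TRUE(WaitFor([&] {
//       return res_mgr_->GetResource(ResourceType::GPU, 1)->task_table().Size() == NUM;
//   }));
```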
```cpp
TEST_F(SchedulerTest, PushTaskToNeighbourRandomlyTest) {
    const uint64_t NUM = 10;
    std::vector<std::shared_ptr<TestTask>> tasks;
    TableFileSchemaPtr dummy1 = std::make_shared<meta::TableFileSchema>();
    dummy1->location_ = "location";

    tasks.clear();

    for (uint64_t i = 0; i < NUM; ++i) {
        auto task = std::make_shared<TestTask>(dummy1);
        task->label() = std::make_shared<DefaultLabel>();
        tasks.push_back(task);
        cpu_resource_.lock()->task_table().Put(task);
    }

    sleep(3);
//    ASSERT_EQ(res_mgr_->GetResource(ResourceType::GPU, 1)->task_table().Size(), NUM);
}
```
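Note that the final assertion stays commented out even in the enabled code, presumably because random neighbour pushing can split the ten tasks between gpu0 and gpu1, leaving the per-GPU count nondeterministic.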
```cpp
class SchedulerTest2 : public testing::Test {
protected:
    void
    SetUp() override {
        ResourcePtr disk = ResourceFactory::Create("disk", "DISK", 0, true, false);
        ResourcePtr cpu0 = ResourceFactory::Create("cpu0", "CPU", 0, true, false);
        ResourcePtr cpu1 = ResourceFactory::Create("cpu1", "CPU", 1, true, false);
        ResourcePtr cpu2 = ResourceFactory::Create("cpu2", "CPU", 2, true, false);
        ResourcePtr gpu0 = ResourceFactory::Create("gpu0", "GPU", 0, true, true);
        ResourcePtr gpu1 = ResourceFactory::Create("gpu1", "GPU", 1, true, true);

        res_mgr_ = std::make_shared<ResourceMgr>();
        disk_ = res_mgr_->Add(std::move(disk));
        cpu_0_ = res_mgr_->Add(std::move(cpu0));
        cpu_1_ = res_mgr_->Add(std::move(cpu1));
        cpu_2_ = res_mgr_->Add(std::move(cpu2));
        gpu_0_ = res_mgr_->Add(std::move(gpu0));
        gpu_1_ = res_mgr_->Add(std::move(gpu1));

        auto IO = Connection("IO", 5.0);
        auto PCIE1 = Connection("PCIE", 11.0);
        auto PCIE2 = Connection("PCIE", 20.0);
        res_mgr_->Connect("disk", "cpu0", IO);
        res_mgr_->Connect("cpu0", "cpu1", IO);
        res_mgr_->Connect("cpu1", "cpu2", IO);
        res_mgr_->Connect("cpu0", "cpu2", IO);
        res_mgr_->Connect("cpu1", "gpu0", PCIE1);
        res_mgr_->Connect("cpu2", "gpu1", PCIE2);

        scheduler_ = std::make_shared<Scheduler>(res_mgr_);

        res_mgr_->Start();
        scheduler_->Start();
    }

    void
    TearDown() override {
        scheduler_->Stop();
        res_mgr_->Stop();
    }

    ResourceWPtr disk_;
    ResourceWPtr cpu_0_;
    ResourceWPtr cpu_1_;
    ResourceWPtr cpu_2_;
    ResourceWPtr gpu_0_;
    ResourceWPtr gpu_1_;

    ResourceMgrPtr res_mgr_;
    std::shared_ptr<Scheduler> scheduler_;
};
```
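For orientation, `SetUp()` wires the resources into the following graph (connection name and bandwidth taken from the `Connection` arguments):

- disk ↔ cpu0: IO (5.0)
- cpu0 ↔ cpu1, cpu1 ↔ cpu2, cpu0 ↔ cpu2: IO (5.0)
- cpu1 ↔ gpu0: PCIE (11.0)
- cpu2 ↔ gpu1: PCIE (20.0)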
```cpp
TEST_F(SchedulerTest2, SpecifiedResourceTest) {
    const uint64_t NUM = 10;
    std::vector<std::shared_ptr<TestTask>> tasks;
    TableFileSchemaPtr dummy = std::make_shared<meta::TableFileSchema>();
    dummy->location_ = "location";

    for (uint64_t i = 0; i < NUM; ++i) {
        std::shared_ptr<TestTask> task = std::make_shared<TestTask>(dummy);
        task->label() = std::make_shared<SpecResLabel>(disk_);
        tasks.push_back(task);
        disk_.lock()->task_table().Put(task);
    }

//    ASSERT_EQ(res_mgr_->GetResource(ResourceType::GPU, 1)->task_table().Size(), NUM);
}

}
}
}
```