提交 842fa507 编写于 作者: W wxyu

MS-380 Update resource loader and executor, work until all finished


Former-commit-id: 712a0aceaa4c8d4ebbea40f5d18f524afeb38559
上级 41f3a2ac
......@@ -31,6 +31,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-377 - Improve process thread trigger in ResourceMgr, Scheduler and TaskTable
- MS-378 - Debug and Update normal_test in scheduler unittest
- MS-379 - Add Dump implementation in Resource
- MS-380 - Update resource loader and executor, work until all finished
## New Feature
- MS-343 - Implement ResourceMgr
......
......@@ -12,7 +12,7 @@ namespace milvus {
namespace engine {
void
push_task(ResourcePtr &self, ResourcePtr &other) {
push_task(const ResourcePtr &self, const ResourcePtr &other) {
auto &self_task_table = self->task_table();
auto &other_task_table = other->task_table();
CacheMgr cache;
......@@ -31,8 +31,7 @@ Action::PushTaskToNeighbour(const ResourceWPtr &res) {
if (auto self = res.lock()) {
for (auto &neighbour : self->GetNeighbours()) {
if (auto n = neighbour.neighbour_node.lock()) {
auto neighbour = std::static_pointer_cast<Resource>(n);
push_task(self, neighbour);
push_task(self, std::static_pointer_cast<Resource>(n));
}
}
}
......
......@@ -99,8 +99,11 @@ void Resource::loader_function() {
std::unique_lock<std::mutex> lock(load_mutex_);
load_cv_.wait(lock, [&] { return load_flag_; });
load_flag_ = false;
auto task_item = pick_task_load();
if (task_item) {
while (true) {
auto task_item = pick_task_load();
if (task_item == nullptr) {
break;
}
LoadFile(task_item->task);
// TODO: wrapper loaded
task_item->state = TaskTableItemState::LOADED;
......@@ -109,6 +112,7 @@ void Resource::loader_function() {
subscriber_(std::static_pointer_cast<Event>(event));
}
}
}
}
......@@ -121,8 +125,11 @@ void Resource::executor_function() {
std::unique_lock<std::mutex> lock(exec_mutex_);
exec_cv_.wait(lock, [&] { return exec_flag_; });
exec_flag_ = false;
auto task_item = pick_task_execute();
if (task_item) {
while (true) {
auto task_item = pick_task_execute();
if (task_item == nullptr) {
break;
}
Process(task_item->task);
task_item->state = TaskTableItemState::EXECUTED;
if (subscriber_) {
......@@ -130,6 +137,7 @@ void Resource::executor_function() {
subscriber_(std::static_pointer_cast<Event>(event));
}
}
}
}
......
......@@ -25,12 +25,20 @@ protected:
disk_resource_ = ResourceFactory::Create("disk");
cpu_resource_ = ResourceFactory::Create("cpu");
gpu_resource_ = ResourceFactory::Create("gpu");
flag_ = false;
resources_.push_back(disk_resource_);
resources_.push_back(cpu_resource_);
resources_.push_back(gpu_resource_);
auto subscriber = [&](EventPtr event) {
std::unique_lock<std::mutex> lock(mutex_);
if (event->Type() == EventType::COPY_COMPLETED || event->Type() == EventType::FINISH_TASK) {
flag_ = true;
auto subscriber = [&](EventPtr event) {
if (event->Type() == EventType::COPY_COMPLETED) {
std::lock_guard<std::mutex> lock(load_mutex_);
++load_count_;
cv_.notify_one();
}
if (event->Type() == EventType::FINISH_TASK) {
std::lock_guard<std::mutex> lock(load_mutex_);
++exec_count_;
cv_.notify_one();
}
};
......@@ -52,42 +60,82 @@ protected:
}
void
Wait() {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [&] { return flag_; });
WaitLoader(uint64_t count) {
std::unique_lock<std::mutex> lock(load_mutex_);
cv_.wait(lock, [&] { return load_count_ == count; });
}
void
WaitExecutor(uint64_t count) {
std::unique_lock<std::mutex> lock(exec_mutex_);
cv_.wait(lock, [&] { return exec_count_ == count; });
}
ResourcePtr disk_resource_;
ResourcePtr cpu_resource_;
ResourcePtr gpu_resource_;
bool flag_;
std::mutex mutex_;
std::vector<ResourcePtr> resources_;
uint64_t load_count_ = 0;
uint64_t exec_count_ = 0;
std::mutex load_mutex_;
std::mutex exec_mutex_;
std::condition_variable cv_;
};
TEST_F(ResourceTest, cpu_resource_test) {
auto task = std::make_shared<TestTask>();
cpu_resource_->task_table().Put(task);
const uint64_t NUM = 100;
std::vector<std::shared_ptr<TestTask>> tasks;
for (uint64_t i = 0; i < NUM; ++i) {
auto task = std::make_shared<TestTask>();
tasks.push_back(task);
cpu_resource_->task_table().Put(task);
}
cpu_resource_->WakeupLoader();
Wait();
ASSERT_EQ(task->load_count_, 1);
flag_ = false;
WaitLoader(NUM);
// std::cout << "after WakeupLoader" << std::endl;
// std::cout << cpu_resource_->task_table().Dump();
for (uint64_t i = 0; i < NUM; ++i) {
ASSERT_EQ(tasks[i]->load_count_, 1);
}
cpu_resource_->WakeupExecutor();
Wait();
ASSERT_EQ(task->exec_count_, 1);
WaitExecutor(NUM);
// std::cout << "after WakeupExecutor" << std::endl;
// std::cout << cpu_resource_->task_table().Dump();
for (uint64_t i = 0; i < NUM; ++i) {
ASSERT_EQ(tasks[i]->exec_count_, 1);
}
}
TEST_F(ResourceTest, gpu_resource_test) {
auto task = std::make_shared<TestTask>();
gpu_resource_->task_table().Put(task);
const uint64_t NUM = 100;
std::vector<std::shared_ptr<TestTask>> tasks;
for (uint64_t i = 0; i < NUM; ++i) {
auto task = std::make_shared<TestTask>();
tasks.push_back(task);
gpu_resource_->task_table().Put(task);
}
gpu_resource_->WakeupLoader();
Wait();
ASSERT_EQ(task->load_count_, 1);
flag_ = false;
WaitLoader(NUM);
// std::cout << "after WakeupLoader" << std::endl;
// std::cout << cpu_resource_->task_table().Dump();
for (uint64_t i = 0; i < NUM; ++i) {
ASSERT_EQ(tasks[i]->load_count_, 1);
}
gpu_resource_->WakeupExecutor();
Wait();
ASSERT_EQ(task->exec_count_, 1);
WaitExecutor(NUM);
// std::cout << "after WakeupExecutor" << std::endl;
// std::cout << cpu_resource_->task_table().Dump();
for (uint64_t i = 0; i < NUM; ++i) {
ASSERT_EQ(tasks[i]->exec_count_, 1);
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册