#include <gtest/gtest.h>

#include <unistd.h>

#include <chrono>
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <vector>

#include "scheduler/ResourceFactory.h"
#include "scheduler/ResourceMgr.h"
#include "scheduler/Scheduler.h"
#include "scheduler/task/TestTask.h"
#include "utils/Log.h"


using namespace zilliz::milvus::engine;

// End-to-end smoke test of the scheduler pipeline:
// build a resource topology (disk -> cpu -> {gpu1, gpu2}), enqueue tasks on
// the disk resource, let the scheduler move them through the graph, and
// verify each task was loaded and executed exactly once.
TEST(normal_test, test1) {
    // ResourceMgr only composes resources and provides a unified event stream.
    auto res_mgr = std::make_shared<ResourceMgr>();
    auto disk = res_mgr->Add(ResourceFactory::Create("disk", "ssd"));
    auto cpu = res_mgr->Add(ResourceFactory::Create("cpu"));
    auto gpu1 = res_mgr->Add(ResourceFactory::Create("gpu"));
    auto gpu2 = res_mgr->Add(ResourceFactory::Create("gpu"));

    // Topology: disk --IO--> cpu --PCIE--> {gpu1, gpu2}.
    auto IO = Connection("IO", 500.0);
    // Fixed copy-paste bug: this connection was previously mislabeled "IO".
    auto PCIE = Connection("PCIE", 11000.0);
    res_mgr->Connect(disk, cpu, IO);
    res_mgr->Connect(cpu, gpu1, PCIE);
    res_mgr->Connect(cpu, gpu2, PCIE);

    res_mgr->Start();

    // Was `new Scheduler(res_mgr)` with no delete — a leak; RAII instead.
    auto scheduler = std::make_shared<Scheduler>(res_mgr);
    scheduler->Start();

    // Enqueue NUM_TASK test tasks on the disk resource's task table.
    const uint64_t NUM_TASK = 10;
    std::vector<std::shared_ptr<TestTask>> tasks;
    for (uint64_t i = 0; i < NUM_TASK; ++i) {
        if (auto observe = disk.lock()) {
            auto task = std::make_shared<TestTask>();
            tasks.push_back(task);
            observe->task_table().Put(task);
        }
    }

    // Dump every resource's task table, bracketed by `tag` markers, so the
    // before/after scheduling states can be compared visually. Replaces two
    // previously duplicated verbatim dump blocks; output bytes are unchanged.
    auto dump_all = [&](const std::string& tag) {
        if (auto disk_r = disk.lock()) {
            if (auto cpu_r = cpu.lock()) {
                if (auto gpu1_r = gpu1.lock()) {
                    if (auto gpu2_r = gpu2.lock()) {
                        std::cout << "<<<<<<<<<<" << tag << "<<<<<<<<<<" << std::endl;
                        std::cout << "disk:" << std::endl;
                        std::cout << disk_r->task_table().Dump() << std::endl;
                        std::cout << "cpu:" << std::endl;
                        std::cout << cpu_r->task_table().Dump() << std::endl;
                        std::cout << "gpu1:" << std::endl;
                        std::cout << gpu1_r->task_table().Dump() << std::endl;
                        std::cout << "gpu2:" << std::endl;
                        std::cout << gpu2_r->task_table().Dump() << std::endl;
                        std::cout << ">>>>>>>>>>" << tag << ">>>>>>>>>>" << std::endl;
                    }
                }
            }
        }
    };

    dump_all("before");

    // Give the scheduler time to drain the task tables; was a bare POSIX
    // `sleep(1)` — the std::chrono form is portable and type-checked.
    std::this_thread::sleep_for(std::chrono::seconds(1));

    dump_all("after");

    scheduler->Stop();
    res_mgr->Stop();

    // Every task must have been loaded and executed exactly once.
    for (uint64_t i = 0; i < NUM_TASK; ++i) {
        ASSERT_EQ(tasks[i]->load_count_, 1);
        ASSERT_EQ(tasks[i]->exec_count_, 1);
    }
}