/**
 * \file src/core/test/utils/thread_pool.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "megbrain/utils/thread_pool.h"
#include "megbrain/comp_node.h"
#include "megbrain/system.h"
#include "megbrain/test/helper.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/utility.h"
#include <atomic>
#include <random>

#if MGB_HAVE_THREAD
using namespace mgb;
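
// Dispatch the same element-wise squaring job to a pool with a single worker
// and to one with four workers, then check that both produce the reference
// result and that every sub-task ran exactly once.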
TEST(TestThreadPool, BASIC) {
    auto thread_pool0 = std::make_shared<ThreadPool>(1u);
    auto thread_pool1 = std::make_shared<ThreadPool>(4u);
    ASSERT_EQ(thread_pool0->nr_threads(), static_cast<size_t>(1));
    ASSERT_EQ(thread_pool1->nr_threads(), static_cast<size_t>(4));

    std::vector<int> source(100), dst0(100), dst1(100), truth(100);
    std::atomic_size_t count0{0}, count1{0};
    for (int i = 0; i < 100; i++) {
        source[i] = i;
        dst0[i] = 0;
        dst1[i] = 0;
        truth[i] = i * i;
    }
    size_t total_task = 50;
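    // With 100 elements and 50 tasks, each task squares a contiguous slice of
    // two elements of `source`; the first lambda argument is the task index,
    // the second one is unused here.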
    auto func0 = [&](size_t index, size_t) {
        count0++;
        size_t sub_task = 100 / total_task;
        for (size_t i = index * sub_task; i < (index + 1) * sub_task; i++) {
            dst0[i] = source[i] * source[i];
        }
    };
    auto func1 = [&](size_t index, size_t) {
        count1++;
        size_t sub_task = 100 / total_task;
        for (size_t i = index * sub_task; i < (index + 1) * sub_task; i++) {
            dst1[i] = source[i] * source[i];
        }
    };
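    // The pool must be active() before tasks are added and deactive() once they
    // are done; the counters are asserted right after add_task(), so the call is
    // expected to return only after all `total_task` sub-tasks have completed.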
    thread_pool0->active();
    thread_pool0->add_task({func0, total_task});
    thread_pool0->deactive();
    thread_pool1->active();
    thread_pool1->add_task({func1, total_task});
    thread_pool1->deactive();
    ASSERT_EQ(count0, total_task);
    ASSERT_EQ(count1, total_task);
    for (size_t i = 0; i < 100; i++) {
        ASSERT_EQ(dst0[i], truth[i]);
        ASSERT_EQ(dst1[i], truth[i]);
    }
}

TEST(TestGraph, ParallelRunMultithreadMode) {
    // check race conditions when graphs are executed on multiple threads
    std::atomic_size_t sync_counter{0};
    constexpr size_t NR_RUN = 50;
    size_t nr_worker = std::max<size_t>(4, sys::get_cpu_count() / 4);
    if (auto setting = MGB_GETENV("TestGraphParallelRun_nr_worker")) {
        nr_worker = std::stoul(setting);
    }
    mgb_log("use %zu workers", nr_worker);

    auto sync_barrier = [&sync_counter, nr_worker](size_t& cnt) {
        ++sync_counter;
        ++cnt;
        while (sync_counter < cnt * nr_worker)
            ;
    };
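    // Each worker repeatedly builds, compiles and executes its own graph on the
    // shared "multithread2:0" comp node; the spin barrier above keeps the
    // workers in lock-step so that graph construction, compilation and execution
    // each overlap across threads.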

    auto do_worker = [&sync_barrier](size_t sync_cnt) {
        auto cn = CompNode::load("multithread2:0");
        HostTensorGenerator<> gen;
        auto host_x = gen({23}, cn);
        HostTensorND host_y, y_expect;
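        // reference result computed on the host: y = y * 2 + 3 applied five times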
        y_expect.copy_from(*host_x);
        {
            auto py = y_expect.ptr<float>();
            for (int i = 0; i < 23; ++i) {
                for (int j = 0; j < 5; ++j) {
                    py[i] = py[i] * 2 + 3;
                }
            }
        }

        sync_barrier(sync_cnt);
        auto graph = ComputingGraph::make();
        auto x = opr::Host2DeviceCopy::make(*graph, host_x), y = x;
        for (int i = 0; i < 5; ++i) {
            y = y * 2 + 3;
        }

        sync_barrier(sync_cnt);
        auto func = graph->compile({make_callback_copy(y, host_y)});

        sync_barrier(sync_cnt);
        func->execute();
        MGB_ASSERT_TENSOR_EQ(y_expect, host_y);
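        // clobber the previous output so the second execute() has to recompute it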
        memset(host_y.raw_ptr(), -1, 23 * sizeof(float));

        sync_barrier(sync_cnt);
        func->execute();
        MGB_ASSERT_TENSOR_EQ(y_expect, host_y);
        func->wait();
    };
    auto worker = [&]() {
        size_t scnt = 0;
        for (size_t run_id = 0; run_id < NR_RUN; ++run_id) {
            do_worker(scnt);
        }
    };

    std::vector<std::thread> workers;
    for (size_t i = 0; i < nr_worker; ++i)
        workers.emplace_back(worker);

    for (auto&& i : workers)
        i.join();
}
#else
#pragma message "tests are disabled as thread support is not enabled."
#endif  //  MGB_HAVE_THREAD

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}