/**
 * \file dnn/test/common/benchmarker.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include <map>
#include <memory>
#include <vector>
#include <regex>
#include "megdnn/basic_types.h"
#include "megdnn/tensor_format.h"
#include "test/common/opr_proxy.h"
#include "test/common/rng.h"
#include "test/common/timer.h"
#include "test/common/opr_algo_proxy.h"

namespace megdnn {
namespace test {

template <typename Opr, typename T>
class BenchmarkerBase {
public:
    using Param = typename Opr::Param;
    using TensorValueArray = TensorNDArray;
    using BeforeExecCallback =
            std::function<void(Opr*, const TensorValueArray&)>;

    BenchmarkerBase(Handle* handle, T timer)
            : m_timer(timer),
              m_handle_naive(create_cpu_handle(2, false)),
              m_handle(handle),
              m_default_rng(new NormalRNG()),
              m_param(Param()),
              m_proxy{new OprProxy<Opr>()} {}

    const Handle* handle() const { return m_handle; }

    /*!
     * \brief benchmark the opr with the current param/dtype/rng config
     * \returns elapsed time in ms
     *
     * The benchmarker constructs a TensorLayout array from the shapes and
     * the configured dtypes/formats and calls exec(TensorLayoutArray&).
     */
    float exec(const TensorShapeArray& shapes) {
        return exec(make_layouts(shapes));
    }
    float exec(TensorLayoutArray layouts);

    //! disambiguate overloaded exec
    float execs(const TensorShapeArray& shapes) { return exec(shapes); }
    float execl(const TensorLayoutArray& layouts) { return exec(layouts); }
    BenchmarkerBase& set_param(Param param) {
        m_param = param;
        return *this;
    }
    BenchmarkerBase& set_dtype(size_t idx, DType dtype) {
        m_dtype[idx] = dtype;
        return *this;
    }
    BenchmarkerBase& set_rng(size_t idx, RNG* rng) {
        m_rng[idx] = rng;
        return *this;
    }
    BenchmarkerBase& set_fmt(size_t idx, TensorFormat fmt) {
        m_fmt[idx] = fmt;
        return *this;
    }
    TensorLayoutArray make_layouts(const TensorShapeArray& shapes) {
        TensorLayoutArray layouts(shapes.size());
        for (size_t i = 0; i < shapes.size(); ++i) {
            DType dt = (m_dtype.find(i) != m_dtype.end() ? m_dtype[i]
                                                         : dtype::Float32());
            TensorFormat fmt = (m_fmt.find(i) != m_fmt.end()
                                        ? m_fmt[i]
                                        : DefaultTensorFormat::make());
            layouts[i] = TensorLayout(shapes[i], dt, fmt);
        }
        return layouts;
    }
    BenchmarkerBase& set_proxy(std::unique_ptr<OprProxy<Opr>>& proxy) {
        m_proxy.reset(nullptr);
        m_proxy = std::move(proxy);
        return *this;
    }
    std::unique_ptr<OprProxy<Opr>>& proxy() { return m_proxy; }
    BenchmarkerBase& set_times(size_t times) {
        m_times = times;
        return *this;
    }
    BenchmarkerBase& set_display(bool display) {
        m_display = display;
        return *this;
    }
    //! set a callback to be invoked before executing the operator
    BenchmarkerBase& set_before_exec_callback(const BeforeExecCallback& cb) {
        m_before_exec_callback = cb;
        return *this;
    }
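
    /*!
     * Usage sketch: the callback receives the operator and the device
     * tensors right before the warm-up and timed runs; e.g. (for oprs that
     * expose an execution policy, as algo_benchmark() below relies on;
     * `benchmarker` is a placeholder instance):
     * \code
     *     benchmarker.set_before_exec_callback(
     *             [](Opr* opr, const TensorNDArray&) {
     *                 // e.g. pin opr->execution_policy().algorithm here
     *             });
     * \endcode
     */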

    /*!
     * \brief set adaptive benchmarking: ignore set_times() and find a
     * suitable number of runs for the given total duration
     *
     * Note: if this is enabled, the value returned by exec() is the average
     * time per run rather than the total elapsed time.
     */
    BenchmarkerBase& set_adaptive_benchmark(float tot_time_in_secs) {
        m_adaptive_secs = tot_time_in_secs;
        return *this;
    }
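
    /*!
     * Usage sketch for adaptive benchmarking (ConvolutionForward, `handle`
     * and the shapes below are placeholders, not part of this header):
     * \code
     *     Benchmarker<ConvolutionForward> benchmarker(handle);
     *     benchmarker.set_adaptive_benchmark(0.5);  // ~0.5s of wall time
     *     // returns the average time per run, not the total elapsed time
     *     float avg_ms = benchmarker.execs(
     *             {{64, 3, 224, 224}, {64, 3, 7, 7}, {}});
     * \endcode
     */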

    //! get the opr impl so that settings other than param() can be modified
    Opr* opr() {
        if (!m_opr) {
            m_opr = m_handle->create_operator<Opr>();
        }
        return m_opr.get();
    }

    const Param& param() const { return m_param; }

private:
    T m_timer;
    bool m_display = true;
    size_t m_times = 1;
    float m_adaptive_secs = 0;
    std::unique_ptr<Handle> m_handle_naive;
    Handle* m_handle;
    std::unique_ptr<RNG> m_default_rng;
    std::map<size_t, RNG*> m_rng;
    std::map<size_t, DType> m_dtype;
    std::map<size_t, TensorFormat> m_fmt;
    Param m_param;
    std::unique_ptr<OprProxy<Opr>> m_proxy;
    BeforeExecCallback m_before_exec_callback;
    std::unique_ptr<Opr> m_opr;
};

template <typename Opr, typename T>
float BenchmarkerBase<Opr, T>::exec(TensorLayoutArray layouts) {
    auto opr = this->opr();
    opr->param() = m_param;
    auto user_layouts = layouts;
    m_proxy->deduce_layout(opr, layouts);
    for (size_t i = 0; i < layouts.size(); ++i)
        if (user_layouts[i].ndim > 0) {
            auto run = [&]() {
                ASSERT_TRUE(layouts[i].eq_shape(user_layouts[i]))
                        << "User provided shape is "
                        << user_layouts[i].TensorShape::to_string()
                        << "\nExpected shape is "
                        << layouts[i].TensorShape::to_string();
            };
            run();
        }
    auto allocate = [&layouts](Handle* handle) {
        TensorNDArray tensors(layouts.size());
        auto trans_func = [handle](const TensorLayout& layout) {
            auto span = layout.span();
            TensorND res;
            res.raw_ptr = static_cast<uint8_t*>(
                                  megdnn_malloc(handle, span.dist_byte())) +
                          span.low_byte;
            res.layout = layout;
            return res;
        };
        std::transform(layouts.begin(), layouts.end(), tensors.begin(),
                       trans_func);
        return tensors;
    };
    auto tensors_cur = allocate(m_handle);
    auto tensors_cur_host = allocate(m_handle_naive.get());
    // init: fill the host tensors with the configured RNGs, then copy them
    // to the device tensors
    for (size_t i = 0; i < tensors_cur_host.size(); ++i) {
        TensorND& tensor = tensors_cur_host[i];
        auto rng = m_rng[i];
        if (!rng)
            rng = m_default_rng.get();
        auto size = tensor.layout.span().high_byte;
        rng->gen(tensor);
        if (tensor.layout.ndim == 0)
            continue;
        megdnn_memcpy_H2D(m_handle, tensors_cur[i].raw_ptr, tensor.raw_ptr,
                          size);
    }
    if (m_before_exec_callback) {
        m_before_exec_callback(opr, tensors_cur);
    }
    // run
    // warm up
    m_proxy->exec(opr, tensors_cur);
    megcoreSynchronize(m_handle->megcore_computing_handle());

    if (m_adaptive_secs) {
        // find m_times for adaptive benchmarking
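        // strategy: time batches of cur_times runs, doubling the batch each
        // iteration but never beyond the number of runs the remaining budget
        // is expected to cover, until m_adaptive_secs of wall time is spent;
        // m_times accumulates the total number of runs performed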
        m_times = 0;
        int cur_times = 1;
        auto remain_time = m_adaptive_secs * 1e6;
        while (remain_time > 0) {
            m_timer.reset();
            m_timer.start();
            for (int i = 0; i < cur_times; ++i)
                m_proxy->exec(opr, tensors_cur);
            megcoreSynchronize(m_handle->megcore_computing_handle());
            m_timer.stop();
            m_times += cur_times;
            auto this_run_time = m_timer.get_time_in_us();
            remain_time -= this_run_time;
            cur_times = std::min(
                    cur_times * 2,
                    std::max<int>(1, remain_time / this_run_time * cur_times));
        }
    }
    m_timer.reset();
    m_timer.start();
    for (size_t t = 0; t < m_times; ++t)
        m_proxy->exec(opr, tensors_cur);
    megcoreSynchronize(m_handle->megcore_computing_handle());
    m_timer.stop();
    auto time_in_ms = m_timer.get_time_in_us() / 1e3;
    if (m_display) {
        std::cout << "Total time is " << time_in_ms << "ms "
                  << "for " << m_times << " run(s)." << std::endl;
    }
    auto free = [](Handle* handle, TensorNDArray& tensors) {
        std::for_each(tensors.begin(), tensors.end(),
                      [handle](const TensorND& tensor) {
                          megdnn_free(handle, tensor.raw_ptr);
                      });
    };
    free(m_handle, tensors_cur);
    free(m_handle_naive.get(), tensors_cur_host);
    if (m_adaptive_secs)
        time_in_ms /= m_times;
    return time_in_ms;
}

template <typename Opr, typename T = Timer>
class Benchmarker;

template <typename Opr>
class Benchmarker<Opr, Timer> : public BenchmarkerBase<Opr, Timer> {
public:
    Benchmarker(Handle* handle)
            : BenchmarkerBase<Opr, Timer>{handle, Timer{}} {}
};
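
/*!
 * Usage sketch (ConvolutionForward, `handle` and the shapes below are
 * placeholders; `handle` is any valid megdnn Handle for the backend being
 * benchmarked):
 * \code
 *     Benchmarker<ConvolutionForward> benchmarker(handle);
 *     benchmarker.set_dtype(0, dtype::Float32())
 *             .set_dtype(1, dtype::Float32())
 *             .set_times(10);
 *     // src, filter, dst; the dst shape is deduced from the empty shape
 *     float total_ms = benchmarker.execs(
 *             {{64, 3, 224, 224}, {64, 3, 7, 7}, {}});
 * \endcode
 */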

////////////////// Algo Benchmark ////////////////////////
template <typename Opr, typename Proxy = OprProxy<Opr>, typename T = Timer>
float algo_benchmark(Benchmarker<Opr, T>& benchmark, TensorLayoutArray layouts,
                     const std::string& algo_base) {
    Proxy proxy;
    auto opr = benchmark.opr();
    opr->param() = benchmark.param();
    proxy.deduce_layout(opr, layouts);
    auto algos = OprAlgoProxy<Opr>::get_all_algorithms(opr, layouts);
    float min_used = std::numeric_limits<float>::max();
    bool execed = false;
    for (auto i : algos) {
        if (std::regex_match(i->name(),
                             std::regex("(" + algo_base + ")(.*)"))) {
            opr->execution_policy().algorithm = i;
            auto used = benchmark.exec(layouts);
            min_used = std::min(min_used, used);
            printf("run algo: %s used: %f ms min_used: %f ms\n", i->name(),
                   used, min_used);
            execed = true;
        }
    }
    megdnn_assert(execed, "no algo starts with %s\n", algo_base.c_str());
    return min_used;
}

template <typename Opr, typename Proxy = OprProxy<Opr>, typename T = Timer>
float algo_benchmark(Benchmarker<Opr, T>& benchmark, TensorShapeArray shapes,
                     const std::string& algo_base) {
    return algo_benchmark(benchmark, benchmark.make_layouts(shapes), algo_base);
}
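
/*!
 * Usage sketch (ConvolutionForward, `handle`, the shapes and the "CUDNN"
 * prefix below are placeholders; pass any regex prefix that matches the
 * names of the algorithms to be benchmarked):
 * \code
 *     Benchmarker<ConvolutionForward> benchmarker(handle);
 *     benchmarker.set_times(10);
 *     TensorShapeArray shapes{{64, 3, 224, 224}, {64, 3, 7, 7}, {}};
 *     float best_ms = algo_benchmark(benchmarker, shapes, "CUDNN");
 * \endcode
 */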

}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen