/**
 * \file dnn/src/cuda/matrix_mul/algos.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#pragma once
#include "megdnn/oprs.h"
#include "src/common/utils.h"
#include "src/cuda/matrix_mul/opr_impl.h"

#include "src/common/algo_base.h"
#include "src/common/metahelper.h"

#include <unordered_map>

#include <cuda.h>

#include <memory>

#if CUDA_VERSION >= 10010
#include <cublasLt.h>
#endif

namespace megdnn {
namespace cuda {

/*!
 * \brief base class for matrix mul algos
 */
class MatrixMulForwardImpl::AlgoBase : public Algorithm {
protected:
    //! protected and non-virtual: instances are never deleted through an
    //! AlgoBase pointer
    ~AlgoBase() = default;

public:
    //! numeric tag for each concrete algorithm (used by
    //! MEGDNN_DECL_ALGO_TYPE in the subclasses); values are
    //! position-dependent, so keep the declaration order stable
    enum class AlgoType : uint32_t {
        CUDA_CUBLAS,
        CUDA_WMMA_UINT4X4X32,
        CUDA_CUBLASLT,
        CUDA_NAIVE,
        CUDA_BFLOAT16,
#if CUDA_VERSION >= 9020
        CUDA_FLOAT32_SIMT,
        CUDA_FLOAT32_SIMT_SPLIT_K,
        CUDA_FLOAT32_SIMT_GEMV_BATCHED_STRIDED,
#endif
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }

    //! layout-only description of a matmul problem (no tensor data);
    //! sufficient for availability and workspace-size queries
    struct SizeArgs {
        MatrixMulForwardImpl* opr;
        TensorLayout layout_a, layout_b, layout_c;

        std::string to_string() const;

        SizeArgs(MatrixMulForwardImpl* opr, const TensorLayout& A,
                 const TensorLayout& B, const TensorLayout& C);

        //! true iff A/B share an (Quantized)Int8 dtype, C is
        //! (Quantized)Int32, and the param format is DEFAULT — i.e. the
        //! problem can be executed as an int8x8x32 gemm
        bool can_be_treated_as_int8x8x32() const {
            return layout_a.dtype.enumv() == layout_b.dtype.enumv() &&
                   (layout_a.dtype.enumv() == DTypeEnum::Int8 ||
                    layout_a.dtype.enumv() == DTypeEnum::QuantizedS8) &&
                   (layout_c.dtype.enumv() == DTypeEnum::Int32 ||
                    layout_c.dtype.enumv() == DTypeEnum::QuantizedS32) &&
                   opr->param().format == param::MatrixMul::Format::DEFAULT;
        }
    };
    //! SizeArgs plus the concrete tensors and workspace used at exec time
    struct ExecArgs : public SizeArgs {
        TensorND tensor_a, tensor_b, tensor_c;
        Workspace workspace;

        ExecArgs(MatrixMulForwardImpl* opr, _megdnn_tensor_in A,
                 _megdnn_tensor_in B, _megdnn_tensor_out C,
                 _megdnn_workspace workspace);
    };
    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;

    //! available for \p args AND its workspace fits within \p limit bytes
    bool is_available_wk(const SizeArgs& args, size_t limit) const {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
    //! available, has all \p positive_attr, has none of \p negative_attr,
    //! and fits within \p limit bytes of workspace
    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) const {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) &&
               is_available_wk(args, limit);
    }
    //! asserts the caller-provided workspace is large enough; returns *this
    //! so it can be chained at the exec call site
    AlgoBase& check_workspace(const SizeArgs& args,
                              const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(
                req <= workspace.size,
                "matrix mul fwd algo %s: required workspace %zu bytes, got %zu",
                name(), req, workspace.size);
        return *this;
    }
};

//! matmul delegated to the cuBLAS library
class MatrixMulForwardImpl::AlgoCuBlas final : public AlgoBase {
public:
    AlgoCuBlas() = default;
    bool is_available(const SizeArgs& args) const override;
    //! requests no extra device workspace
    size_t get_workspace_in_bytes(const SizeArgs& /* args */) const override {
        return 0_z;
    }
    const char* name() const override { return "CUBLAS"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLAS)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
};

#if CUDA_VERSION >= 10000
//! uint4x4x32 matmul via WMMA; guarded because the required toolkit
//! support ships with CUDA 10.0+
class MatrixMulForwardImpl::AlgoUInt4x4x32WMMA final : public AlgoBase {
public:
    AlgoUInt4x4x32WMMA() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return "UINT4x4x32_WMMA"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_WMMA_UINT4X4X32)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
};
#endif
#if CUDA_VERSION >= 10010
//! matmul delegated to cuBLASLt (cublasLt.h is available since CUDA 10.1)
class MatrixMulForwardImpl::AlgoCuBlasLt final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return "CUBLAS_LT"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLASLT)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
};
#endif

//! reference implementation; no external library dependency
class MatrixMulForwardImpl::AlgoNaive final : public AlgoBase {
public:
    AlgoNaive() = default;
    bool is_available(const SizeArgs& args) const override;
    //! requests no extra device workspace
    size_t get_workspace_in_bytes(const SizeArgs& /* args */) const override {
        return 0_z;
    }
    const char* name() const override { return "NAIVE"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_NAIVE)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
};

#if !MEGDNN_DISABLE_FLOAT16
/*!
 * \brief bfloat16 matmul built on top of sub-operators
 * (see get_subopr_list); NOTE(review): presumably converts operands and
 * delegates the gemm to the sub-operator — confirm in the .cpp
 */
class MatrixMulForwardImpl::AlgoBFloat16 final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_BFLOAT16)

    //! sub-operators this algorithm needs searched/tuned alongside it
    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts,
            const OperatorBase* opr) const override;

    const char* name() const override { return "MATMUL_BFLOAT16"; }

    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }

private:
    //! workspace partitioning shared by get_workspace_in_bytes and exec
    WorkspaceBundle get_workspace_bundle(void* ptr, const SizeArgs& args) const;
};
#endif

#if CUDA_VERSION >= 9020
/*!
 * \brief CUTLASS-generated float32 SIMT gemm, parameterized by
 * threadblock/warp tile shapes
 */
class MatrixMulForwardImpl::AlgoFloat32SIMT final : public AlgoBase {
public:
    //! tiling configuration; encoded into the algo name, e.g. "64X64X8_32X32X8"
    struct AlgoParam {
        int threadblock_m, threadblock_n, threadblock_k;
        int warp_m, warp_n, warp_k;
        std::string to_string() {
            return ssprintf("%dX%dX%d_%dX%dX%d", threadblock_m, threadblock_n,
                            threadblock_k, warp_m, warp_n, warp_k);
        }
    };
    AlgoFloat32SIMT(AlgoParam algo_param)
            : m_algo_param{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_%s",
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    void exec(const ExecArgs& args) const override;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT)

    //! POD-serialized AlgoParam identifying this tiling variant
    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_algo_param, ret);
        return ret;
    }

private:
    AlgoParam m_algo_param;
    std::string m_name;
};

/*!
 * \brief split-K variant of the CUTLASS float32 SIMT gemm; shares the
 * tiling-parameter struct with AlgoFloat32SIMT
 */
class MatrixMulForwardImpl::AlgoFloat32SIMTSplitK final : public AlgoBase {
public:
    using AlgoParam = MatrixMulForwardImpl::AlgoFloat32SIMT::AlgoParam;
    AlgoFloat32SIMTSplitK(AlgoParam algo_param)
            : m_algo_param{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_SPLIT_K_%s",
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    void exec(const ExecArgs& args) const override;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT_SPLIT_K)

    //! POD-serialized AlgoParam identifying this tiling variant
    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_algo_param, ret);
        return ret;
    }

private:
    AlgoParam m_algo_param;
    std::string m_name;
};

/*!
 * \brief batched strided gemv (float32 SIMT) from CUTLASS, parameterized
 * only by the threadblock N dimension
 */
class MatrixMulForwardImpl::AlgoFloat32SIMTGemvBatchedStrided final
        : public AlgoBase {
public:
    AlgoFloat32SIMTGemvBatchedStrided(int threadblock_n)
            : m_threadblock_n{threadblock_n},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_GEMV_BATCHED_STRIDED_%d",
                              m_threadblock_n)} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    void exec(const ExecArgs& args) const override;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT_GEMV_BATCHED_STRIDED)

    //! POD-serialized threadblock_n identifying this variant
    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_threadblock_n, ret);
        return ret;
    }

private:
    int m_threadblock_n;
    std::string m_name;
};
#endif

/*!
 * \brief owns one instance of every matmul algorithm available on this
 * build configuration
 */
class MatrixMulForwardImpl::AlgoPack : NonCopyableObj {
private:
    //! maps an algorithm descriptor back to the owning AlgoBase instance
    AlgoBase::Mapper m_all_algos_map;

public:
    AlgoPack();
    AlgoCuBlas cublas;
    AlgoNaive naive;
#if CUDA_VERSION >= 10000
    AlgoUInt4x4x32WMMA wmma_uint4x4x32;
#endif
#if CUDA_VERSION >= 10010
    AlgoCuBlasLt cublas_lt;
#endif
#if !MEGDNN_DISABLE_FLOAT16
    AlgoBFloat16 bfloat16;
#endif
#if CUDA_VERSION >= 9020
    //! one entry per CUTLASS tiling configuration; populated by
    //! fill_cutlass_algos()
    std::vector<AlgoFloat32SIMT> simt_float32;
    std::vector<AlgoFloat32SIMTSplitK> simt_float32_split_k;
    std::vector<AlgoFloat32SIMTGemvBatchedStrided>
            simt_float32_gemv_batched_strided;
#endif
    //! non-owning pointers to every algorithm above
    std::vector<AlgoBase*> all_algos;

    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
    void fill_cutlass_algos();
};

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen