/**
 * \file dnn/src/cuda/matrix_mul/algos.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#pragma once
#include "megdnn/oprs.h"
#include "src/common/utils.h"
#include "src/cuda/matrix_mul/opr_impl.h"
#include "src/common/algo_base.h"
#include "src/common/metahelper.h"

#include <unordered_map>
#include <cuda.h>
#include <memory>
#if CUDA_VERSION >= 10010
#include <cublasLt.h>
#endif

namespace megdnn {
namespace cuda {

/*!
 * \brief base class for matrix mul algos
 *
 */
class MatrixMulForwardImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
    enum class AlgoType : uint32_t {
        CUDA_CUBLAS,
        CUDA_WMMA_UINT4X4X32,
        CUDA_CUBLASLT,
        CUDA_NAIVE,
        CUDA_BFLOAT16,
#if CUDA_VERSION >= 9020
        CUDA_FLOAT32_SIMT,
        CUDA_FLOAT32_SIMT_SPLIT_K,
        CUDA_FLOAT32_SIMT_GEMV_BATCHED_STRIDED,
        CUDA_FLOAT16_TENSOR_OP,
        CUDA_FLOAT16_TENSOR_OP_SPLIT_K,
#endif
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }
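    //! size arguments: the forward operator and the layouts of A, B and C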
    struct SizeArgs {
        MatrixMulForwardImpl* opr;
        TensorLayout layout_a, layout_b, layout_c;

        std::string to_string() const;
        SizeArgs(MatrixMulForwardImpl* opr, const TensorLayout& A,
                 const TensorLayout& B, const TensorLayout& C);

        bool can_be_treated_as_int8x8x32() const {
            return layout_a.dtype.enumv() == layout_b.dtype.enumv() &&
                   (layout_a.dtype.enumv() == DTypeEnum::Int8 ||
                    layout_a.dtype.enumv() == DTypeEnum::QuantizedS8) &&
                   (layout_c.dtype.enumv() == DTypeEnum::Int32 ||
                    layout_c.dtype.enumv() == DTypeEnum::QuantizedS32) &&
                   opr->param().format == param::MatrixMul::Format::DEFAULT;
        }
    };
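    //! exec arguments: size arguments plus the actual tensors and the workspace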
    struct ExecArgs : public SizeArgs {
        TensorND tensor_a, tensor_b, tensor_c;
        Workspace workspace;

        ExecArgs(MatrixMulForwardImpl* opr, _megdnn_tensor_in A,
                 _megdnn_tensor_in B, _megdnn_tensor_out C,
                 _megdnn_workspace workspace);
    };
    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;

    bool is_available_wk(const SizeArgs& args, size_t limit) const {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
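    //! availability check with required (positive) and forbidden (negative)
    //! attributes, plus an optional workspace limit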
    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) const {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) &&
               is_available_wk(args, limit);
    }
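    //! assert that the provided workspace is large enough for this algo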
    AlgoBase& check_workspace(const SizeArgs& args,
                              const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(
                req <= workspace.size,
                "matrix mul fwd algo %s: required workspace %zu bytes, got %zu",
                name(), req, workspace.size);
        return *this;
    }
};

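/*!
 * \brief matmul algo that forwards to the cuBLAS GEMM routines
 */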
class MatrixMulForwardImpl::AlgoCuBlas final : public AlgoBase {
public:
    AlgoCuBlas() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /* args */) const override {
        return 0_z;
    }
    const char* name() const override { return "CUBLAS"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLAS)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::USABLE_DEPEND_ON_SHAPE |
               AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
};

#if CUDA_VERSION >= 10000
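/*!
 * \brief uint4 x uint4 -> int32 matmul based on WMMA intrinsics (CUDA 10.0+)
 */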
class MatrixMulForwardImpl::AlgoUInt4x4x32WMMA final : public AlgoBase {
public:
    AlgoUInt4x4x32WMMA() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return "UINT4x4x32_WMMA"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_WMMA_UINT4X4X32)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
};
#endif
#if CUDA_VERSION >= 10010
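/*!
 * \brief matmul algo that forwards to the cuBLASLt library (CUDA 10.1+)
 */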
class MatrixMulForwardImpl::AlgoCuBlasLt final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return "CUBLAS_LT"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLASLT)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
};
#endif

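/*!
 * \brief naive reference implementation; carries the NAIVE attribute
 */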
class MatrixMulForwardImpl::AlgoNaive final : public AlgoBase {
public:
    AlgoNaive() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /* args */) const override {
        return 0_z;
    }
    const char* name() const override { return "NAIVE"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_NAIVE)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE | AlgoAttribute::NAIVE;
    }
};

#if !MEGDNN_DISABLE_FLOAT16
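/*!
 * \brief bfloat16 matmul implemented by delegating to a sub-operator
 *        (see get_subopr_list)
 */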
class MatrixMulForwardImpl::AlgoBFloat16 final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_BFLOAT16)

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts,
            const OperatorBase* opr) const override;

    const char* name() const override { return "MATMUL_BFLOAT16"; }

    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }

private:
    WorkspaceBundle get_workspace_bundle(void* ptr, const SizeArgs& args) const;
};
#endif

#if CUDA_VERSION >= 9020
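/*!
 * \brief common base of the CUTLASS-backed algos; AlgoParam describes the
 *        threadblock / warp / instruction tile shapes used to select a kernel
 */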
class MatrixMulForwardImpl::AlgoCutlassMatrixMulBase : public AlgoBase {
public:
    struct AlgoParam {
        int threadblock_m, threadblock_n, threadblock_k;
        int warp_m, warp_n, warp_k;
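        //! MMA instruction tile shape; defaults to 1x1x1 when not applicable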
        int instruction_m, instruction_n, instruction_k;
        AlgoParam(int threadblock_m_, int threadblock_n_, int threadblock_k_,
                  int warp_m_, int warp_n_, int warp_k_, int instruction_m_ = 1,
                  int instruction_n_ = 1, int instruction_k_ = 1)
                : threadblock_m{threadblock_m_},
                  threadblock_n{threadblock_n_},
                  threadblock_k{threadblock_k_},
                  warp_m{warp_m_},
                  warp_n{warp_n_},
                  warp_k{warp_k_},
                  instruction_m{instruction_m_},
                  instruction_n{instruction_n_},
                  instruction_k{instruction_k_} {}
        std::string to_string() const;
    };
    AlgoCutlassMatrixMulBase(AlgoParam algo_param) : m_algo_param{algo_param} {}
    void exec(const ExecArgs& args) const override;
    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_algo_param, ret);
        return ret;
    }

protected:
    virtual int min_alignment_requirement() const = 0;
    virtual void do_exec(const ExecArgs& args) const = 0;
    std::pair<bool, TensorLayoutArray> construct_aligned_layouts(
            const SizeArgs& args) const;
    int max_alignment(const SizeArgs& args) const;
    AlgoParam m_algo_param;
};

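/*!
 * \brief float32 matmul using CUTLASS SIMT kernels
 */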
class MatrixMulForwardImpl::AlgoFloat32SIMT final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat32SIMT(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_%s",
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT)
    std::string param() const override {
        std::string ret;
        // FIXME: keep the algo param layout compatible with older versions
        // to avoid fastrun cache errors
        struct AlgoParam_ {
            int threadblock_m, threadblock_n, threadblock_k;
            int warp_m, warp_n, warp_k;
        };
        AlgoParam_ algo_param{
                m_algo_param.threadblock_m, m_algo_param.threadblock_n,
                m_algo_param.threadblock_k, m_algo_param.warp_m,
                m_algo_param.warp_n,        m_algo_param.warp_k};
        serialize_write_pod(algo_param, ret);
        return ret;
    }

private:
    void do_exec(const ExecArgs& args) const override;
    int min_alignment_requirement() const override { return 1; }
    std::string m_name;
};

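/*!
 * \brief float32 CUTLASS SIMT matmul with split-K parallelization over the
 *        reduction dimension
 */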
class MatrixMulForwardImpl::AlgoFloat32SIMTSplitK final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat32SIMTSplitK(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_SPLIT_K_%s",
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::USABLE_DEPEND_ON_SHAPE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT_SPLIT_K)
    std::string param() const override {
        std::string ret;
        // FIXME: keep the algo param layout compatible with older versions
        // to avoid fastrun cache errors
        struct AlgoParam_ {
            int threadblock_m, threadblock_n, threadblock_k;
            int warp_m, warp_n, warp_k;
        };
        AlgoParam_ algo_param{
                m_algo_param.threadblock_m, m_algo_param.threadblock_n,
                m_algo_param.threadblock_k, m_algo_param.warp_m,
                m_algo_param.warp_n,        m_algo_param.warp_k};
        serialize_write_pod(algo_param, ret);
        return ret;
    }

private:
    void do_exec(const ExecArgs& args) const override;
    int min_alignment_requirement() const override { return 1; }
    std::string m_name;
};

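/*!
 * \brief float32 matmul backed by a CUTLASS batched strided GEMV kernel
 */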
class MatrixMulForwardImpl::AlgoFloat32SIMTGemvBatchedStrided final
        : public AlgoBase {
public:
    AlgoFloat32SIMTGemvBatchedStrided(int threadblock_n)
            : m_threadblock_n{threadblock_n},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_GEMV_BATCHED_STRIDED_%d",
                              m_threadblock_n)} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    void exec(const ExecArgs& args) const override;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT_GEMV_BATCHED_STRIDED)

    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_threadblock_n, ret);
        return ret;
    }

private:
    int m_threadblock_n;
    std::string m_name;
};

#if CUDA_VERSION >= 10020
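/*!
 * \brief float16 CUTLASS matmul using tensor core instructions (CUDA 10.2+)
 */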
class MatrixMulForwardImpl::AlgoFloat16TensorOp final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat16TensorOp(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT16_TENSOR_OP_h%d%d%d_%s",
                              m_algo_param.instruction_m,
                              m_algo_param.instruction_n,
                              m_algo_param.instruction_k,
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT16_TENSOR_OP)

private:
    void do_exec(const ExecArgs& args) const override;
    int min_alignment_requirement() const override { return 2; }
    std::string m_name;
};

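/*!
 * \brief split-K variant of the float16 tensor core matmul
 */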
class MatrixMulForwardImpl::AlgoFloat16TensorOpSplitK final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat16TensorOpSplitK(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT16_TENSOR_OP_SPLIT_K_h%d%d%d_%s",
                              m_algo_param.instruction_m,
                              m_algo_param.instruction_n,
                              m_algo_param.instruction_k,
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::USABLE_DEPEND_ON_SHAPE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT16_TENSOR_OP_SPLIT_K)

private:
    void do_exec(const ExecArgs& args) const override;
    int min_alignment_requirement() const override { return 2; }
    std::string m_name;
};
#endif
#endif

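/*!
 * \brief container that instantiates every available algorithm and exposes
 *        them through all_algos and all_algos_map()
 */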
class MatrixMulForwardImpl::AlgoPack : NonCopyableObj {
private:
    AlgoBase::Mapper m_all_algos_map;

public:
    AlgoPack();
    AlgoCuBlas cublas;
    AlgoNaive naive;
#if CUDA_VERSION >= 10000
    AlgoUInt4x4x32WMMA wmma_uint4x4x32;
#endif
#if CUDA_VERSION >= 10010
    AlgoCuBlasLt cublas_lt;
#endif
#if !MEGDNN_DISABLE_FLOAT16
    AlgoBFloat16 bfloat16;
#endif
#if CUDA_VERSION >= 9020
    std::vector<AlgoFloat32SIMT> simt_float32;
    std::vector<AlgoFloat32SIMTSplitK> simt_float32_split_k;
    std::vector<AlgoFloat32SIMTGemvBatchedStrided>
            simt_float32_gemv_batched_strided;
#if CUDA_VERSION >= 10020
    std::vector<AlgoFloat16TensorOp> tensorop_float16;
    std::vector<AlgoFloat16TensorOpSplitK> tensorop_float16_split_k;
#endif
#endif
    std::vector<AlgoBase*> all_algos;

    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
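    //! populate the CUTLASS algo vectors with the predefined tile configurations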
    void fill_cutlass_algos();
};

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen