/**
 * \file dnn/src/cuda/matrix_mul/algos.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */

#pragma once
#include <cuda.h>
#include <memory>
#include <unordered_map>
#include "megdnn/oprs.h"
#include "src/common/algo_base.h"
#include "src/common/metahelper.h"
#include "src/common/utils.h"
#include "src/cuda/conv_bias/algo.h"
#include "src/cuda/conv_bias/opr_impl.h"
#include "src/cuda/matrix_mul/opr_impl.h"
#if CUDA_VERSION >= 10010
#include <cublasLt.h>
#endif
namespace megdnn {
namespace cuda {

/*!
 * \brief base class for matrix mul algos
 *
 * Concrete algorithms implement availability checks, workspace queries and
 * execution; AlgoPack (below) owns one instance of each.
 */
class MatrixMulForwardImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
    //! numeric tag identifying each concrete algorithm, used by
    //! MEGDNN_DECL_ALGO_TYPE for (de)serialization of algorithm descriptors
    enum class AlgoType : uint32_t {
        CUDA_CUBLAS,
        CUDA_WMMA_UINT4X4X32,
        CUDA_CUBLASLT,
        CUDA_NAIVE,
        CUDA_BFLOAT16,
        CUDA_CONV1X1_CUDNN,
#if CUDA_VERSION >= 9020
        CUDA_FLOAT32_SIMT,
        CUDA_FLOAT32_SIMT_SPLIT_K,
        CUDA_FLOAT32_SIMT_GEMV_BATCHED_STRIDED,
        CUDA_FLOAT16_TENSOR_OP,
        CUDA_FLOAT16_TENSOR_OP_SPLIT_K,
#endif
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }

    //! layout-level arguments: enough to decide availability and workspace
    struct SizeArgs {
        MatrixMulForwardImpl* opr;
        TensorLayout layout_a, layout_b, layout_c;

        std::string to_string() const;
        SizeArgs(MatrixMulForwardImpl* opr, const TensorLayout& A,
                 const TensorLayout& B, const TensorLayout& C);

        //! true iff inputs are (quantized) int8 with (quantized) int32 output
        //! in the default matmul format, so int8x8x32 kernels apply
        bool can_be_treated_as_int8x8x32() const {
            return layout_a.dtype.enumv() == layout_b.dtype.enumv() &&
                   (layout_a.dtype.enumv() == DTypeEnum::Int8 ||
                    layout_a.dtype.enumv() == DTypeEnum::QuantizedS8) &&
                   (layout_c.dtype.enumv() == DTypeEnum::Int32 ||
                    layout_c.dtype.enumv() == DTypeEnum::QuantizedS32) &&
                   opr->param().format == param::MatrixMul::Format::DEFAULT;
        }
    };
    //! SizeArgs extended with the concrete tensors and workspace for exec
    struct ExecArgs : public SizeArgs {
        TensorND tensor_a, tensor_b, tensor_c;
        Workspace workspace;

        ExecArgs(MatrixMulForwardImpl* opr, _megdnn_tensor_in A,
                 _megdnn_tensor_in B, _megdnn_tensor_out C,
                 _megdnn_workspace workspace);
    };
    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;

    //! available and required workspace does not exceed \p limit bytes
    bool is_available_wk(const SizeArgs& args, size_t limit) const {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
    //! available under attribute constraints and workspace limit
    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) const {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) &&
               is_available_wk(args, limit);
    }
    //! assert that \p workspace is large enough; returns *this for chaining
    AlgoBase& check_workspace(const SizeArgs& args,
                              const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(
                req <= workspace.size,
                "matrix mul fwd algo %s: required workspace %zu bytes, got %zu",
                name(), req, workspace.size);
        return *this;
    }
};

//! matmul via plain cuBLAS GEMM; needs no extra workspace
class MatrixMulForwardImpl::AlgoCuBlas final : public AlgoBase {
public:
    AlgoCuBlas() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /* args */) const override {
        return 0_z;
    }
    const char* name() const override { return "CUBLAS"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLAS)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::USABLE_DEPEND_ON_SHAPE |
               AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
};

#if CUDA_VERSION >= 10000
//! uint4 x uint4 -> int32 matmul implemented with WMMA tensor-core API
class MatrixMulForwardImpl::AlgoUInt4x4x32WMMA final : public AlgoBase {
public:
    AlgoUInt4x4x32WMMA() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return "UINT4x4x32_WMMA"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_WMMA_UINT4X4X32)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
};
#endif
#if CUDA_VERSION >= 10010
//! matmul via the cuBLASLt lightweight matmul API
class MatrixMulForwardImpl::AlgoCuBlasLt final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return "CUBLAS_LT"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLASLT)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
};
#endif

//! reference implementation; always correct but slow, used as fallback
class MatrixMulForwardImpl::AlgoNaive final : public AlgoBase {
public:
    AlgoNaive() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /* args */) const override {
        return 0_z;
    }
    const char* name() const override { return "NAIVE"; }
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_NAIVE)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE | AlgoAttribute::NAIVE;
    }
};

#if !MEGDNN_DISABLE_FLOAT16
//! bfloat16 matmul delegating to a sub-operator (see get_subopr_list)
class MatrixMulForwardImpl::AlgoBFloat16 final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;
    MEGDNN_DECL_ALGO_TYPE(CUDA_BFLOAT16)

    //! sub-operators searched by fastrun to implement this algorithm
    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts,
            const OperatorBase* opr) const override;

    const char* name() const override { return "MATMUL_BFLOAT16"; }

    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }

private:
    //! workspace layout shared by get_workspace_in_bytes and exec
    WorkspaceBundle get_workspace_bundle(void* ptr, const SizeArgs& args) const;
};
#endif

//! matmul expressed as a 1x1 convolution executed by a cuDNN conv algorithm
class MatrixMulForwardImpl::AlgoConv1X1CUDNN final : public AlgoBase {
public:
    //! \param algo_enum the cuDNN forward-conv algorithm to wrap
    AlgoConv1X1CUDNN(cudnnConvolutionFwdAlgo_t algo_enum) {
        m_impl = std::make_unique<ConvBiasForwardImpl::AlgoCUDNNConv>(
                ConvBiasForwardImpl::AlgoCUDNNConv(algo_enum));
        std::string algoname(m_impl.get()->name());
        // expose the wrapped algorithm in this algo's name for fastrun logs
        m_name = "MATMUL_CONV1X1:" + algoname;
    }
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    void exec(const ExecArgs& args) const override;
    //! forward the inheritable attributes of the wrapped cuDNN algorithm
    AlgoAttribute attribute() const override {
        auto ret = AlgoAttribute::DEFAULT;
#define cb(attr)                                     \
    if (m_impl.get()->contain_attribute_all(attr)) { \
        ret |= attr;                                 \
    }
        MEGDNN_FOREACH_ALGO_ATTRIBUTE_INHERITABLE(cb)
#undef cb
        // REPRODUCIBLE is propagated explicitly in addition to the
        // inheritable set above
        if (m_impl.get()->contain_attribute_all(AlgoAttribute::REPRODUCIBLE)) {
            ret |= AlgoAttribute::REPRODUCIBLE;
        }
        return ret;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CONV1X1_CUDNN)
private:
    std::unique_ptr<ConvBiasForwardImpl::AlgoCUDNNConv> m_impl;  // wrapped algo
    std::string m_name;  // "MATMUL_CONV1X1:<wrapped algo name>"
    WorkspaceBundle get_workspace_bundle(void* ptr, const SizeArgs& args) const;
};

#if CUDA_VERSION >= 9020
//! common base for cutlass-backed matmul algorithms, parameterized by
//! threadblock / warp / instruction tile shapes
class MatrixMulForwardImpl::AlgoCutlassMatrixMulBase : public AlgoBase {
public:
    //! tile configuration of a cutlass GEMM kernel
    struct AlgoParam {
        int threadblock_m, threadblock_n, threadblock_k;
        int warp_m, warp_n, warp_k;
        int instruction_m, instruction_n, instruction_k;
        //! instruction shape defaults to 1x1x1 (SIMT, i.e. no tensor op)
        AlgoParam(int threadblock_m_, int threadblock_n_, int threadblock_k_,
                  int warp_m_, int warp_n_, int warp_k_, int instruction_m_ = 1,
                  int instruction_n_ = 1, int instruction_k_ = 1)
                : threadblock_m{threadblock_m_},
                  threadblock_n{threadblock_n_},
                  threadblock_k{threadblock_k_},
                  warp_m{warp_m_},
                  warp_n{warp_n_},
                  warp_k{warp_k_},
                  instruction_m{instruction_m_},
                  instruction_n{instruction_n_},
                  instruction_k{instruction_k_} {}
        std::string to_string() const;
    };
    AlgoCutlassMatrixMulBase(AlgoParam algo_param) : m_algo_param{algo_param} {}
    void exec(const ExecArgs& args) const override;
    //! serialized tile config; keys the fastrun cache
    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_algo_param, ret);
        return ret;
    }

protected:
    //! smallest alignment (in elements) the kernels of a subclass accept
    virtual int min_alignment_requirement() const = 0;
    virtual void do_exec(const ExecArgs& args) const = 0;
    //! try to build aligned layouts for the operands; first is success flag
    std::pair<bool, TensorLayoutArray> construct_aligned_layouts(
            const SizeArgs& args) const;
    //! largest alignment satisfied by all operand layouts
    int max_alignment(const SizeArgs& args) const;
    AlgoParam m_algo_param;
};

//! float32 cutlass matmul using ordinary SIMT cores
class MatrixMulForwardImpl::AlgoFloat32SIMT final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat32SIMT(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_%s",
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT)
    std::string param() const override {
        std::string ret;
        // FIXME: algo param compatible with old version, to avoid fastrun cache
        // error
        struct AlgoParam_ {
            int threadblock_m, threadblock_n, threadblock_k;
            int warp_m, warp_n, warp_k;
        };
        AlgoParam_ algo_param{
                m_algo_param.threadblock_m, m_algo_param.threadblock_n,
                m_algo_param.threadblock_k, m_algo_param.warp_m,
                m_algo_param.warp_n,        m_algo_param.warp_k};
        serialize_write_pod(algo_param, ret);
        return ret;
    }

private:
    void do_exec(const ExecArgs& args) const override;
    //! SIMT kernels accept any element alignment
    int min_alignment_requirement() const override { return 1; }
    std::string m_name;
    //! look up a matching cutlass operation, or nullptr if none
    const void* get_available_op(const SizeArgs& args) const;
};

//! float32 cutlass matmul with split-K parallel reduction; useful when K is
//! large relative to M and N
class MatrixMulForwardImpl::AlgoFloat32SIMTSplitK final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat32SIMTSplitK(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_SPLIT_K_%s",
                              m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::USABLE_DEPEND_ON_SHAPE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT_SPLIT_K)
    std::string param() const override {
        std::string ret;
        // FIXME: algo param compatible with old version, to avoid fastrun cache
        // error
        struct AlgoParam_ {
            int threadblock_m, threadblock_n, threadblock_k;
            int warp_m, warp_n, warp_k;
        };
        AlgoParam_ algo_param{
                m_algo_param.threadblock_m, m_algo_param.threadblock_n,
                m_algo_param.threadblock_k, m_algo_param.warp_m,
                m_algo_param.warp_n,        m_algo_param.warp_k};
        serialize_write_pod(algo_param, ret);
        return ret;
    }

private:
    void do_exec(const ExecArgs& args) const override;
    //! SIMT kernels accept any element alignment
    int min_alignment_requirement() const override { return 1; }
    std::string m_name;
    //! look up a matching cutlass operation, or nullptr if none
    const void* get_available_op(const SizeArgs& args) const;
};

//! float32 batched strided GEMV (matrix-vector) kernel, tuned by the
//! threadblock size along N
class MatrixMulForwardImpl::AlgoFloat32SIMTGemvBatchedStrided final
        : public AlgoBase {
public:
    AlgoFloat32SIMTGemvBatchedStrided(int threadblock_n)
            : m_threadblock_n{threadblock_n},
              m_name{ssprintf("CUTLASS_FLOAT32_SIMT_GEMV_BATCHED_STRIDED_%d",
                              m_threadblock_n)} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    void exec(const ExecArgs& args) const override;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT32_SIMT_GEMV_BATCHED_STRIDED)

    //! serialized threadblock_n; keys the fastrun cache
    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_threadblock_n, ret);
        return ret;
    }

private:
    int m_threadblock_n;
    std::string m_name;
};

#if CUDA_VERSION >= 10020
//! float16 cutlass matmul executed with tensor-op (mma) instructions
class MatrixMulForwardImpl::AlgoFloat16TensorOp final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat16TensorOp(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param} {
        // the name encodes the mma instruction shape plus the full tile config
        m_name = ssprintf("CUTLASS_FLOAT16_TENSOR_OP_h%d%d%d_%s",
                          m_algo_param.instruction_m,
                          m_algo_param.instruction_n,
                          m_algo_param.instruction_k,
                          m_algo_param.to_string().c_str());
    }
    const char* name() const override { return m_name.c_str(); }
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT16_TENSOR_OP)

private:
    //! tensor-op kernels need operands aligned to at least 2 elements
    int min_alignment_requirement() const override { return 2; }
    void do_exec(const ExecArgs& args) const override;
    std::string m_name;
};

//! float16 tensor-op cutlass matmul with split-K parallel reduction
class MatrixMulForwardImpl::AlgoFloat16TensorOpSplitK final
        : public AlgoCutlassMatrixMulBase {
public:
    AlgoFloat16TensorOpSplitK(AlgoParam algo_param)
            : AlgoCutlassMatrixMulBase{algo_param} {
        // the name encodes the mma instruction shape plus the full tile config
        m_name = ssprintf("CUTLASS_FLOAT16_TENSOR_OP_SPLIT_K_h%d%d%d_%s",
                          m_algo_param.instruction_m,
                          m_algo_param.instruction_n,
                          m_algo_param.instruction_k,
                          m_algo_param.to_string().c_str());
    }
    const char* name() const override { return m_name.c_str(); }
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::USABLE_DEPEND_ON_SHAPE;
    }
    MEGDNN_DECL_ALGO_TYPE(CUDA_FLOAT16_TENSOR_OP_SPLIT_K)

private:
    //! tensor-op kernels need operands aligned to at least 2 elements
    int min_alignment_requirement() const override { return 2; }
    void do_exec(const ExecArgs& args) const override;
    std::string m_name;
};
#endif
#endif

//! container owning one instance of every matmul algorithm; membership of
//! the conditional algos follows the build-time CUDA / float16 guards above
class MatrixMulForwardImpl::AlgoPack : NonCopyableObj {
private:
    AlgoBase::Mapper m_all_algos_map;

public:
    AlgoPack();
    AlgoCuBlas cublas;
    AlgoNaive naive;
#if CUDA_VERSION >= 10000
    AlgoUInt4x4x32WMMA wmma_uint4x4x32;
#endif
#if CUDA_VERSION >= 10010
    AlgoCuBlasLt cublas_lt;
#endif
#if !MEGDNN_DISABLE_FLOAT16
    AlgoBFloat16 bfloat16;
#endif
#if CUDA_VERSION >= 9020
    //! cutlass algos, one entry per tile configuration
    std::vector<AlgoFloat32SIMT> simt_float32;
    std::vector<AlgoFloat32SIMTSplitK> simt_float32_split_k;
    std::vector<AlgoFloat32SIMTGemvBatchedStrided>
            simt_float32_gemv_batched_strided;
#if CUDA_VERSION >= 10020
    std::vector<AlgoFloat16TensorOp> tensorop_float16;
    std::vector<AlgoFloat16TensorOpSplitK> tensorop_float16_split_k;
#endif
#endif
    std::vector<AlgoConv1X1CUDNN> conv1x1;
    //! non-owning pointers to every algorithm above
    std::vector<AlgoBase*> all_algos;

    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
    //! populate the cutlass algo vectors with the supported tile configs
    void fill_cutlass_algos();
};

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen