// src/cuda/batched_matrix_mul/algo.h
#pragma once
#include <cuda.h>
#include "megdnn/dtype.h"
#include "megdnn/oprs.h"
#include "src/common/metahelper.h"
#include "src/common/utils.h"
#include "src/cuda/batched_matrix_mul/opr_impl.h"
#include "src/cuda/matrix_mul/cublasLt_wrapper.h"

#if CUDA_VERSION >= 10010
#include <cublasLt.h>
#endif

namespace megdnn {
namespace cuda {

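//! Base class for all CUDA batched matrix-mul algorithms. Each concrete
//! algorithm reports its availability, workspace demand and attributes for
//! a given problem, and executes it; the dispatcher drives these queries
//! through the SizeArgs / ExecArgs bundles defined below.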
class BatchedMatrixMulForwardImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
    enum class AlgoType : uint32_t {
        CUDA_BRUTE_FORCE,
        CUDA_CUBLAS,
        CUDA_CUBLASLT,
        CUDA_INT8X8X32,
        CUDA_NAIVE_BMM,
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }
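
    //! Problem description used for availability and workspace queries:
    //! the operator plus the layouts of operands A, B and output C.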
    struct SizeArgs {
        BatchedMatrixMulForwardImpl* opr;
        TensorLayout layout_a, layout_b, layout_c;
        std::string to_string() const;
        SizeArgs(
                BatchedMatrixMulForwardImpl* o, const TensorLayout& A,
                const TensorLayout& B, const TensorLayout& C);
        bool can_be_treated_as_int8x8x32() const {
            return layout_a.dtype.enumv() == layout_b.dtype.enumv() &&
                   (layout_a.dtype.enumv() == DTypeEnum::Int8 ||
                    layout_a.dtype.enumv() == DTypeEnum::QuantizedS8) &&
                   (layout_c.dtype.enumv() == DTypeEnum::Int32 ||
                    layout_c.dtype.enumv() == DTypeEnum::QuantizedS32) &&
                   opr->param().format == param::MatrixMul::Format::DEFAULT;
        }
    };
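
    //! SizeArgs extended with the concrete input/output tensors and the
    //! workspace used at execution time.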
    struct ExecArgs : public SizeArgs {
        TensorND tensor_a, tensor_b, tensor_c;
        Workspace workspace;
        ExecArgs(
                BatchedMatrixMulForwardImpl* o, _megdnn_tensor_in A,
                _megdnn_tensor_in B, _megdnn_tensor_in C, _megdnn_workspace workspace);
    };
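
    //! Interface each concrete algorithm implements: availability check,
    //! workspace query and execution.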
    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;
    virtual const char* name() const = 0;
    bool is_available_wk(const SizeArgs& args, size_t limit) {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
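
    //! Like is_available_wk(), but additionally requires all attributes in
    //! \p positive_attr and none of those in \p negative_attr.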
    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) && is_available_wk(args, limit);
    }
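
    //! Assert that \p workspace can hold the bytes reported by
    //! get_workspace_in_bytes(); returns *this so the check can be chained
    //! before exec().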
    AlgoBase& check_workspace(const SizeArgs& args, const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(
                req <= workspace.size,
                "batched matrix mul fwd algo %s: required workspace %zu "
                "bytes, got %zu",
                name(), req, workspace.size);
        return *this;
    }
};
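
//! Falls back to the non-batched MatrixMulForward operator, which is
//! exposed to the dispatcher as a sub-operator via get_subopr_list().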
class BatchedMatrixMulForwardImpl::AlgoBruteForce final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
    using Param = MatrixMulForward::Param;

private:
    WorkspaceBundle get_workspace_bundle();

public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }
    const char* name() const override { return "BRUTE_FORCE"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_BRUTE_FORCE)

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts, const OperatorBase* opr) const override;
};

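//! Reference implementation that needs no workspace; carries the NAIVE
//! attribute in addition to REPRODUCIBLE.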
class BatchedMatrixMulForwardImpl::AlgoNaive final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
    using Param = MatrixMulForward::Param;

private:
    WorkspaceBundle get_workspace_bundle();

public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override {
        return 0;
    }
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE | AlgoAttribute::NAIVE;
    }
    const char* name() const override { return "NAIVE_BMM"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_NAIVE_BMM)
};

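//! Backed by cuBLAS batched GEMM; marked ACCURACY_DEPEND_ON_BATCH since
//! results may vary with the batch size.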
class BatchedMatrixMulForwardImpl::AlgoCublas final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
public:
    AlgoCublas() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE | AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
    const char* name() const override { return "CUBLAS"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLAS)
};
#if CUDA_VERSION >= 10010
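//! Backed by the cublasLt library, available from CUDA 10.1 on.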
class BatchedMatrixMulForwardImpl::AlgoCublasLt final : public AlgoBase {
public:
    AlgoCublasLt() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE | AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
    const char* name() const override { return "CUBLAS_LT"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLASLT)
};
#endif
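
//! Dedicated int8 x int8 -> int32 kernel; see
//! SizeArgs::can_be_treated_as_int8x8x32() for the dtypes and format it
//! accepts.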
class BatchedMatrixMulForwardImpl::AlgoInt8x8x32 final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
public:
    AlgoInt8x8x32() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }
    const char* name() const override { return "INT8x8x32"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_INT8X8X32)
};

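//! Container owning one instance of every algorithm above, plus a map from
//! algorithm descriptors back to those instances.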
class BatchedMatrixMulForwardImpl::AlgoPack : NonCopyableObj {
private:
    AlgoBase::Mapper m_all_algos_map;
    MatrixMulForwardImpl::AlgoPack mm_pack;

public:
    AlgoPack();

    AlgoCublas cublas;
#if CUDA_VERSION >= 10010
    AlgoCublasLt cublasLt;
#endif
    AlgoInt8x8x32 int8x8x32;
    std::vector<AlgoBase*> all_algos;
    AlgoBruteForce brute_force;
    AlgoNaive naive_bmm;

    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
};
}  // namespace cuda
}  // namespace megdnn