/**
 * \file dnn/src/cuda/batched_matrix_mul/algo.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#pragma once
#include <cuda.h>
#include "megdnn/dtype.h"
#include "megdnn/oprs.h"
#include "src/common/utils.h"
#include "src/cuda/batched_matrix_mul/opr_impl.h"
#include "src/cuda/matrix_mul/cublasLt_wrapper.h"
#include "src/common/metahelper.h"

#if CUDA_VERSION >= 10010
#include <cublasLt.h>
#endif

namespace megdnn {
namespace cuda {

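//! base class of all CUDA batched matrix-mul algorithms: each concrete
//! algorithm reports whether it can handle a given problem, how much
//! workspace it needs, and how to execute it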
class BatchedMatrixMulForwardImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
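    //! per-algorithm tag embedded into the algorithm descriptor via
    //! MEGDNN_DECL_ALGO_TYPE, so algorithms can be looked up in Mapper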
    enum class AlgoType : uint32_t {
        CUDA_BRUTE_FORCE,
        CUDA_CUBLAS,
        CUDA_CUBLASLT,
        CUDA_INT8X8X32,
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }
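    //! layout-only arguments used for availability / workspace queries;
    //! ExecArgs below additionally carries the tensors and the workspace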
    struct SizeArgs {
        BatchedMatrixMulForwardImpl* opr;
        TensorLayout layout_a, layout_b, layout_c;
        std::string to_string() const;
        SizeArgs(BatchedMatrixMulForwardImpl* o, const TensorLayout& A,
                 const TensorLayout& B, const TensorLayout& C);
        bool can_be_treated_as_int8x8x32() const {
            return layout_a.dtype.enumv() == layout_b.dtype.enumv() &&
                   (layout_a.dtype.enumv() == DTypeEnum::Int8 ||
                    layout_a.dtype.enumv() == DTypeEnum::QuantizedS8) &&
                   (layout_c.dtype.enumv() == DTypeEnum::Int32 ||
                    layout_c.dtype.enumv() == DTypeEnum::QuantizedS32) &&
                   opr->param().format == param::MatrixMul::Format::DEFAULT;
        }
    };
    struct ExecArgs : public SizeArgs {
        TensorND tensor_a, tensor_b, tensor_c;
        Workspace workspace;
        ExecArgs(BatchedMatrixMulForwardImpl* o, _megdnn_tensor_in A,
                 _megdnn_tensor_in B, _megdnn_tensor_in C,
                 _megdnn_workspace workspace);
    };
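    //! interface every concrete algorithm must implement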
    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;
    virtual const char* name() const = 0;
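    //! convenience wrappers over the interface above: availability under a
    //! workspace limit, availability with required / forbidden attributes,
    //! and a workspace-size assertion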
    bool is_available_wk(const SizeArgs& args, size_t limit) {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) &&
               is_available_wk(args, limit);
    }
    AlgoBase& check_workspace(const SizeArgs& args,
                              const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(req <= workspace.size,
                      "batched matrix mul fwd algo %s: required workspace %zu "
                      "bytes, got %zu",
                      name(), req, workspace.size);
        return *this;
    }
};
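//! generic fallback that lowers the batched problem onto the non-batched
//! MatrixMul operator, exposed as a sub-operator via get_subopr_list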
class BatchedMatrixMulForwardImpl::AlgoBruteForce final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
    using Param = MatrixMulForward::Param;

private:
    WorkspaceBundle get_workspace_bundle();

public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    const char* name() const override { return "BRUTE_FORCE"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_BRUTE_FORCE)

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts,
            const OperatorBase* opr) const override;
};
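//! algorithm backed by cuBLAS batched GEMM; ACCURACY_DEPEND_ON_BATCH signals
//! that numerical results may vary with the batch size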
class BatchedMatrixMulForwardImpl::AlgoCublas final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
public:
    AlgoCublas() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
    const char* name() const override { return "CUBLAS"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLAS)
};
#if CUDA_VERSION >= 10010
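//! cublasLt-backed algorithm, compiled only when building against
//! CUDA >= 10.1 (cublasLt.h available)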
class BatchedMatrixMulForwardImpl::AlgoCublasLt final : public AlgoBase {
public:
    AlgoCublasLt() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE |
               AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
    const char* name() const override { return "CUBLAS_LT"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLASLT)
};
#endif
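//! dedicated int8x8x32 algorithm for Int8/QuantizedS8 inputs producing
//! Int32/QuantizedS32 output (cf. SizeArgs::can_be_treated_as_int8x8x32)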
class BatchedMatrixMulForwardImpl::AlgoInt8x8x32 final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
public:
    AlgoInt8x8x32() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    const char* name() const override { return "INT8x8x32"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_INT8X8X32)
};

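//! owns one instance of every available algorithm and the mapping from
//! algorithm descriptor back to the corresponding AlgoBase pointer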
class BatchedMatrixMulForwardImpl::AlgoPack : NonCopyableObj {
private:
    AlgoBase::Mapper m_all_algos_map;
    MatrixMulForwardImpl::AlgoPack mm_pack;

public:
    AlgoPack();

    AlgoCublas cublas;
#if CUDA_VERSION >= 10010
    AlgoCublasLt cublasLt;
#endif
    AlgoInt8x8x32 int8x8x32;
    std::vector<AlgoBase*> all_algos;
    AlgoBruteForce brute_force;

    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
};
}  // namespace cuda
}  // namespace megdnn