/**
 * \file dnn/src/cuda/convolution/backward_data/algo.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#pragma once

#include <unordered_map>
#include "src/common/algo_base.h"
#include "src/common/metahelper.h"
#include "src/cuda/convolution/helper.h"
#include "src/cuda/cudnn_wrapper.h"

namespace megdnn {
namespace cuda {

/*!
 * \brief base class for convolution algos
 *
 * All the algo impls should try to support non-contiguous batch dim, for group
 * conv execution.
 */
class ConvolutionBackwardDataImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
    enum class AlgoType : uint32_t {
        CUDA_CUDNN,
        CUDA_MATMUL,
        CUDA_CHANWISE,
        CUDA_CHANWISE_SMALL,
        CUDA_BFLOAT16,
        CUDA_GROUP_CONV_GENERAL,
        CUDA_IMPLICIT_GEMM_NCHW4_DOTPROD_INT8,
        CUDA_IMPLICIT_GEMM_NCHW_DOTPROD_INT8,
        CUDA_IMPLICIT_GEMM_NHWC_IMMA_INT8
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }
    struct SizeArgs {
        HandleImpl* handle;
        CanonizedFilterMeta filter_meta;
        const TensorLayout *diff_layout, *grad_layout, *filter_layout;
        const ConvolutionBackwardDataImpl* opr;

        std::string to_string() const;
        void init_desc(convolution::CUDNNBwdDataDescs& desc) const {
            desc.set(filter_meta, *diff_layout, *grad_layout, opr->param());
        }
        SizeArgs(
                const ConvolutionBackwardDataImpl* opr, const TensorLayout& filter,
                const TensorLayout& diff, const TensorLayout& grad);
        SizeArgs(
                const ConvolutionBackwardDataImpl* opr, const TensorLayout& filter,
                const CanonizedFilterMeta& filter_meta, const TensorLayout& diff,
                const TensorLayout& grad);

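        //! note: in the backward-data view below, \c grad_layout takes the
        //! place of the forward src and \c diff_layout of the forward dst.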
        convolution::ForwardSizeArgs as_fwd_args() const {
            return {handle, grad_layout, filter_layout, filter_meta, diff_layout};
        }
    };
    struct ExecArgs : public SizeArgs {
        const TensorND *filter_tensor, *diff_tensor, *grad_tensor;
        Workspace workspace;

        ExecArgs(
                const ConvolutionBackwardDataImpl* opr, _megdnn_tensor_in filter,
                _megdnn_tensor_in diff, _megdnn_tensor_out grad,
                _megdnn_workspace workspace);
    };
    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;

    bool is_available_wk(const SizeArgs& args, size_t limit) {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }

    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) && is_available_wk(args, limit);
    }
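
    /*!
     * Minimal usage sketch (illustrative only: \c algo, \c opr, the layouts
     * and the 64 MiB workspace limit are hypothetical):
     * \code
     * SizeArgs args{opr, filter_layout, diff_layout, grad_layout};
     * if (algo->is_available_attribute(
     *             args, AlgoAttribute::REPRODUCIBLE, AlgoAttribute::DEFAULT,
     *             size_t(64) << 20)) {
     *     size_t wk = algo->get_workspace_in_bytes(args);
     *     // allocate wk bytes, build an ExecArgs and call exec()
     * }
     * \endcode
     */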

    AlgoBase& check_workspace(const SizeArgs& args, const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(
                req <= workspace.size,
                "conv bwd data algo %s: "
                "required workspace %zu bytes, got %zu",
                name(), req, workspace.size);
        return *this;
    }
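
    //! typical call-site sketch (\c algo and \c args are hypothetical); since
    //! ExecArgs extends SizeArgs and carries its workspace, one may write:
    //! \code
    //! algo->check_workspace(args, args.workspace).exec(args);
    //! \endcode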

    virtual bool is_cudnn() const { return false; }
};

class ConvolutionBackwardDataImpl::AlgoCUDNN final : public AlgoBase {
    cudnnConvolutionBwdDataAlgo_t m_cudnn_enum;
    CudnnAlgoPack::Attr m_attr;

public:
    AlgoCUDNN(cudnnConvolutionBwdDataAlgo_t cudnn_enum) : m_cudnn_enum(cudnn_enum) {
        megdnn_assert(
                CudnnAlgoPack::conv_bwd_data_algos().find(cudnn_enum) !=
                CudnnAlgoPack::conv_bwd_data_algos().end());
        m_attr = CudnnAlgoPack::conv_bwd_data_algos().at(cudnn_enum);
    }

    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    const char* name() const override { return m_attr.name.c_str(); }
    AlgoAttribute attribute() const override {
        auto ret = static_cast<AlgoAttribute>(0);
        if (m_attr.is_reproducible) {
            ret |= AlgoAttribute::REPRODUCIBLE;
        }
        if (m_attr.accuracy_depend_on_batch) {
            ret |= AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
        }
        return ret;
    }
    cudnnConvolutionBwdDataAlgo_t cudnn_enum() const { return m_cudnn_enum; }

    bool is_cudnn() const override { return true; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUDNN)

    std::string param() const override {
        std::string ret;
        serialize_write_pod(m_cudnn_enum, ret);
        return ret;
    }
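    //! note: the cudnn enum serialized above gives each cudnn variant a
    //! distinct AlgorithmDesc key in AlgoBase::Mapper.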
};

//! im2col and matmul, with dilation
class ConvolutionBackwardDataImpl::AlgoMatmul final : public AlgoBase {
    template <typename T>
    static void exec_internal(const ExecArgs& args);

public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts, const OperatorBase* opr) const override;

    const char* name() const override { return "MATMUL"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_MATMUL)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE | AlgoAttribute::ACCURACY_DEPEND_ON_BATCH;
    }
};

class ConvolutionBackwardDataImpl::AlgoChanwise final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    const char* name() const override { return "CHANNEL_WISE"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CHANWISE)
    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }
};

class ConvolutionBackwardDataImpl::AlgoChanwiseSmall final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    const char* name() const override { return "CHANNEL_WISE_SMALL"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CHANWISE_SMALL)
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE | AlgoAttribute::USABLE_DEPEND_ON_SHAPE;
    }
};

class ConvolutionBackwardDataImpl::AlgoBFloat16 final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts, const OperatorBase* opr) const override;

    const char* name() const override { return "CONVOLUTION_BACKWARD_DATD_BFLOAT16"; }

    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }

private:
    WorkspaceBundle get_workspace_bundle(void* ptr, const SizeArgs& args) const;
    MEGDNN_DECL_ALGO_TYPE(CUDA_BFLOAT16)
};

//! implement group conv by another algo
class ConvolutionBackwardDataImpl::AlgoGroupConvGeneral final : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts, const OperatorBase* opr) const override;

    const char* name() const override { return "CUDA:GROUP_CONV_BACKWARD_DATA"; }

    MEGDNN_DECL_ALGO_TYPE(CUDA_GROUP_CONV_GENERAL)
    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }

private:
    WorkspaceBundle get_workspace_bundle(void* ptr, const SizeArgs& args) const;
};

class ConvolutionBackwardDataImpl::AlgoInt8NCHW4DotProdImplicitGemm final
        : public AlgoBase {
public:
    struct AlgoParam {
        int threadblock_m;
        int threadblock_n;
        int threadblock_k;
        int warp_m;
        int warp_n;
        int warp_k;
        int stage;
        std::string to_string() {
            return ssprintf(
                    "_%dX%dX%d_%dX%dX%d_%dstage", threadblock_m, threadblock_n,
                    threadblock_k, warp_m, warp_n, warp_k, stage);
        }
    };
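    //! e.g. a hypothetical AlgoParam{64, 64, 32, 32, 32, 32, 2} stringifies to
    //! "_64X64X32_32X32X32_2stage", so the full algorithm name becomes
    //! "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM_64X64X32_32X32X32_2stage".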
    AlgoInt8NCHW4DotProdImplicitGemm(AlgoParam algo_param)
            : m_algo_param{algo_param},
              m_name{ssprintf(
                      "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM%s",
                      m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_IMPLICIT_GEMM_NCHW4_DOTPROD_INT8)
private:
    WorkspaceBundle get_workspace_bundle(dt_byte* raw_ptr, const SizeArgs& args) const;
    const void* get_available_op(const SizeArgs& args) const;
    AlgoParam m_algo_param;
    std::string m_name;
};

class ConvolutionBackwardDataImpl::AlgoInt8NCHWDotProdImplicitGemm final
        : public AlgoBase {
public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;
    const char* name() const override { return "INT8_NCHW_DOTPROD_IMPLICIT_GEMM"; }
    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_IMPLICIT_GEMM_NCHW_DOTPROD_INT8)

private:
    WorkspaceBundle get_workspace_bundle(dt_byte* raw_ptr, const SizeArgs& args) const;
    const void* get_available_op(const SizeArgs& args) const;
};

class ConvolutionBackwardDataImpl::AlgoInt8NHWCIMMAImplicitGemm final
        : public AlgoBase {
public:
    struct AlgoParam {
        int threadblock_m;
        int threadblock_n;
        int threadblock_k;
        int warp_m;
        int warp_n;
        int warp_k;
        int stage;
        int access_size;
        std::string to_string() {
            return ssprintf(
                    "_%dX%dX%d_%dX%dX%d_%dstage_%d", threadblock_m, threadblock_n,
                    threadblock_k, warp_m, warp_n, warp_k, stage, access_size);
        }
    };
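    //! e.g. a hypothetical AlgoParam{64, 64, 32, 32, 32, 32, 2, 4} stringifies
    //! to "_64X64X32_32X32X32_2stage_4" (the trailing field is \c access_size),
    //! so the algorithm is named
    //! "INT8_NHWC_IMMA_IMPLICIT_GEMM_64X64X32_32X32X32_2stage_4".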
    AlgoInt8NHWCIMMAImplicitGemm(AlgoParam algo_param)
            : m_algo_param{algo_param},
              m_name{ssprintf(
                      "INT8_NHWC_IMMA_IMPLICIT_GEMM%s",
                      m_algo_param.to_string().c_str())} {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;
    const char* name() const override { return m_name.c_str(); }
    AlgoAttribute attribute() const override { return AlgoAttribute::REPRODUCIBLE; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_IMPLICIT_GEMM_NHWC_IMMA_INT8)
private:
    WorkspaceBundle get_workspace_bundle(dt_byte* raw_ptr, const SizeArgs& args) const;
    const void* get_available_op(const SizeArgs& args) const;
    void reorder_filter(
            const ExecArgs& args, const int interleaved, int8_t* reordered_filter) const;
    AlgoParam m_algo_param;
    std::string m_name;
};

class ConvolutionBackwardDataImpl::AlgoPack : NonCopyableObj {
    // defined in cudnn.cpp
    void fill_cudnn_algos();
    // defined in implicit_gemm_int8_nchw4_dp4a.cpp
    void fill_int8_dp4a_algos();
    // defined in implicit_gemm_int8_nhwc_imma.cpp
    void fill_int8_imma_algos();

    AlgoBase::Mapper m_all_algos_map;

public:
    AlgoPack();

    std::vector<AlgoCUDNN> cudnn;
    AlgoMatmul matmul;
    AlgoChanwise chanwise;
    AlgoChanwiseSmall chanwise_small;
    AlgoBFloat16 bfloat16;
    AlgoGroupConvGeneral group;
    std::vector<AlgoInt8NCHW4DotProdImplicitGemm> int8_nchw4_dotprod;
    AlgoInt8NCHWDotProdImplicitGemm int8_nchw_dotprod;
    std::vector<AlgoInt8NHWCIMMAImplicitGemm> int8_nhwc_imma;

    std::vector<AlgoBase*>
            //! all algorithms
            all_algos,
            //! non-cudnn algos, used for heuristic if cudnn is not supported
            non_cudnn_algos, bfloat16_algos, int8_algos;

    AlgoCUDNN* cudnn_from_enum(cudnnConvolutionBwdDataAlgo_t algo);

    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
};
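
/*!
 * Selection sketch (illustrative; \c pack, \c args and \c workspace_limit are
 * hypothetical): scan the packed algorithms in order and take the first one
 * that is usable within the given workspace budget.
 * \code
 * ConvolutionBackwardDataImpl::AlgoBase* pick_first_usable(
 *         const ConvolutionBackwardDataImpl::AlgoPack& pack,
 *         const ConvolutionBackwardDataImpl::AlgoBase::SizeArgs& args,
 *         size_t workspace_limit) {
 *     for (auto algo : pack.all_algos)
 *         if (algo->is_available_wk(args, workspace_limit))
 *             return algo;
 *     return nullptr;
 * }
 * \endcode
 */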

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen