/**
 * \file dnn/src/cuda/conv_bias/algo.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#include "src/cuda/conv_bias/algo.h"
#include "src/cuda/utils.h"

using namespace megdnn;
using namespace cuda;

ConvBiasForwardImpl::AlgoPack::AlgoPack() {
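    //! Runs once when the static sm_algo_pack below is initialized: the cudnn
    //! wrappers, the hand-written kernels, and (depending on CUDA_VERSION) the
    //! tensor-core / dp4a implicit-GEMM variants are all collected into
    //! all_algos, and m_all_algos_map lets an algorithm be recovered from its
    //! serialized desc.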
    non_cudnn_algos.push_back(&chanwise);
    non_cudnn_algos.push_back(&chanwise_small);
    non_cudnn_algos.push_back(&depthwise_large_filter);

    non_cudnn_algos.push_back(&inplace_matmul);
    non_cudnn_algos.push_back(&matmul);
    non_cudnn_algos.push_back(&matmul8x8x32);
    non_cudnn_algos.push_back(&batched_matmul);
    non_cudnn_algos.push_back(&int1_simple);
    fill_cudnn_algos();
    for (auto&& algo : cudnn_conv_bias_activations) {
        all_algos.push_back(&algo);
    }

    //! add conv+nonlinear algos
    std::vector<AlgoBase*> conv_algos;
    conv_algos.push_back(&chanwise);
    conv_algos.push_back(&chanwise_small);
    conv_algos.push_back(&depthwise_large_filter);
    conv_algos.push_back(&chanwise8x8x32);
    for (auto&& algo : cudnn_convs) {
        conv_algos.push_back(&algo);
    }
    conv_algos.push_back(&inplace_matmul);
    conv_algos.push_back(&matmul);
    conv_algos.push_back(&matmul8x8x32);
    conv_algos.push_back(&batched_matmul);
    conv_algos.push_back(&group);
    conv_algos.push_back(&int1_simple);

    for (auto&& algo : conv_algos) {
        all_algos.push_back(algo);
    }

    all_algos.push_back(&bfloat16);
    bfloat16_algos.push_back(&bfloat16);

    size_t all_algo_size = all_algos.size();
#if CUDA_VERSION >= 10000
    fill_imma_algos();
    all_algos.push_back(&wmma_quint4x4x32);
    for (auto&& algo : int8_nchw4_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : int8_chwn4_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : int8_chwn4_imma_reorder_filter) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : int8_chwn4_imma_unroll_width) {
        all_algos.push_back(&algo);
    }
#if CUDA_VERSION >= 10020
    for (auto&& algo : int8_nchw32_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : int8_nhwc_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : int4_int4_nchw64_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : uint4_int4_nchw64_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : int4_int4_nhwc_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : uint4_int4_nhwc_imma) {
        all_algos.push_back(&algo);
    }
#endif
#endif
    fill_dp4a_algos();
    for (auto&& algo : int8_nchw4_dotprod) {
        all_algos.push_back(&algo);
    }
    fill_dwconv_algos();
    all_algos.push_back(&int8_chwn4_dotprod);
    all_algos.push_back(&fallback_nchw_qs8);
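    //! everything pushed after all_algo_size was recorded (wmma/imma, dp4a,
    //! depthwise cutlass and the nchw qs8 fallback) is treated as non-cudnn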
    for (size_t i = all_algo_size; i < all_algos.size(); ++i) {
        non_cudnn_algos.push_back(all_algos[i]);
    }

    for (auto&& algo : all_algos) {
        m_all_algos_map.emplace(algo->info().desc, algo);
    }
}

ConvBiasForwardImpl::AlgoPack ConvBiasForwardImpl::sm_algo_pack;

MEGDNN_DEF_GET_ALGO_FROM_DESC(ConvBiasForwardImpl)

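//! layout-only overload: derives the canonized filter meta from src/filter and
//! forwards to the full constructor below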
ConvBiasForwardImpl::AlgoBase::SizeArgs::SizeArgs(
        const ConvBiasForwardImpl* o, const TensorLayout& src,
        const TensorLayout& filter, const TensorLayout& bias, const TensorLayout& z,
        const TensorLayout& dst, const PreprocessedFilter* preprocessed_filter)
        : SizeArgs(
                  o, src, filter, o->make_canonized_filter_meta(src.ndim, filter), bias,
                  z, dst, preprocessed_filter) {}

ConvBiasForwardImpl::AlgoBase::SizeArgs::SizeArgs(
        const ConvBiasForwardImpl* o, const TensorLayout& src,
        const TensorLayout& filter, const CanonizedFilterMeta& filter_meta,
        const TensorLayout& bias, const TensorLayout& z, const TensorLayout& dst,
        const PreprocessedFilter* preprocessed_filter)
        : BiasForwardSizeArgs{concrete_handle(o->handle()),
                              &src,
                              &filter,
                              &bias,
                              &z,
                              filter_meta,
                              &dst,
                              o->param().nonlineMode},
          opr{o},
          preprocessed_filter{preprocessed_filter} {}

ConvBiasForwardImpl::AlgoBase::ExecArgs::ExecArgs(
        ConvBiasForwardImpl* opr, _megdnn_tensor_in src, _megdnn_tensor_in filter,
        _megdnn_tensor_in bias, _megdnn_tensor_in z, _megdnn_tensor_out dst,
        _megdnn_workspace workspace, const PreprocessedFilter* preprocessed_filter)
        : SizeArgs(
                  opr, src.layout, filter.layout, bias.layout, z.layout, dst.layout,
                  preprocessed_filter),
          src_tensor{&src},
          filter_tensor{&filter},
          bias_tensor{&bias},
          z_tensor{&z},
          dst_tensor{&dst},
          workspace{workspace} {}

std::string ConvBiasForwardImpl::AlgoBase::SizeArgs::to_string() const {
    auto&& fm = filter_meta;
    MEGDNN_MARK_USED_VAR(fm);
    std::string nonlinear_mode_str;
    switch (nonlinear_mode) {
        case param::ConvBias::NonlineMode::RELU:
            nonlinear_mode_str = "RELU";
            break;
        case param::ConvBias::NonlineMode::SIGMOID:
            nonlinear_mode_str = "SIGMOID";
            break;
        case param::ConvBias::NonlineMode::IDENTITY:
            nonlinear_mode_str = "IDENTITY";
            break;
        case param::ConvBias::NonlineMode::H_SWISH:
            nonlinear_mode_str = "H_SWISH";
            break;
        default:
            megdnn_throw("invalid conv bias nonlinear mode");
    }
    return ssprintf(
            "src=%s, filter=%s, bias=%s, z=%s, dst=%s, "
            "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s, "
            "nonlinear_mode=%s",
            src_layout->to_string().c_str(), filter_layout->to_string().c_str(),
            bias_layout->to_string().c_str(), z_layout->to_string().c_str(),
            dst_layout->to_string().c_str(), fm.padding[0], fm.padding[1], fm.stride[0],
            fm.stride[1], fm.dilation[0], fm.dilation[1], !fm.should_flip,
            src_layout->dtype.name(), dst_layout->dtype.name(),
            nonlinear_mode_str.c_str());
}

void ConvBiasForwardImpl::AlgoPack::fill_cudnn_algos() {
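    //! every forward algorithm known to CudnnAlgoPack is registered both as a
    //! fused conv+bias+activation algorithm and as a plain convolution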
    for (auto&& algo : CudnnAlgoPack::conv_fwd_algos()) {
        cudnn_conv_bias_activations.push_back(algo.first);
        cudnn_convs.push_back(algo.first);
    }
}

#if CUDA_VERSION >= 10000
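//! imma (integer tensor-core) kernels need at least CUDA 10.0; the nchw32,
//! nhwc and int4 variants additionally need CUDA 10.2 (nested guard below)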
void ConvBiasForwardImpl::AlgoPack::fill_imma_algos() {
    int8_chwn4_imma.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemm::MMATileSize::IMMA16x16x16});
    int8_chwn4_imma.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemm::MMATileSize::IMMA32x8x16});
    int8_chwn4_imma.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemm::MMATileSize::IMMA8x32x16});
    int8_nchw4_imma.push_back(
            {AlgoInt8NCHW4IMMAImplicitGemm::MMATileSize::IMMA16x16x16});
    int8_nchw4_imma.push_back(
            {AlgoInt8NCHW4IMMAImplicitGemm::MMATileSize::IMMA32x8x16});
    int8_nchw4_imma.push_back(
            {AlgoInt8NCHW4IMMAImplicitGemm::MMATileSize::IMMA8x32x16});
    int8_chwn4_imma_reorder_filter.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemmReorderFilter::MMATileSize::IMMA16x16x16});
    int8_chwn4_imma_reorder_filter.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemmReorderFilter::MMATileSize::IMMA32x8x16});
    int8_chwn4_imma_reorder_filter.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemmReorderFilter::MMATileSize::IMMA8x32x16});
    int8_chwn4_imma_unroll_width.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemmUnrollWidth::MMATileSize::IMMA16x16x16});
    int8_chwn4_imma_unroll_width.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemmUnrollWidth::MMATileSize::IMMA32x8x16});
    int8_chwn4_imma_unroll_width.push_back(
            {AlgoInt8CHWN4IMMAImplicitGemmUnrollWidth::MMATileSize::IMMA8x32x16});
#if CUDA_VERSION >= 10020
    {
        using AlgoParam = AlgoInt8NCHW32IMMAImplicitGemm::AlgoParam;
        int8_nchw32_imma.emplace_back(AlgoParam{128, 256, 64, 64, 64, 64, 8, 8, 16, 2});
        int8_nchw32_imma.emplace_back(AlgoParam{256, 128, 64, 64, 64, 64, 8, 8, 16, 2});
        int8_nchw32_imma.emplace_back(AlgoParam{128, 128, 64, 64, 64, 64, 8, 8, 16, 2});
        int8_nchw32_imma.emplace_back(AlgoParam{128, 64, 64, 64, 32, 64, 8, 8, 16, 2});
        int8_nchw32_imma.emplace_back(AlgoParam{64, 128, 64, 32, 64, 64, 8, 8, 16, 2});
        int8_nchw32_imma.emplace_back(AlgoParam{128, 64, 32, 64, 32, 32, 8, 8, 16, 1});
        int8_nchw32_imma.emplace_back(AlgoParam{128, 32, 32, 64, 32, 32, 8, 8, 16, 1});
        int8_nchw32_imma.emplace_back(AlgoParam{64, 128, 32, 32, 64, 32, 8, 8, 16, 1});
        int8_nchw32_imma.emplace_back(AlgoParam{32, 128, 32, 32, 64, 32, 8, 8, 16, 1});
    }
    {
        using AlgoParam = AlgoInt8NHWCIMMAImplicitGemm::AlgoParam;
        int8_nhwc_imma.emplace_back(AlgoParam{64, 16, 32, 64, 16, 32, 8, 8, 16, 2, 16});
        int8_nhwc_imma.emplace_back(AlgoParam{64, 16, 32, 64, 16, 32, 8, 8, 16, 2, 8});
        int8_nhwc_imma.emplace_back(AlgoParam{64, 16, 32, 64, 16, 32, 8, 8, 16, 2, 4});
        int8_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 32, 64, 32, 32, 8, 8, 16, 1, 16});
        int8_nhwc_imma.emplace_back(AlgoParam{128, 32, 32, 64, 32, 32, 8, 8, 16, 1, 8});
        int8_nhwc_imma.emplace_back(AlgoParam{128, 32, 32, 64, 32, 32, 8, 8, 16, 1, 4});
    }
    {
        using AlgoParam = AlgoInt4Int4NCHW64IMMAImplicitGemm::AlgoParam;
        int4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 128, 128, 64, 64, 128, 8, 8, 32, 2});
        int4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 256, 128, 64, 64, 128, 8, 8, 32, 2});
        int4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 64, 128, 64, 64, 128, 8, 8, 32, 2});
        int4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1});
    }
    {
        using AlgoParam = AlgoUInt4Int4NCHW64IMMAImplicitGemm::AlgoParam;
        uint4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 128, 128, 64, 64, 128, 8, 8, 32, 2});
        uint4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 256, 128, 64, 64, 128, 8, 8, 32, 2});
        uint4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 64, 128, 64, 64, 128, 8, 8, 32, 2});
        uint4_int4_nchw64_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1});
    }
    {
        using AlgoParam = AlgoInt4Int4NHWCIMMAImplicitGemm::AlgoParam;
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 16, 64, 128, 16, 64, 8, 8, 32, 2, 32});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 16, 64, 128, 16, 64, 8, 8, 32, 2, 16});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 16, 64, 128, 16, 64, 8, 8, 32, 2, 8});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 8, 8, 32, 1, 32});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 8, 8, 32, 1, 16});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 8, 8, 32, 1, 8});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1, 32});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1, 16});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1, 8});
    }
    {
        using AlgoParam = AlgoUInt4Int4NHWCIMMAImplicitGemm::AlgoParam;
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 16, 64, 128, 16, 64, 8, 8, 32, 2, 32});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 16, 64, 128, 16, 64, 8, 8, 32, 2, 16});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 16, 64, 128, 16, 64, 8, 8, 32, 2, 8});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 8, 8, 32, 1, 32});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 8, 8, 32, 1, 16});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 8, 8, 32, 1, 8});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1, 32});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1, 16});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8, 8, 32, 1, 8});
    }
#endif
}
#endif

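//! cutlass implicit batched-matmul kernels for depthwise convolution; the f16
//! variants are only compiled when CUDA_VERSION >= 10010 (see the guard below)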
void ConvBiasForwardImpl::AlgoPack::fill_dwconv_algos() {
    using AlgoParam = AlgoCutlassConvolutionBase::AlgoParam;
    /// preferred algo
    f32_implicit_bmm.emplace_back(AlgoParam{64, 128, 8, 32, 64, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{128, 128, 8, 32, 64, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{128, 64, 8, 64, 32, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{128, 32, 8, 64, 32, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{32, 128, 8, 32, 64, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{64, 64, 8, 32, 64, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{32, 64, 8, 32, 64, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{32, 32, 8, 32, 32, 8, 1, 1, 1, 2});
    f32_implicit_bmm.emplace_back(AlgoParam{64, 32, 8, 64, 32, 8, 1, 1, 1, 2});
    for (auto&& algo : f32_implicit_bmm) {
        all_algos.push_back(&algo);
    }
#if CUDA_VERSION >= 10010
    /// preferred algo
    f16_implicit_bmm.emplace_back(AlgoParam{64, 128, 32, 32, 32, 32, 8, 8, 4, 2});
    f16_implicit_bmm.emplace_back(AlgoParam{128, 128, 32, 32, 32, 32, 8, 8, 4, 2});
    f16_implicit_bmm.emplace_back(AlgoParam{128, 256, 32, 64, 64, 32, 8, 8, 4, 2});
    f16_implicit_bmm.emplace_back(AlgoParam{128, 64, 32, 32, 32, 32, 8, 8, 4, 2});
    f16_implicit_bmm.emplace_back(AlgoParam{64, 64, 32, 32, 32, 32, 8, 8, 4, 2});
    for (auto&& algo : f16_implicit_bmm) {
        all_algos.push_back(&algo);
    }
#endif
}

void ConvBiasForwardImpl::AlgoPack::fill_dp4a_algos() {
    using AlgoParam = AlgoInt8NCHW4DotProdImplicitGemm::AlgoParam;
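    //! one tile configuration per entry; the ten fields follow AlgoParam's
    //! declaration order (presumably threadblock m/n/k, warp m/n/k,
    //! instruction m/n/k and stage count -- see the struct definition)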
    int8_nchw4_dotprod.emplace_back(AlgoParam{128, 128, 32, 64, 32, 32, 1, 1, 4, 2});
    int8_nchw4_dotprod.emplace_back(AlgoParam{128, 64, 32, 64, 32, 32, 1, 1, 4, 2});
    int8_nchw4_dotprod.emplace_back(AlgoParam{64, 128, 32, 64, 32, 32, 1, 1, 4, 2});
    int8_nchw4_dotprod.emplace_back(AlgoParam{32, 128, 32, 32, 64, 32, 1, 1, 4, 2});
    int8_nchw4_dotprod.emplace_back(AlgoParam{128, 32, 32, 64, 32, 32, 1, 1, 4, 2});
    int8_nchw4_dotprod.emplace_back(AlgoParam{32, 64, 32, 32, 64, 32, 1, 1, 4, 2});
    int8_nchw4_dotprod.emplace_back(AlgoParam{64, 32, 32, 64, 32, 32, 1, 1, 4, 2});
    int8_nchw4_dotprod.emplace_back(AlgoParam{16, 128, 16, 16, 128, 16, 1, 1, 4, 1});
    int8_nchw4_dotprod.emplace_back(AlgoParam{16, 64, 8, 16, 64, 8, 1, 1, 4, 2});
}

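//! maps a raw cudnnConvolutionFwdAlgo_t back to the wrapper registered in
//! fill_cudnn_algos(); throws if the enum value was never wrapped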
ConvBiasForwardImpl::AlgoBase* ConvBiasForwardImpl::AlgoPack::cudnn_conv_from_enum(
        cudnnConvolutionFwdAlgo_t algo) {
    for (auto&& i : cudnn_convs) {
        if (i.cudnn_enum() == algo)
            return &i;
    }
    megdnn_throw(ssprintf(
            "can not find cudnn conv fwd algorithm %d", static_cast<int>(algo)));
}

ConvBiasForwardImpl::AlgoBase* ConvBiasForwardImpl::AlgoPack::
        cudnn_conv_bias_act_from_enum(cudnnConvolutionFwdAlgo_t algo) {
    for (auto&& i : cudnn_conv_bias_activations) {
        if (i.cudnn_enum() == algo)
            return &i;
    }
    megdnn_throw(ssprintf(
            "can not find cudnn conv bias act algorithm %d", static_cast<int>(algo)));
}

// vim: syntax=cpp.doxygen