/**
 * \file dnn/src/fallback/conv_bias/conv1x1/algos.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#include "src/common/opr_delegate.h"
#include "src/fallback/conv_bias/common.h"
#include "src/fallback/conv_bias/conv1x1/algos.h"
#include "src/fallback/conv_bias/conv1x1/conv1x1_dispatcher.h"
#include "src/fallback/conv_bias/conv1x1/conv1x1_strategy.h"
#include "src/fallback/conv_bias/opr_impl.h"

#include "megdnn/opr_param_defs.h"
#include "src/naive/convolution/helper.h"

#if MEGDNN_X86
#include "src/x86/conv_bias/postprocess_helper.h"
#elif (MEGDNN_ARMV7 || MEGDNN_AARCH64)
#include "src/arm_common/conv_bias/postprocess_helper.h"
#else
#include "src/common/postprocess_helper.h"
#endif

#include "midout.h"
MIDOUT_DECL(megdnn_fallback_conv1x1)

using namespace megdnn;
using namespace fallback;
#if MEGDNN_X86
using namespace x86;
#endif
using namespace conv1x1;

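//! Heuristic for the OC tile size: for large outputs (OH * OW >= 56 * 56) or
//! many output channels (OC >= 64) keep the configured block size; otherwise
//! split OC evenly over the threads and round the per-thread block up to 24.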
size_t ConvBiasImpl::AlgoConv1x1::get_oc_tile_size_heuristic(
        const NCBKernSizeParam& param) const {
    size_t OH = param.osz[0];
    size_t OW = param.osz[1];
    size_t OC = param.filter_meta.ocpg;
    if (OH * OW >= 56 * 56 || OC >= 64)
        return m_oc_block_size;
    size_t oc_block_size_one_thread = div_ceil(OC, param.nr_threads);
    return round_up<size_t>(oc_block_size_one_thread, 24);
}

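//! Build the workspace bundle according to the pack mode reported by the
//! selected matmul algorithm (DEFAULT / ONLY_PACKA / NO_PACK).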
WorkspaceBundle ConvBiasImpl::AlgoConv1x1::get_bundle_according_packmode(
        const NCBKernSizeParam& param) const {
    size_t OH = param.osz[0];
    size_t OW = param.osz[1];
    size_t compt_oc_block_size = get_oc_tile_size_heuristic(param);

    auto matmul_param =
            utils::get_matmul_kern_param(param, OH * OW, compt_oc_block_size);

    auto pack_mode = m_matmul_algo->packmode();
    if (pack_mode == MatrixMulImpl::AlgoBase::PackMode::DEFAULT) {
        MIDOUT_BEGIN(megdnn_fallback_conv1x1,
                     midout_iv("get_bundle_default"_hash)) {
            return Conv1x1Kerns<MatrixMulImpl::AlgoBase::PackMode::DEFAULT>()
                    .get_bundle(param, matmul_param, m_matmul_algo,
                                compt_oc_block_size);
        }
        MIDOUT_END();
    } else if (pack_mode == MatrixMulImpl::AlgoBase::PackMode::ONLY_PACKA) {
        MIDOUT_BEGIN(megdnn_fallback_conv1x1,
                     midout_iv("get_bundle_only_packa"_hash)) {
            return Conv1x1Kerns<MatrixMulImpl::AlgoBase::PackMode::ONLY_PACKA>()
                    .get_bundle(param, matmul_param, m_matmul_algo,
                                compt_oc_block_size);
        }
        MIDOUT_END();
    } else {
        MIDOUT_BEGIN(megdnn_fallback_conv1x1,
                     midout_iv("get_bundle_no_pack"_hash)) {
            return Conv1x1Kerns<MatrixMulImpl::AlgoBase::PackMode::NO_PACK>()
                    .get_bundle(param, matmul_param, m_matmul_algo,
                                compt_oc_block_size);
        }
        MIDOUT_END();
    }
    return {nullptr, {}};
}

size_t ConvBiasImpl::AlgoConv1x1::get_workspace(
        const NCBKernSizeParam& param) const {
    return get_bundle_according_packmode(param).total_size_in_bytes();
}

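//! Assemble the conv1x1 kernels for the current pack mode. When
//! weight_preprocess is true only the weight-packing kernels are returned,
//! otherwise the full compute kernels are returned.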
SmallVector<ConvBiasImpl::NCBKern>
ConvBiasImpl::AlgoConv1x1::get_kerns_according_packmode(
        const NCBKernSizeParam& param, bool weight_preprocess) const {
    size_t OH = param.osz[0];
    size_t OW = param.osz[1];
    size_t compt_oc_block_size = get_oc_tile_size_heuristic(param);
    auto pack_mode = m_matmul_algo->packmode();

    Conv1x1StrategyBase* conv1x1_strategy =
            Conv1x1Factory::make_conv1x1_strategy(param, pack_mode,
                                                  param.filter_meta.format);
    auto matmul_param =
            utils::get_matmul_kern_param(param, OH * OW, compt_oc_block_size);

    WorkspaceBundle whole_bundle = get_bundle_according_packmode(param);
    //! NO_PACK matmul algos do not implement get_bundle, so build it by hand
    WorkspaceBundle matmul_bundle = {nullptr, {}};
    if (pack_mode == MatrixMulImpl::AlgoBase::PackMode::NO_PACK) {
        matmul_bundle = {nullptr,
                         {0, 0, m_matmul_algo->get_workspace(matmul_param)}};
    } else {
        matmul_bundle = m_matmul_algo->get_bundle(matmul_param);
    }
    WorkspaceBundle thread_bundle = utils::get_thread_bundle(
            param, matmul_bundle.get_size(2), compt_oc_block_size);

    if (pack_mode == MatrixMulImpl::AlgoBase::PackMode::DEFAULT) {
        MIDOUT_BEGIN(megdnn_fallback_conv1x1,
                     midout_iv("get_kern_default"_hash)) {
            if (!weight_preprocess) {
                return Conv1x1Kerns<
                               MatrixMulImpl::AlgoBase::PackMode::DEFAULT>()
                        .get_kern(param, whole_bundle, matmul_bundle,
                                  thread_bundle, conv1x1_strategy,
                                  m_matmul_algo, compt_oc_block_size);
            } else {
                return Conv1x1Kerns<
                               MatrixMulImpl::AlgoBase::PackMode::DEFAULT>()
                        .get_kern_preprocess(param, whole_bundle, matmul_bundle,
                                             conv1x1_strategy, m_matmul_algo,
                                             compt_oc_block_size);
            }
        }
        MIDOUT_END();
    } else if (pack_mode == MatrixMulImpl::AlgoBase::PackMode::ONLY_PACKA) {
        MIDOUT_BEGIN(megdnn_fallback_conv1x1,
                     midout_iv("get_kern_only_packa"_hash)) {
            if (!weight_preprocess) {
                return Conv1x1Kerns<
                               MatrixMulImpl::AlgoBase::PackMode::ONLY_PACKA>()
                        .get_kern(param, whole_bundle, matmul_bundle,
                                  thread_bundle, conv1x1_strategy,
                                  m_matmul_algo, compt_oc_block_size);
            } else {
                return Conv1x1Kerns<
                               MatrixMulImpl::AlgoBase::PackMode::ONLY_PACKA>()
                        .get_kern_preprocess(param, whole_bundle, matmul_bundle,
                                             conv1x1_strategy, m_matmul_algo,
                                             compt_oc_block_size);
            }
        }
        MIDOUT_END();
    } else {
        MIDOUT_BEGIN(megdnn_fallback_conv1x1,
                     midout_iv("get_kern_no_pack"_hash)) {
            if (!weight_preprocess) {
                return Conv1x1Kerns<
                               MatrixMulImpl::AlgoBase::PackMode::NO_PACK>()
                        .get_kern(param, whole_bundle, matmul_bundle,
                                  thread_bundle, conv1x1_strategy,
                                  m_matmul_algo, compt_oc_block_size);
            } else {
                return Conv1x1Kerns<
                               MatrixMulImpl::AlgoBase::PackMode::NO_PACK>()
                        .get_kern_preprocess(param, whole_bundle, matmul_bundle,
                                             conv1x1_strategy, m_matmul_algo,
                                             compt_oc_block_size);
            }
        }
        MIDOUT_END();
    }
}

SmallVector<ConvBiasImpl::NCBKern> ConvBiasImpl::AlgoConv1x1::dispatch_kerns(
        const NCBKernSizeParam& param) const {
    return get_kerns_according_packmode(param, false);
}

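//! The preprocessed filter is one packed buffer per group whose byte size is
//! the packed-weight slot (index 0) of the workspace bundle, stored as Int8.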
SmallVector<TensorLayout>
ConvBiasImpl::AlgoConv1x1::deduce_preprocessed_filter_layout(
        const NCBKernSizeParam& param) const {
    MIDOUT_BEGIN(megdnn_fallback_conv1x1,
                 midout_iv("deduce_preprocessed_filter_layout"_hash)) {
        WorkspaceBundle wb = get_bundle_according_packmode(param);

        size_t GROUP = param.filter_meta.group;
        SmallVector<TensorLayout> preprocessed_layouts;
        preprocessed_layouts.push_back(
                {{GROUP, wb.get_size(0)}, dtype::Int8()});
        return preprocessed_layouts;
    }
    MIDOUT_END();
    return {};
}

SmallVector<ConvBiasImpl::NCBKern>
ConvBiasImpl::AlgoConv1x1::dispatch_preprocess_kerns(
        const NCBKernSizeParam& param) const {
    return get_kerns_according_packmode(param, true);
}

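//! conv1x1 is usable only when: the layout format is supported on this
//! architecture, the filter is 1x1 with zero padding and unit stride, the
//! src/filter dtypes match and are supported, 16/32-bit integer outputs use
//! identity post-process, the underlying matmul kernel and strategy are
//! usable, and there is no dilation.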
bool ConvBiasImpl::AlgoConv1x1::usable(const NCBKernSizeParam& param,
                                       AlgoSelectionStrategy) const {
    MIDOUT_BEGIN(megdnn_fallback_conv1x1, 0, 2) {
        size_t FH = param.filter_meta.spatial[0],
               FW = param.filter_meta.spatial[1];
        size_t PH = param.filter_meta.padding[0],
               PW = param.filter_meta.padding[1];
        size_t SH = param.filter_meta.stride[0],
               SW = param.filter_meta.stride[1];
        auto format = param.filter_meta.format;
        size_t OH = param.osz[0];
        size_t OW = param.osz[1];
#if MEGDNN_AARCH64 || MEGDNN_ARMV7
        if (format != param::ConvBias::Format::NCHW &&
            format != param::ConvBias::Format::NCHW44 &&
            format != param::ConvBias::Format::NCHW44_DOT) {
            return false;
        }
        //! hybrid mode is not supported
        if (param.filter_meta.format == param::ConvBias::Format::NCHW44 ||
            param.filter_meta.format == param::ConvBias::Format::NCHW44_DOT) {
            if (param.filter_meta.icpg < 4_z || param.filter_meta.icpg == 1 ||
                param.filter_meta.ocpg == 1) {
                return false;
            }
        }
#else   //! x86 only supports NCHW mode
        if (format != param::ConvBias::Format::NCHW) {
            return false;
        }
#endif
        //! param
        if (FH != 1 || FW != 1 || PH || PW || SH != 1 || SW != 1) {
            return false;
        }
        //! data type
        if (param.src_type.enumv() != param.filter_type.enumv() ||
            (param.src_type.enumv() != DTypeEnum::Int8 &&
             param.src_type.enumv() != DTypeEnum::QuantizedS8 &&
             param.src_type.enumv() != DTypeEnum::Quantized8Asymm &&
#if !MEGDNN_DISABLE_FLOAT16
             param.src_type.enumv() != DTypeEnum::Float16 &&
#endif
             param.src_type.enumv() != DTypeEnum::Float32)) {
            return false;
        }
        //! x86 disables Quantized8Asymm
#if MEGDNN_X86
        if (param.src_type.enumv() == DTypeEnum::Quantized8Asymm) {
            return false;
        }
#endif
        //! 8x8x16 and 8x8x32 require bias mode NO_BIAS and nonlineMode
        //! IDENTITY; otherwise return false, since 8x8x32 and 8x8x16 do not
        //! support PostProcess
        if (param.dst_type.enumv() == DTypeEnum::Int16 ||
            param.dst_type.enumv() == DTypeEnum::QuantizedS16 ||
            param.dst_type.enumv() == DTypeEnum::Int32 ||
            param.dst_type.enumv() == DTypeEnum::QuantizedS32) {
            if (param.nonlineMode != megdnn::NonlineMode::IDENTITY) {
                return false;
            }
        }
        MatrixMulImpl::KernSizeParam matmul_param =
                utils::get_matmul_kern_param(param, OH * OW,
                                             get_oc_tile_size_heuristic(param));
        bool matmul_usable = m_matmul_algo->usable(matmul_param);
        auto pack_mode = m_matmul_algo->packmode();
        bool strategy_usable = Conv1x1Factory::can_make_conv1x1_strategy(
                param, pack_mode, param.filter_meta.format);
        return matmul_usable && strategy_usable &&
               (param.filter_meta.dilation[0] ==
                        param.filter_meta.dilation[1] &&
                param.filter_meta.dilation[0] == 1) &&
               param.compute_mode == param::ConvBias::ComputeMode::DEFAULT;
    }
    MIDOUT_END();
    return false;
}

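//! Prefer conv1x1 whenever the output spatial size is larger than 1x1; for a
//! 1x1 output fall back to dtype- and OC-specific checks per architecture.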
bool ConvBiasImpl::AlgoConv1x1::is_preferred(
        const NCBKernSizeParam& param) const {
    size_t OH = param.osz[0];
    size_t OW = param.osz[1];
    if (OH * OW != 1) {
        return true;
    } else {
#if (MEGDNN_ARMV7 || MEGDNN_AARCH64)
        if (param.src_type.enumv() == DTypeEnum::Int8 &&
            param.filter_type.enumv() == DTypeEnum::Int8 &&
            param.dst_type.enumv() == DTypeEnum::Int16) {
            return true;
        }
#elif MEGDNN_X86
        size_t OC = param.filter_meta.ocpg;
        if (OC > 2 || param.src_type.enumv() == DTypeEnum::Float32)
            return true;
#endif
        return false;
    }
}

// vim: syntax=cpp.doxygen