/**
 * \file dnn/src/cuda/conv_bias/helper.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "src/cuda/conv_bias/helper.h"

#include "src/cuda/utils.h"

namespace megdnn {
namespace cuda {

ConvBiasDesc::ConvBiasDesc() {
    cudnn_check(cudnnCreateActivationDescriptor(&act_desc));
    cudnn_check(cudnnCreateConvolutionDescriptor(&conv_desc));
#if CUDNN_VERSION >= 7000
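    // Opt in to Tensor Core math where the hardware and dtypes allow it;
    // cuDNN falls back to ordinary FMA kernels otherwise.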
    cudnn_check(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
#endif
}

ConvBiasDesc::~ConvBiasDesc() {
    cudnn_check(cudnnDestroyConvolutionDescriptor(conv_desc));
    cudnn_check(cudnnDestroyActivationDescriptor(act_desc));
}
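
// A minimal usage sketch (hypothetical caller, not part of this file):
//
//     ConvBiasDesc desc;
//     desc.set_conv_bias(src_dtype, param, nr_group);
//
// conv_desc / act_desc can then be handed to the fused cuDNN entry point,
// e.g. cudnnConvolutionBiasActivationForward.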

void ConvBiasDesc::set_conv_bias(DType data_type, const param::ConvBias& param,
                                 size_t nr_group) {
#if CUDNN_VERSION < 7100
    megdnn_throw(
            "ConvBias(CUDNN_ACTIVATION_IDENTITY) requires cuDNN 7.1 or higher");
#else
    cudnnConvolutionMode_t mode;
    using Param = param::ConvBias;
    switch (param.mode) {
        case Param::Mode::CROSS_CORRELATION:
            mode = CUDNN_CROSS_CORRELATION;
            break;
        case Param::Mode::CONVOLUTION:
            mode = CUDNN_CONVOLUTION;
            break;
        default:
            megdnn_throw("conv mode must be conv or xcorr.");
    }
    cudnn_check(cudnnSetConvolutionGroupCount(conv_desc, nr_group));
    cudnnDataType_t compute_type;
    switch (data_type.category()) {
        case DTypeCategory::FLOAT:
            compute_type = CUDNN_DATA_FLOAT;
            break;
        case DTypeCategory::INT:
        case DTypeCategory::QUANTIZED:
            compute_type = CUDNN_DATA_INT32;
            break;
        default:
            megdnn_throw("unspport data type for conv bias");
    }
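    // fp16 tensors defer to param.compute_mode via get_compute_type_fp16,
    // which presumably selects fp32 accumulation in mixed-precision mode.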
    if (data_type.enumv() == DTypeEnum::Float16) {
        auto comp_mode = param.compute_mode;
        compute_type = get_compute_type_fp16(comp_mode);
    }
    cudnn_check(cudnnSetConvolution2dDescriptor(
            conv_desc, param.pad_h, param.pad_w, param.stride_h, param.stride_w,
            param.dilate_h, param.dilate_w, mode, compute_type));

    switch (param.nonlineMode) {
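        // cuDNN's fused conv+bias epilogue only understands IDENTITY and
        // RELU, so SIGMOID and H_SWISH register as IDENTITY here; the real
        // activation is presumably applied by a separate elementwise pass.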
        case Param::NonlineMode::IDENTITY:
        case Param::NonlineMode::SIGMOID:
        case Param::NonlineMode::H_SWISH:
            cudnn_check(cudnnSetActivationDescriptor(
                    act_desc, CUDNN_ACTIVATION_IDENTITY,
                    CUDNN_NOT_PROPAGATE_NAN, 0));
            break;
        case Param::NonlineMode::RELU:
            cudnn_check(cudnnSetActivationDescriptor(
                    act_desc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN,
                    0));
            break;
        default:
            megdnn_throw("unsupported non linear mode");
    }
#endif
}

void ConvBiasDesc::set_conv(DType data_type, const param::ConvBias& param,
                            const size_t nr_group) {
    using Param = param::ConvBias;
    cudnnConvolutionMode_t mode;
    switch (param.mode) {
        case Param::Mode::CROSS_CORRELATION:
            mode = CUDNN_CROSS_CORRELATION;
            break;
        case Param::Mode::CONVOLUTION:
            mode = CUDNN_CONVOLUTION;
            break;
        default:
            megdnn_throw("conv mode must be conv or xcorr.");
    }
    cudnnDataType_t compute_type;
    MEGDNN_MARK_USED_VAR(compute_type);
    if (data_type.enumv() == DTypeEnum::Float32) {
        // FLOAT_CONFIG
        compute_type = CUDNN_DATA_FLOAT;
    } else if (data_type.enumv() == DTypeEnum::Float16) {
        auto comp_mode = param.compute_mode;
        compute_type = get_compute_type_fp16(comp_mode);
#if CUDNN_MAJOR >= 7
    } else if (data_type.category() == DTypeCategory::INT ||
               data_type.category() == DTypeCategory::QUANTIZED) {
        compute_type = CUDNN_DATA_INT32;
#endif
    } else {
        megdnn_throw("unspport data type for conv bias");
    }
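    // Grouped convolution needs cuDNN 7's group-count attribute; older
    // versions can only run the single-group case here.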
#if CUDNN_MAJOR >= 7
    cudnn_check(cudnnSetConvolutionGroupCount(conv_desc, nr_group));
#else
    megdnn_assert(nr_group == 1);
#endif

#if CUDNN_MAJOR >= 6
    cudnn_check(cudnnSetConvolution2dDescriptor(
            conv_desc, param.pad_h, param.pad_w, param.stride_h, param.stride_w,
            param.dilate_h, param.dilate_w, mode, compute_type));
#else
    cudnn_check(cudnnSetConvolution2dDescriptor(
            conv_desc, param.pad_h, param.pad_w, param.stride_h, param.stride_w,
            param.dilate_h, param.dilate_w, mode));
#endif
}

namespace conv_bias {

bool is_cudnn_supported(const BiasForwardSizeArgs& args) {
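    // 4-bit quantized convolutions are excluded from the cuDNN path and are
    // presumably served by dedicated CUDA kernels instead.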
    if (args.src_layout->dtype.enumv() == DTypeEnum::QuantizedS4 &&
        args.filter_layout->dtype.enumv() == DTypeEnum::QuantizedS4)
        return false;

    if (args.src_layout->dtype == args.filter_layout->dtype &&
        args.src_layout->dtype == dtype::BFloat16()) {
        return false;
    }

    // cuDNN raises CUDNN_STATUS_EXECUTION_FAILED on Tegra K1, so disable it
    // there.
    if (args.handle->is_tegra_k1())
        return false;

    // TODO: only NCHW and NCHW4 are accepted here, although cuDNN also
    // supports NHWC.
    if (args.filter_meta.format == param::Convolution::Format::NCHW4) {
        if (args.dst_layout->dtype.enumv() != DTypeEnum::Int8 &&
            args.dst_layout->dtype.enumv() != DTypeEnum::QuantizedS8) {
            return false;
        }
    } else if (args.filter_meta.format != param::Convolution::Format::NCHW) {
        return false;
    }
    auto& fm = args.filter_meta;
    bool supported = true;
    supported &= (fm.spatial_ndim == 2);
#if CUDNN_VERSION < 7000
    supported &= (fm.group == 1);
#endif
#if CUDNN_VERSION < 7500
    supported &= (fm.dilation[0] == 1 && fm.dilation[1] == 1);
#endif
    return supported;
}

SmallVector<size_t> matmul_get_workspace_bundle(
        const BiasForwardSizeArgs& args) {
    auto dtype = args.src_layout->dtype;
    auto&& fm = args.filter_meta;
    megdnn_assert(fm.group == 1);
    auto N = args.src_layout->shape[0];
    auto OC = fm.ocpg, IC = fm.icpg, FH = fm.spatial[0], FW = fm.spatial[1];
    auto OH = args.dst_layout->shape[2], OW = args.dst_layout->shape[3];
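    // Bundle layout: a dst-sized scratch buffer (presumably the raw matmul
    // output before the epilogue) plus the im2col-unrolled input of shape
    // (IC*FH*FW) x (OH*OW*N); should_flip adds a third buffer for the
    // flipped filter (see flip_filter below).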
    SmallVector<size_t> sizes{dtype.size() * args.dst_layout->total_nr_elems(),
                              dtype.size() * IC * FH * FW * OH * OW * N};
    if (args.filter_meta.should_flip) {
        sizes.push_back(dtype.size() * OC * IC * FH * FW);
    }
    return sizes;
}

void flip_filter(const BiasForwardSizeArgs& args, const Workspace& workspace,
                 void*& raw_ptr) {
    auto&& fm = args.filter_meta;
    megdnn_assert(fm.group == 1 && fm.spatial_ndim == 2);
    auto OC = fm.ocpg, IC = fm.icpg, FH = fm.spatial[0], FW = fm.spatial[1];
    auto dtype = fm.dtype;
    megdnn_assert(workspace.size >= dtype.size() * OC * IC * FH * FW);

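    // Flip the filter along both spatial axes: dst points at the last element
    // of each FH x FW window and the spatial strides are negated, so the
    // relayout writes the kernel reversed (convolution <-> cross-correlation).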
    TensorND src{raw_ptr, {{OC, IC, FH, FW}, dtype}},
            dst{workspace.raw_ptr + (FH * FW - 1) * dtype.size(), src.layout};
    dst.layout.stride[2] = -dst.layout.stride[2];
    dst.layout.stride[3] = -dst.layout.stride[3];
    args.handle->relayout_opr()->exec(src, dst);
    raw_ptr = workspace.raw_ptr;
}

} // conv_bias

} // cuda
} // megdnn

// vim: syntax=cpp.doxygen