diff --git a/dnn/CMakeLists.txt b/dnn/CMakeLists.txt index 22c5ada84c3df69ec191f4c2e06a5c0544dfe0ba..6bdb2681723c6652a36ad6b5033d41c80f6abe52 100644 --- a/dnn/CMakeLists.txt +++ b/dnn/CMakeLists.txt @@ -57,6 +57,13 @@ add_dependencies(opr_param_defs _opr_param_defs) install(TARGETS opr_param_defs EXPORT ${MGE_EXPORT_TARGETS}) +if(MGE_WITH_CUDA) + add_library(cutlass INTERFACE) + target_include_directories(cutlass + INTERFACE + $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/third_party/cutlass/include>) + install(TARGETS cutlass EXPORT ${MGE_EXPORT_TARGETS}) +endif() if(MGE_WITH_TEST) if(NOT MGE_BUILD_IMPERATIVE_RT) diff --git a/dnn/scripts/Makefile b/dnn/scripts/Makefile index f21cd594626111b601fd44cf3b08f35f1e946f6e..a348e48f763d2379883358e505f43c875f74b0ff 100644 --- a/dnn/scripts/Makefile +++ b/dnn/scripts/Makefile @@ -36,8 +36,9 @@ all: ${PARAM_DEFS} ${ELEMWISE_IMPL} ${CUDA_CONV_IMPL} ../src/cuda/conv_bias/int8/kimpl: gen_cuda_conv_bias_kern_impls.py ./$^ --type dp4a $@ -../src/cuda/conv_bias/int8_imma/kimpl: gen_cuda_conv_bias_kern_impls.py - ./$^ --type imma $@ +../src/cuda/conv_bias/int8_imma/kimpl: gen_cuda_conv_bias_kern_impls.py gen_cutlass_conv_bias_kern_impls.py + ./gen_cuda_conv_bias_kern_impls.py --type imma $@ + ./gen_cutlass_conv_bias_kern_impls.py --type imma $@ ../src/cuda/batch_conv_bias/int8/kimpl: gen_cuda_batch_conv_bias_kern_impls.py ./$^ --type dp4a $@ diff --git a/dnn/src/CMakeLists.txt b/dnn/src/CMakeLists.txt index 7efd12fdc1995456d37752573c1a6d7bbb5ae23c..579878e03fb5b5aea69147389a2d43d8bfa61b2d 100644 --- a/dnn/src/CMakeLists.txt +++ b/dnn/src/CMakeLists.txt @@ -51,6 +51,9 @@ add_definitions(${LIBMEGDNN_DEF}) add_library(megdnn EXCLUDE_FROM_ALL OBJECT ${SOURCES}) target_link_libraries(megdnn PUBLIC opr_param_defs) +if(MGE_WITH_CUDA) + target_link_libraries(megdnn PUBLIC cutlass) +endif() if(${MGE_ARCH} STREQUAL "x86_64" OR ${MGE_ARCH} STREQUAL "i386" OR ${MGE_ARCH} STREQUAL "armv7" OR ${MGE_ARCH} STREQUAL "aarch64") if(MGE_ENABLE_CPUINFO) diff --git a/dnn/src/cuda/conv_bias/algo.cpp b/dnn/src/cuda/conv_bias/algo.cpp index 2a2e73204c27fd8335cf8ba3aebb695984f22022..6d3b1b4e07b73fc9797fbdb32cc0b8605ab71ac3 100644 --- a/dnn/src/cuda/conv_bias/algo.cpp +++ b/dnn/src/cuda/conv_bias/algo.cpp @@ -85,6 +85,11 @@ ConvBiasForwardImpl::AlgoPack::AlgoPack() { for (auto&& algo : int8_chwn4_imma_unroll_width) { all_algos.push_back(&algo); } +#if CUDA_VERSION >= 10020 + for (auto&& algo : int8_nchw32_imma) { + all_algos.push_back(&algo); + } +#endif #endif all_algos.push_back(&int8_nchw4_dotprod); all_algos.push_back(&int8_chwn4_dotprod); @@ -233,6 +238,18 @@ void ConvBiasForwardImpl::AlgoPack::fill_imma_algos() { int8_chwn4_imma_unroll_width.push_back( {AlgoInt8CHWN4IMMAImplicitGemmUnrollWidth::MMATileSize:: IMMA8x32x16}); +#if CUDA_VERSION >= 10020 + { + using AlgoParam = AlgoInt8NCHW32IMMAImplicitGemm::AlgoParam; + int8_nchw32_imma.emplace_back(AlgoParam{128, 256, 64, 64, 64, 64}); + int8_nchw32_imma.emplace_back(AlgoParam{256, 128, 64, 64, 64, 64}); + int8_nchw32_imma.emplace_back(AlgoParam{128, 128, 64, 64, 64, 64}); + int8_nchw32_imma.emplace_back(AlgoParam{64, 128, 64, 32, 64, 64}); + int8_nchw32_imma.emplace_back(AlgoParam{128, 64, 64, 64, 32, 64}); + int8_nchw32_imma.emplace_back(AlgoParam{64, 64, 64, 32, 32, 64}); + int8_nchw32_imma.emplace_back(AlgoParam{32, 64, 64, 32, 16, 64}); + } +#endif } #endif
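The seven AlgoParam entries registered in fill_imma_algos() above follow the usual CUTLASS constraint that each warp tile evenly partitions its threadblock tile. A quick standalone check (an illustrative sketch, not part of the patch; AlgoParam is restated locally):

```cpp
// Hypothetical sanity check: each threadblock tile must be divisible by
// its warp tile, since a threadblock is partitioned into warp-level MMAs.
#include <cassert>

struct AlgoParam {
    int threadblock_m, threadblock_n, threadblock_k;
    int warp_m, warp_n, warp_k;
};

void check_tile_config(const AlgoParam& p) {
    assert(p.threadblock_m % p.warp_m == 0);
    assert(p.threadblock_n % p.warp_n == 0);
    assert(p.threadblock_k % p.warp_k == 0);
}

int main() {
    // the seven shapes registered in AlgoPack::fill_imma_algos()
    const AlgoParam configs[] = {
            {128, 256, 64, 64, 64, 64}, {256, 128, 64, 64, 64, 64},
            {128, 128, 64, 64, 64, 64}, {64, 128, 64, 32, 64, 64},
            {128, 64, 64, 64, 32, 64},  {64, 64, 64, 32, 32, 64},
            {32, 64, 64, 32, 16, 64}};
    for (auto&& p : configs) check_tile_config(p);
}
```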
diff --git a/dnn/src/cuda/conv_bias/algo.h b/dnn/src/cuda/conv_bias/algo.h index 4a87bb765d1dae73442efe4f90d4a84afe9d0312..08da5449e3150db8f45809b46fe47603ca7bd1a9 100644 --- a/dnn/src/cuda/conv_bias/algo.h +++ b/dnn/src/cuda/conv_bias/algo.h @@ -499,6 +499,41 @@ private: }; #endif +#if CUDA_VERSION >= 10020 +class ConvBiasForwardImpl::AlgoInt8NCHW32IMMAImplicitGemm final + : public AlgoBase { +public: + struct AlgoParam { + int threadblock_m; + int threadblock_n; + int threadblock_k; + int warp_m; + int warp_n; + int warp_k; + }; + AlgoInt8NCHW32IMMAImplicitGemm(AlgoParam algo_param) + : m_algo_param{algo_param} { + m_name = ConvBias::algo_name( + ssprintf("INT8_NCHW32_IMMA_IMPLICIT_GEMM_%s", + to_string(m_algo_param).c_str()), + ConvBias::DirectParam{}); + } + bool is_available(const SizeArgs& args) const override; + size_t get_workspace_in_bytes(const SizeArgs& args) const override; + void exec(const ExecArgs& args) const override; + const char* name() const override { return m_name.c_str(); } + bool is_reproducible() const override { return true; } + static std::string to_string(AlgoParam algo_param); + +private: + WorkspaceBundle get_workspace_bundle(dt_byte* raw_ptr, + const SizeArgs& args) const; + + AlgoParam m_algo_param; + std::string m_name; +}; +#endif + class ConvBiasForwardImpl::AlgoBFloat16 final : public AlgoBase { public: AlgoBFloat16(AlgoBase* impl); @@ -553,6 +588,9 @@ public: int8_chwn4_imma_reorder_filter; std::vector<AlgoInt8CHWN4IMMAImplicitGemmUnrollWidth> int8_chwn4_imma_unroll_width; +#endif +#if CUDA_VERSION >= 10020 + std::vector<AlgoInt8NCHW32IMMAImplicitGemm> int8_nchw32_imma; #endif std::vector> gconv_refhold; std::vector> bfloat16_refhold;
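The name built in the constructor above combines a fixed prefix with to_string(m_algo_param), which is defined later in implicit_gemm_int8_nchw32_imma.cpp. A minimal sketch of the resulting string, leaving out whatever decoration ConvBias::algo_name adds on top:

```cpp
// Sketch only: reproduces the to_string() format from
// implicit_gemm_int8_nchw32_imma.cpp for one tile configuration.
#include <cstdio>

int main() {
    int tb_m = 128, tb_n = 128, tb_k = 64;  // threadblock tile
    int wp_m = 64, wp_n = 64, wp_k = 64;    // warp tile
    char name[128];
    std::snprintf(name, sizeof(name),
                  "INT8_NCHW32_IMMA_IMPLICIT_GEMM_%dX%dX%d_%dX%dX%d",
                  tb_m, tb_n, tb_k, wp_m, wp_n, wp_k);
    std::printf("%s\n", name);  // INT8_NCHW32_IMMA_IMPLICIT_GEMM_128X128X64_64X64X64
}
```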
diff --git a/dnn/src/cuda/conv_bias/conv_bias_int8.cuh b/dnn/src/cuda/conv_bias/conv_bias_int8.cuh index e9cc68ebee676d29ff2e7057b0628c3f2273d508..ea4bb8bd037fb75d0b69df91565d7c7fe57acdc3 100644 --- a/dnn/src/cuda/conv_bias/conv_bias_int8.cuh +++ b/dnn/src/cuda/conv_bias/conv_bias_int8.cuh @@ -142,4 +142,12 @@ void do_conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_unroll_width( UNPACK_CONV_PARAMETER(_filter_meta, _param); \ MARK_USED_VAR +#define UNPACK_CONV_BIAS_NCHW32_PARAM(_src, _filter_meta, _dst, _param) \ + using Format = param::ConvBias::Format; \ + megdnn_assert(_param.format == Format::NCHW32); \ + size_t n = (_src)[0], ci = (_src)[1] * 32, hi = (_src)[2], wi = (_src)[3]; \ + size_t co = (_dst)[1] * 32, ho = (_dst)[2], wo = (_dst)[3]; \ + UNPACK_CONV_PARAMETER(_filter_meta, _param); \ + MARK_USED_VAR + // vim: syntax=cuda.doxygen
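UNPACK_CONV_BIAS_NCHW32_PARAM recovers the true channel counts by multiplying the blocked channel dimension by 32, because NCHW32 stores a tensor as (N, C/32, H, W, 32). A small illustration (not from the patch) of the resulting addressing:

```cpp
// Illustration of NCHW32 addressing: channels are stored in blocks of 32,
// so a logical (n, c, h, w) index maps to the linear offset below. This is
// why the macro reads ci/co from dimension 1 and multiplies by 32.
#include <cstddef>

size_t nchw32_offset(size_t n, size_t c, size_t h, size_t w,
                     size_t C, size_t H, size_t W) {
    size_t outer = c / 32, inner = c % 32;  // channel block and lane
    return (((n * (C / 32) + outer) * H + h) * W + w) * 32 + inner;
}
```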
diff --git a/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu new file mode 100644 index 0000000000000000000000000000000000000000..02a293aea3b5241bda4d123cbb6ce547884f93fb --- /dev/null +++ b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu @@ -0,0 +1,152 @@ +/** + * \file dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. + */ +// ignore warning of cutlass +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wstrict-aliasing" + +#if !MEGDNN_TEGRA_X1 +#include "cutlass/convolution/device/convolution.h" +#endif +#include "src/common/opr_param_defs_enumv.cuh" +#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh" + +#pragma GCC diagnostic pop + +using namespace megdnn; +using namespace cuda; +using namespace cutlass_wrapper; + +#if MEGDNN_TEGRA_X1 +template <bool NeedLoadFromConstMem> +void megdnn::cuda::cutlass_wrapper:: + do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32( + const int8_t* /* d_src */, const int8_t* /* d_filter */, + const int32_t* /* d_bias */, const int8_t* /* d_z */, + int8_t* /* d_dst */, int* /* workspace */, + const convolution::ConvParam& /* param */, + uint32_t /* nonlinear_mode */, float /* alpha */, + float /* beta */, float /* gamma */, float /* scale */, + const GemmCoord& /* threadblock_shape */, + const GemmCoord& /* warp_shape */, cudaStream_t /* stream */) {} +#else +template <bool NeedLoadFromConstMem> +void megdnn::cuda::cutlass_wrapper:: + do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32( + const int8_t* d_src, const int8_t* d_filter, + const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, + int* workspace, const convolution::ConvParam& param, + uint32_t nonlinear_mode, float alpha, float beta, float gamma, + float scale, const GemmCoord& threadblock_shape, + const GemmCoord& warp_shape, cudaStream_t stream) { +#define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ warp_k_) \ + if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ warp_shape.k() == warp_k_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \ threadblock_k_>; \ using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; \ using Convolution = cutlass::convolution::device::Convolution< \ int8_t, cutlass::layout::TensorNCxHWx<32>, int8_t, \ cutlass::layout::TensorCxRSKx<32>, ElementOutput, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::layout::TensorNCxHWx<32>, int32_t, \ cutlass::convolution::ConvType::kConvolution, \ cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \ ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \ cutlass::convolution::threadblock:: \ ConvolutionNCxHWxThreadblockSwizzle< \ cutlass::convolution::ConvType::kConvolution>, \ 2, 16, 16, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param{ \ param.n, param.ci, param.co, param.hi, param.wi, \ param.fh, param.fw, param.ho, param.wo, param.sh, \ param.sw, param.ph, param.pw, 1, 1}; \ return cutlass_convolution_wrapper<Convolution>( \ d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param, \ epilogue, stream); \ } +#define DISPATCH_KERNEL \ DISPATCH_KERNEL_WITH_TILE_SHAPE(256, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 256, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 64, 64, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 64, 32, 64, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 64, 32, 32, 64); \ DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 64, 32, 16, 64); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ threadblock_shape.m(), threadblock_shape.n(), \ threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \ warp_shape.k()); + using ElementOutput = int8_t; + using ElementAccumulator = int32_t; + using ElementBias = int32_t; + using ElementCompute = float; + using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode; + switch (nonlinear_mode) { + case NonlineMode::IDENTITY: { + using EpilogueOp = + cutlass::epilogue::thread::BiasAddLinearCombinationClamp< + ElementOutput, 8, ElementAccumulator, ElementBias, + ElementCompute>; + typename EpilogueOp::Params epilogue{alpha, beta, gamma}; + DISPATCH_KERNEL; + } + case NonlineMode::RELU: { + using EpilogueOp = cutlass::epilogue::thread:: + BiasAddLinearCombinationReluClamp< + ElementOutput, 8, ElementAccumulator, ElementBias, + ElementCompute>; + typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0}; + DISPATCH_KERNEL; + } + case NonlineMode::H_SWISH: { + using EpilogueOp = cutlass::epilogue::thread:: + BiasAddLinearCombinationHSwishClamp< + ElementOutput, 8, ElementAccumulator, ElementBias, + ElementCompute>; + typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale}; + DISPATCH_KERNEL; + } + default: + megdnn_assert(false, + "unsupported nonlinear mode for conv bias operator"); + } +#undef DISPATCH_KERNEL_WITH_TILE_SHAPE +#undef DISPATCH_KERNEL +} +#endif + +#define INST(need_load_from_const_mem) \ + template void megdnn::cuda::cutlass_wrapper:: \ + do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32< \ + need_load_from_const_mem>( \ + const int8_t* d_src, const int8_t* d_filter, \ + const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \ + int* workspace, const convolution::ConvParam& param, \ + uint32_t nonlinear_mode, float alpha, float beta, \ + float gamma, float scale, \ + const GemmCoord& threadblock_shape, \ + const GemmCoord& warp_shape, cudaStream_t stream); +INST(true); +INST(false); +#undef INST + +// vim: syntax=cuda.doxygen
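All three epilogues dispatched above compute a per-element affine combination of the int32 accumulator, the bias, and the optional z tensor, then re-quantize to int8. A scalar reference of the presumed semantics (a sketch under stated assumptions; in particular the exact h-swish scale handling is an assumption, not taken from CUTLASS):

```cpp
// Scalar reference (assumption-level sketch, not CUTLASS code) of what the
// BiasAddLinearCombination* epilogues compute per output element:
// identity / relu / h-swish applied to alpha*acc + beta*bias + gamma*z,
// followed by saturation to the int8 range.
#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t epilogue_ref(int32_t acc, int32_t bias, int8_t z, float alpha,
                    float beta, float gamma, float scale, int mode) {
    float x = alpha * acc + beta * bias + gamma * z;
    if (mode == 1) {                      // illustrative RELU mode
        x = std::max(x, 0.f);
    } else if (mode == 2) {               // illustrative H_SWISH mode
        float fx = x * scale;             // assumed: dequantize to real domain
        fx = fx * std::min(std::max(fx + 3.f, 0.f), 6.f) / 6.f;
        x = fx / scale;                   // assumed: re-quantize
    }
    x = std::round(x);
    x = std::min(std::max(x, -128.f), 127.f);  // clamp to int8
    return static_cast<int8_t>(x);
}
```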
diff --git a/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh new file mode 100644 index 0000000000000000000000000000000000000000..02481004a8ed2c374fd7cb10cdd3e0d8c3bf8bc2 --- /dev/null +++ b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh @@ -0,0 +1,44 @@ +/** + * \file dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. + */ +#pragma once +#include "cutlass/gemm/gemm.h" +#include "src/cuda/convolution_helper/parameter.cuh" +#include "src/cuda/utils.cuh" + +namespace megdnn { +namespace cuda { +namespace cutlass_wrapper { + +using GemmCoord = cutlass::gemm::GemmCoord; + +template <typename Convolution> +void cutlass_convolution_wrapper( + const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, + const int8_t* d_z, int8_t* d_dst, int* workspace, + typename Convolution::ConvolutionParameter const& conv_param, + typename Convolution::EpilogueOutputOp::Params const& epilogue, + cudaStream_t stream); + +template <bool NeedLoadFromConstMem> +void do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32( + const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, + const int8_t* d_z, int8_t* d_dst, int* workspace, + const convolution::ConvParam& param, uint32_t nonlinear_mode, + float alpha, float beta, float gamma, float scale, + const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, + cudaStream_t stream); + +} // namespace cutlass_wrapper +} // namespace cuda +} // namespace megdnn + +// vim: syntax=cuda.doxygen
diff --git a/dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw32_imma.cpp b/dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw32_imma.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c350ffaaef67845219148fae0013b50e2840bba4 --- /dev/null +++ b/dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw32_imma.cpp @@ -0,0 +1,188 @@ +/** + * \file dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw32_imma.cpp + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. + */ + +#include "./algo.h" +#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh" +#include "src/cuda/convolution_helper/parameter.cuh" +#include "src/cuda/utils.h" + +using namespace megdnn; +using namespace cuda; +using namespace convolution; + +#if CUDA_VERSION >= 10020 +bool ConvBiasForwardImpl::AlgoInt8NCHW32IMMAImplicitGemm::is_available( + const SizeArgs& args) const { + if (args.bias_layout->ndim <= 0) + return false; + + using Param = param::ConvBias; + using Format = Param::Format; + using Sparse = Param::Sparse; + using Mode = Param::Mode; + bool available = true; + auto&& param = args.opr->param(); + auto&& fm = args.filter_meta; + if (!conv_bias::check_bias_share_in_channel(*(args.bias_layout), + param.format)) + return false; + if (param.format != Format::NCHW32) + return false; + UNPACK_CONV_BIAS_NCHW32_PARAM(*(args.src_layout), fm, *(args.dst_layout), + param); + // TODO support group conv + available &= param.sparse == Sparse::DENSE; + // mode must be cross correlation + available &= param.mode == Mode::CROSS_CORRELATION; + // check data type + auto src_dtype = args.src_layout->dtype, + filter_dtype = args.filter_layout->dtype, + bias_dtype = args.bias_layout->dtype, + dst_dtype = args.dst_layout->dtype; + available &= (src_dtype.enumv() == DTypeEnum::QuantizedS8 && + filter_dtype.enumv() == DTypeEnum::QuantizedS8 && + bias_dtype.enumv() == DTypeEnum::QuantizedS32 && + dst_dtype.enumv() == DTypeEnum::QuantizedS8); + // TODO: support dilation + available &= dh == 1 && dw == 1; + // only support sm_75 or later, platform should have tensorcore int8 + // support + available &= is_compute_capability_required(7, 5); + if (fh == 1 && fw == 1) + return available; + // for non 1x1 convolution, we have to check constant memory size + auto&& device_prop = current_device_prop(); + // const mem size >= 64K + available &= device_prop.totalConstMem >= 65536; + size_t const_mem_usage = get_workspace_in_bytes(args) - + args.filter_layout->span().dist_byte(); + available &= const_mem_usage <= device_prop.totalConstMem; + return available; +} + +WorkspaceBundle +ConvBiasForwardImpl::AlgoInt8NCHW32IMMAImplicitGemm::get_workspace_bundle( + dt_byte* raw_ptr, const SizeArgs& args) const { + size_t ci = args.filter_layout->operator[](1) * 32; + size_t fh = args.filter_layout->operator[](2); + size_t fw = args.filter_layout->operator[](3); + size_t ws_filter = args.filter_layout->span().dist_byte(); + if (fh == 1 && fw == 1) { + return WorkspaceBundle{raw_ptr, {ws_filter}}; + } + size_t ws_size = (ci / 32) * fh * fw * sizeof(int32_t) * 2; + return WorkspaceBundle{raw_ptr, {ws_filter, ws_size}}; + } + +size_t +ConvBiasForwardImpl::AlgoInt8NCHW32IMMAImplicitGemm::get_workspace_in_bytes( + const SizeArgs& args) const { + return get_workspace_bundle(nullptr, args).total_size_in_bytes(); +}
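To make the workspace split above concrete: the first chunk holds the reordered int8 filter, and non-1x1 filters add a second chunk of (ci/32)*fh*fw*sizeof(int32_t)*2 bytes (presumably the precomputed offsets that the const-memory path consumes; that interpretation is an assumption). A worked example:

```cpp
// Worked example using the formulas above: workspace for a dense
// 256 -> 256 channel, 3x3 filter in NCHW32.
#include <cstdio>

int main() {
    size_t co = 256, ci = 256, fh = 3, fw = 3;
    size_t ws_filter = co * ci * fh * fw;  // int8 filter copy, 1 byte/elem
    // extra buffer for the non-1x1 path: two int32 entries per
    // (channel-block, fh, fw) position
    size_t ws_offsets = (ci / 32) * fh * fw * sizeof(int) * 2;
    std::printf("filter copy: %zu bytes, offset buffer: %zu bytes\n",
                ws_filter, ws_offsets);  // 589824 and 576
}
```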
+void ConvBiasForwardImpl::AlgoInt8NCHW32IMMAImplicitGemm::exec( + const ExecArgs& args) const { + using Format = Param::Format; + auto&& param = args.opr->param(); + auto&& fm = args.filter_meta; + UNPACK_CONV_BIAS_NCHW32_PARAM(*(args.src_layout), fm, *(args.dst_layout), + param); + auto ws = get_workspace_bundle(args.workspace.raw_ptr, args); + auto ws_filter = ws.get(0); + auto&& stream = cuda_stream(args.opr->handle()); + + // reformat filter from nchw32 to chwn32 + { + TensorLayout src{{co, ci / 32, fh, fw, 32}, dtype::Int8()}; + src.init_contiguous_stride(); + TensorLayout dst = src; + dst.stride[0] = 32; + dst.stride[1] = co * fh * fw * 32; + dst.stride[2] = co * fw * 32; + dst.stride[3] = co * 32; + dst.stride[4] = 1; + TensorND ts_src, ts_dst; + ts_src.raw_ptr = args.filter_tensor->raw_ptr; + ts_src.layout = src; + ts_dst.raw_ptr = ws_filter; + ts_dst.layout = dst; + auto&& transpose = + args.opr->handle()->create_operator<RelayoutForward>(); + transpose->exec(ts_src, ts_dst); + } + + ConvParam kern_param; + kern_param.n = n, kern_param.co = co, kern_param.ci = ci, + kern_param.hi = hi, kern_param.wi = wi, kern_param.ho = ho, + kern_param.wo = wo, kern_param.ph = ph, kern_param.pw = pw, + kern_param.sh = sh, kern_param.sw = sw, kern_param.fh = fh, + kern_param.fw = fw; + + float src_scale = args.src_layout->dtype.param<dtype::QuantizedS8>().scale, + filter_scale = + args.filter_layout->dtype.param<dtype::QuantizedS8>().scale, + bias_scale = + args.bias_layout->dtype.param<dtype::QuantizedS32>().scale, + dst_scale = args.dst_layout->dtype.param<dtype::QuantizedS8>().scale; + float alpha = src_scale * filter_scale / dst_scale, + beta = bias_scale / dst_scale; + int8_t* z_dev_ptr = nullptr; + float gamma = 0.0; + if (args.z_layout->ndim > 0) { + z_dev_ptr = args.z_tensor->compatible_ptr<dt_int8>(); + float z_scale = args.z_layout->dtype.param<dtype::QuantizedS8>().scale; + gamma = z_scale / dst_scale; + } + uint32_t nonlinear_mode = static_cast<uint32_t>(param.nonlineMode); + if (fh == 1 && fw == 1) { + cutlass_wrapper::do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32< false>(args.src_tensor->compatible_ptr<dt_int8>(), + reinterpret_cast<int8_t*>(ws_filter), + args.bias_tensor->compatible_ptr<dt_int32>(), z_dev_ptr, + args.dst_tensor->compatible_ptr<dt_int8>(), + nullptr, kern_param, nonlinear_mode, + alpha, beta, gamma, dst_scale, + cutlass_wrapper::GemmCoord{m_algo_param.threadblock_m, + m_algo_param.threadblock_n, + m_algo_param.threadblock_k}, + cutlass_wrapper::GemmCoord{m_algo_param.warp_m, + m_algo_param.warp_n, + m_algo_param.warp_k}, + stream); + } else { + auto workspace = ws.get(1); + cutlass_wrapper::do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32<true>( + args.src_tensor->compatible_ptr<dt_int8>(), + reinterpret_cast<int8_t*>(ws_filter), + args.bias_tensor->compatible_ptr<dt_int32>(), z_dev_ptr, + args.dst_tensor->compatible_ptr<dt_int8>(), + reinterpret_cast<int*>(workspace), kern_param, nonlinear_mode, + alpha, beta, gamma, dst_scale, + cutlass_wrapper::GemmCoord{m_algo_param.threadblock_m, + m_algo_param.threadblock_n, + m_algo_param.threadblock_k}, + cutlass_wrapper::GemmCoord{m_algo_param.warp_m, + m_algo_param.warp_n, + m_algo_param.warp_k}, + stream); + } +} + +std::string ConvBiasForwardImpl::AlgoInt8NCHW32IMMAImplicitGemm::to_string( + AlgoParam algo_param) { + return ssprintf("%dX%dX%d_%dX%dX%d", algo_param.threadblock_m, + algo_param.threadblock_n, algo_param.threadblock_k, + algo_param.warp_m, algo_param.warp_n, algo_param.warp_k); +} +#endif + +// vim: syntax=cpp.doxygen
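The filter reorder in exec() works purely through strides: the destination layout permutes (co, ci/32, fh, fw, 32) so that a plain relayout writes CHWN32 output. A host-side reference loop that performs the same permutation (illustrative only, not part of the patch):

```cpp
// Minimal sketch of the stride trick: writing through a permuted-stride
// destination layout turns a plain copy into the
// (co, ci/32, fh, fw, 32) -> CHWN32 transpose done on device above.
#include <cstdint>
#include <cstddef>

void relayout_ref(const int8_t* src, int8_t* dst, size_t co, size_t ci,
                  size_t fh, size_t fw) {
    size_t cb = ci / 32;  // number of 32-channel blocks
    for (size_t o = 0; o < co; ++o)
        for (size_t b = 0; b < cb; ++b)
            for (size_t h = 0; h < fh; ++h)
                for (size_t w = 0; w < fw; ++w)
                    for (size_t l = 0; l < 32; ++l) {
                        size_t s = (((o * cb + b) * fh + h) * fw + w) * 32 + l;
                        // dst strides: {32, co*fh*fw*32, co*fw*32, co*32, 1}
                        size_t d = o * 32 + b * co * fh * fw * 32 +
                                   h * co * fw * 32 + w * co * 32 + l;
                        dst[d] = src[s];
                    }
}
```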
+ */ +#include "cutlass/convolution/device/convolution.h" +#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh" + +using namespace megdnn; +using namespace cuda; +using namespace cutlass_wrapper; + +template +void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper( + const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, + const int8_t* d_z, int8_t* d_dst, int* workspace, + typename Convolution::ConvolutionParameter const& conv_param, + typename Convolution::EpilogueOutputOp::Params const& epilogue, + cudaStream_t stream) { + typename Convolution::TensorRefSrc tensor_src{ + const_cast(d_src), + Convolution::LayoutSrc::packed({conv_param.n(), conv_param.hi(), + conv_param.wi(), conv_param.ci()})}; + typename Convolution::TensorRefFilter tensor_filter{ + const_cast(d_filter), + Convolution::LayoutFilter::packed({conv_param.co(), conv_param.fh(), + conv_param.fw(), + conv_param.ci()})}; + typename Convolution::TensorRefBias tensor_bias{ + const_cast(d_bias), + Convolution::LayoutBias::packed({1, 1, 1, conv_param.co()})}; + typename Convolution::TensorRefDst tensor_z{ + const_cast(d_z), + Convolution::LayoutDst::packed({conv_param.n(), conv_param.ho(), + conv_param.wo(), conv_param.co()})}; + typename Convolution::TensorRefDst tensor_dst{ + d_dst, + Convolution::LayoutDst::packed({conv_param.n(), conv_param.ho(), + conv_param.wo(), conv_param.co()})}; + typename Convolution::Arguments arguments{ + conv_param, tensor_src, tensor_filter, + tensor_bias, tensor_z, tensor_dst.non_const_ref(), + epilogue}; + Convolution conv_op; + cutlass_check(conv_op.initialize(arguments, workspace)); + cutlass_check(conv_op(stream)); + after_kernel_launch(); +} + +// vim: syntax=cuda.doxygen diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..73c7b13a27abb8c07de223b6294cbbaaac70fc04 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..0d193b6935fafd8e03600c125a819d9ce1dc16da Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b7fffa483af20cdc1e54b3773ca0490e278eb90 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x128x64_64x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_hswish.cu new file mode 100644 index 
0000000000000000000000000000000000000000..f8eec15f48d8739088d8c695bd0e48a0edbe0ebc Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..6295113c4d6a3c992786759fc5ad1e16f79a53ee Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..09309517424c75fb1cea595fe9fb08816c3712ad Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x256x64_64x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..88314309ea4014a7ec433179b4d41ee3fdd1fc26 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..04802d4b6e8a914c98284075ad28f93bb19bebf3 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..45261b6a28a2804cf512a72838d67e7eb2b9317c Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_128x64x64_64x32x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..9d39f7d39503912a3a30e12ce4cda9906826725c Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_id.cu new file mode 100644 index 
0000000000000000000000000000000000000000..fc336fd7a6a987304520d8e7b3fbba5b6170606a Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..108fcb4114ef0ee0e1ca7e7010336eba6f9f24c8 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x128x64_64x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..17f1e4377341b46cc17ce5a4082b53d19f067d53 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..94bc3cfb8e3fc7122ff50bba4b302c49cbef20df Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..00f9d4635fbefe4f8e79e6a67d9690c23aac98e8 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x256x64_64x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..b19470b7327d173115838e7dd3eb46c9bceb704b Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..309cb6e0c7db4f2b78b1939deffc0f0be7c47ad3 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_relu.cu new 
file mode 100644 index 0000000000000000000000000000000000000000..627d40249dc3a1f3e87a8d11ac876640037c377d Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_128x64x64_64x32x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..079f5160d35ced08e0fcb46797ee41f73fbd38a3 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..bc58a2eaa0536aa291ff53a73637216a22a9ac86 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..e7e97dabf6bffc93c8b75758b052b5b921e4ec13 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_256x128x64_64x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..191c8dc08da9af2aad9811ab0c4a01f9e6181028 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..44d6e8a16afc9bb7a57fd562647e52fd3db81797 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..01c928f0fd69353dfdc647c40a240ce2749f47ea Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_hswish.cu 
b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..e5ba9d050236f3b25ba56dd563c9f9691a498957 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..debf0149cd5124e5f0178a06663e07567c84ece4 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..92ff2ce412cf396bb4ee06889742b13fbcae7ef4 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x128x64_32x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..843d8d3885a934786f04513444e1258c98d91e00 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..9c7aebc25cacca00b3edc74cb9001e96a9558095 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..cd22a31e1579b0051392bb296d1d097f79686caa Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_64x64x64_32x32x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..522e568f49818ed23742fb4981729c10ab26fae2 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_id.cu 
b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..fbb2dc1cda97a5507d23554b5aec2220f7eb9b25 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..a17bafd4aeaa6d348e3210789562a38aa0849b5c Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_256x128x64_64x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..adc33220084b8a8b67b833425e6f0252afacae1c Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..433becc0c0a401e8b764eb6e8db8797aabc1c38c Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..752b2a5c369a1bdb173b0247806cb70c496fd7b6 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_32x64x64_32x16x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..671eb7115f593d0bad6653b3e216e14bcd241370 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..e62e4d4f14aa4799076fc5c97ba98277bd100211 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_relu.cu 
b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..74decadf9f42d1ec5b52e651bebb3b759dd4e902 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x128x64_32x64x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_hswish.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..74003cdde1cd183ce73d645f8b4eb57155f17f4e Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_hswish.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_id.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..30530fc0de8e7200fb1eb52f17c18b0d17a7c4a6 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_id.cu differ diff --git a/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_relu.cu b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..60ece24b2476564520fbb998e586cee56cdeae63 Binary files /dev/null and b/dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_64x64x64_32x32x64_relu.cu differ diff --git a/dnn/src/cuda/conv_bias/opr_impl.h b/dnn/src/cuda/conv_bias/opr_impl.h index 12260b9eef90527f4d5b8f8098dbe76748d817e1..ca07b8f37452d5504b405d4a4703a47096d8625d 100644 --- a/dnn/src/cuda/conv_bias/opr_impl.h +++ b/dnn/src/cuda/conv_bias/opr_impl.h @@ -80,6 +80,7 @@ public: class AlgoInt8NCHW4IMMAImplicitGemm; class AlgoInt8CHWN4IMMAImplicitGemmReorderFilter; class AlgoInt8CHWN4IMMAImplicitGemmUnrollWidth; + class AlgoInt8NCHW32IMMAImplicitGemm; class AlgoBFloat16; class AlgoPack; diff --git a/dnn/src/cuda/utils.cpp b/dnn/src/cuda/utils.cpp index 97a17223b8497dd43ce73698196973940eaa41e5..be8175d19711e42fae92d1115b645d78b69aa004 100644 --- a/dnn/src/cuda/utils.cpp +++ b/dnn/src/cuda/utils.cpp @@ -56,7 +56,6 @@ const char *cublasGetErrorString(cublasStatus_t error) { } return "Unknown CUBLAS error"; } - } // anonymous namespace void cuda::__throw_cuda_error__(cudaError_t err, const char *msg) { @@ -87,6 +86,12 @@ void cuda::__throw_cuda_driver_error__(CUresult err, const char* msg) { megdnn_throw(s.c_str()); } +void cuda::__throw_cutlass_error__(cutlass::Status err, const char* msg) { + auto s = ssprintf("cutlass error %s(%d) occurred; expr: %s", + cutlass::cutlassGetStatusString(err), int(err), msg); + megdnn_throw(s.c_str()); +} + void cuda::report_error(const char *msg) { megdnn_throw(msg); MEGDNN_MARK_USED_VAR(msg); diff --git a/dnn/src/cuda/utils.cuh b/dnn/src/cuda/utils.cuh index 89354b6b7d6782a898c9019131b80b8f90bbc6a4..2617fd12e4d69e0eacddb9ded9548ac87d8179e7 100644 --- a/dnn/src/cuda/utils.cuh +++ b/dnn/src/cuda/utils.cuh @@ -20,6 +20,7 @@ #include #include "cuda.h" #include "src/cuda/cudnn_with_check.h" +#include "cutlass/cutlass.h" #define 
cuda_check(_x) \ do { \ @@ -61,6 +62,14 @@ } \ } while (0) +#define cutlass_check(_x) \ + do { \ + cutlass::Status _err = (_x); \ + if (_err != cutlass::Status::kSuccess) { \ + ::megdnn::cuda::__throw_cutlass_error__(_err, #_x); \ + } \ + } while (0) + #define after_kernel_launch() \ do { \ cuda_check(cudaGetLastError()); \ @@ -93,6 +102,8 @@ MEGDNN_NORETURN void __throw_cublas_error__(cublasStatus_t err, MEGDNN_NORETURN void __throw_cusolver_error__(cusolverStatus_t err, const char* msg); MEGDNN_NORETURN void __throw_cuda_driver_error__(CUresult err, const char* msg); +MEGDNN_NORETURN void __throw_cutlass_error__(cutlass::Status status, + const char* msg); MEGDNN_NORETURN void report_error(const char* msg); template diff --git a/dnn/test/CMakeLists.txt b/dnn/test/CMakeLists.txt index 823aa88714fd8d85754aff4ba57ff96fef26854b..e0e8499affba7df7922027c18b1f895fc7a03976 100644 --- a/dnn/test/CMakeLists.txt +++ b/dnn/test/CMakeLists.txt @@ -32,6 +32,10 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing") target_link_libraries(megdnn_test gtest) target_link_libraries(megdnn_test megdnn ${MGE_BLAS_LIBS}) +if (MGE_WITH_CUDA) + target_link_libraries(megdnn_test cutlass) +endif() + target_include_directories(megdnn_test PRIVATE ${PROJECT_SOURCE_DIR}/third_party/midout/src diff --git a/dnn/test/common/benchmarker.h b/dnn/test/common/benchmarker.h index cedeb6a2dd40d7defe279d03c02567363d8fcd4c..cfd9f6e5e2d2f8fc95556f2f10e3815fe739ac0e 100644 --- a/dnn/test/common/benchmarker.h +++ b/dnn/test/common/benchmarker.h @@ -254,8 +254,8 @@ public: }; ////////////////// Algo Benchmark //////////////////////// -template <typename Opr, typename Proxy = OprProxy<Opr>> -float algo_benchmark(Benchmarker<Opr>& benchmark, TensorLayoutArray layouts, +template <typename Opr, typename Proxy = OprProxy<Opr>, typename T = Timer> +float algo_benchmark(Benchmarker<Opr, T>& benchmark, TensorLayoutArray layouts, const std::string& algo_base) { Proxy proxy; auto opr = benchmark.opr(); @@ -279,8 +279,8 @@ float algo_benchmark(Benchmarker<Opr, T>& benchmark, TensorLayoutArray layouts, return min_used; } -template <typename Opr, typename Proxy = OprProxy<Opr>> -float algo_benchmark(Benchmarker<Opr>& benchmark, TensorShapeArray shapes, +template <typename Opr, typename Proxy = OprProxy<Opr>, typename T = Timer> +float algo_benchmark(Benchmarker<Opr, T>& benchmark, TensorShapeArray shapes, const std::string& algo_base) { return algo_benchmark(benchmark, benchmark.make_layouts(shapes), algo_base); }
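The new Timer template parameter lets CUDA benchmarks substitute a device-side timer (the CUTimer used by the call sites below). Assuming CUTimer is a CUDA-event-based timer — MegDNN's actual implementation may differ — the idea is roughly:

```cpp
// Sketch of an event-based GPU timer with the start/stop/elapsed interface
// such a benchmarker would expect. Host timers would miss async kernel time.
#include <cuda_runtime.h>

class CudaEventTimer {
    cudaEvent_t m_start, m_stop;
public:
    CudaEventTimer() {
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_stop);
    }
    ~CudaEventTimer() {
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_stop);
    }
    void start() { cudaEventRecord(m_start); }
    void stop() { cudaEventRecord(m_stop); }
    float time_in_ms() {
        cudaEventSynchronize(m_stop);  // wait for the stop event
        float ms = 0.f;
        cudaEventElapsedTime(&ms, m_start, m_stop);
        return ms;
    }
};
```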
diff --git a/dnn/test/cuda/conv_bias_int8.cpp b/dnn/test/cuda/conv_bias_int8.cpp index c81fd7e28fc7d27dd34217c0735f2a1c427fa417..cbc475e236d85d72679f67e5287fd01d3ff68e67 100644 --- a/dnn/test/cuda/conv_bias_int8.cpp +++ b/dnn/test/cuda/conv_bias_int8.cpp @@ -18,6 +18,8 @@ #include "test/cuda/fixture.h" #include "test/cuda/utils.h" + +#define MEGDNN_WITH_BENCHMARK 1 #define V1(x) #x #define V(x) V1(x) @@ -107,11 +109,6 @@ void benchmark_target_algo( benchmarker.set_display(false).set_times(RUNS); benchmarker_cudnn.set_display(false).set_times(RUNS); - if (algo) { - benchmarker.set_before_exec_callback( - conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo)); - } - #define CUDNN_VERSION_STRING \ "v" V(CUDNN_MAJOR) "." V(CUDNN_MINOR) "." V(CUDNN_PATCHLEVEL) benchmarker_cudnn.set_before_exec_callback( @@ -133,168 +130,117 @@ using Param = ConvBias::Param; using Format = Param::Format; - if (format == Format::NCHW4) { - for (auto&& arg : args) { - Param param; - param.pad_h = param.pad_w = arg.f / 2; - param.stride_h = param.stride_w = arg.s; - param.format = Format::NCHW4; - - size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2); - size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2); - - benchmarker.set_param(param); - auto time_in_ms = - benchmarker.execs({{arg.n, arg.ci / 4, arg.hi, arg.wi, 4}, - {arg.co, arg.ci / 4, arg.f, arg.f, 4}, - {1, arg.co / 4, 1, 1, 4}, - {}, - {}}) / - RUNS; - param.nonlineMode = Param::NonlineMode::IDENTITY; - benchmarker_cudnn.set_param(param); - auto time_in_ms_cudnn = - benchmarker_cudnn.execs( - {{arg.n, arg.ci / 4, arg.hi, arg.wi, 4}, - {arg.co, arg.ci / 4, arg.f, arg.f, 4}, - {1, arg.co / 4, 1, 1, 4}, - {}, - {}}) / - RUNS; - float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f * - arg.f / (1e12); - TensorShape src{arg.n, arg.ci, arg.hi, arg.wi}, - filter{arg.co, arg.ci, arg.f, arg.f}; - printf("src=%s, filter=%s, time(algo=%s)=%.2f %.2fTops, " - "time(cudnn)=%.2f %.2fTops, " - "perf(algo=%s)/perf(cudnn)=%.2f\n", - src.to_string().c_str(), filter.to_string().c_str(), algo, - time_in_ms, (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn, - (flo / (time_in_ms_cudnn * 1e-3)), algo, - time_in_ms_cudnn / time_in_ms); + // helper function to change format + auto get_tensor_shape = [](TensorShape shape, + Format format) -> TensorShape { + TensorShape ret; + if (format == Format::NCHW4) { + ret = static_cast<TensorShape>( + TensorLayout{shape, dtype::Int8()} + .reshape({shape[0], shape[1] / 4, 4, shape[2], + shape[3]}) + .dimshuffle({0, 1, 3, 4, 2})); + } else if (format == Format::CHWN4) { + ret = static_cast<TensorShape>( + TensorLayout{shape, dtype::Int8()} + .reshape({shape[0], shape[1] / 4, 4, shape[2], + shape[3]}) + .dimshuffle({1, 3, 4, 0, 2})); } - printf("bench with z tensor\n"); - for (auto&& arg : args) { - Param param; - param.pad_h = param.pad_w = arg.f / 2; - param.stride_h = param.stride_w = arg.s; - param.format = Format::NCHW4; - - size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2); - size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2); - - benchmarker.set_param(param); - auto time_in_ms = - benchmarker.execs({{arg.n, arg.ci / 4, arg.hi, arg.wi, 4}, - {arg.co, arg.ci / 4, arg.f, arg.f, 4}, - {1, arg.co / 4, 1, 1, 4}, - {arg.n, arg.co / 4, ho, wo, 4}, - {}}) / - RUNS; - param.format = Format::NCHW4; - param.nonlineMode = Param::NonlineMode::IDENTITY; - benchmarker_cudnn.set_param(param); - auto time_in_ms_cudnn = - benchmarker_cudnn.execs( - {{arg.n, arg.ci / 4, arg.hi, arg.wi, 4}, - {arg.co, arg.ci / 4, arg.f, arg.f, 4}, - {1, arg.co / 4, 1, 1, 4}, - {arg.n, arg.co / 4, ho, wo, 4}, - {}}) / - RUNS; - float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f * - arg.f / (1e12); - TensorShape src{arg.n, arg.ci, arg.hi, arg.wi}, - filter{arg.co, arg.ci, arg.f, arg.f}; - printf("src=%s, filter=%s, time(algo=%s)=%.2f %.2fTops, " - "time(cudnn)=%.2f %.2fTops, " - "perf(algo=%s)/perf(cudnn)=%.2f\n", - src.to_string().c_str(), filter.to_string().c_str(), algo, - time_in_ms, (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn, - (flo / (time_in_ms_cudnn * 1e-3)), algo, - time_in_ms_cudnn / time_in_ms); + return ret; + };
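The get_tensor_shape helper converts a plain NCHW shape into the blocked layouts via a reshape to (N, C/4, 4, H, W) followed by a dimshuffle. Spelled out for one concrete shape (illustrative only):

```cpp
// NCHW {64, 128, 56, 56} becomes NCHW4 {64, 32, 56, 56, 4} under
// dimshuffle {0, 1, 3, 4, 2}, and CHWN4 {32, 56, 56, 64, 4} under
// dimshuffle {1, 3, 4, 0, 2}.
#include <array>
#include <cstdio>

int main() {
    std::array<size_t, 4> nchw{64, 128, 56, 56};
    std::array<size_t, 5> r{nchw[0], nchw[1] / 4, 4, nchw[2], nchw[3]};
    std::array<size_t, 5> nchw4{r[0], r[1], r[3], r[4], r[2]};  // {0,1,3,4,2}
    std::array<size_t, 5> chwn4{r[1], r[3], r[4], r[0], r[2]};  // {1,3,4,0,2}
    std::printf("NCHW4: %zux%zux%zux%zux%zu\n", nchw4[0], nchw4[1], nchw4[2],
                nchw4[3], nchw4[4]);
    std::printf("CHWN4: %zux%zux%zux%zux%zu\n", chwn4[0], chwn4[1], chwn4[2],
                chwn4[3], chwn4[4]);
}
```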
+ + for (auto&& arg : args) { + Param param; + param.pad_h = param.pad_w = arg.f / 2; + param.stride_h = param.stride_w = arg.s; + param.format = format; + + size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2); + size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2); + + benchmarker.set_param(param); + if (!algo) { + benchmarker.proxy()->target_algo = nullptr; } - } else if (format == Format::CHWN4) { - for (auto&& arg : args) { - Param param; - param.pad_h = param.pad_w = arg.f / 2; - param.stride_h = param.stride_w = arg.s; - param.format = Format::CHWN4; - - size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2); - size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2); - - benchmarker.set_param(param); - auto time_in_ms = - benchmarker.execs({{arg.ci / 4, arg.hi, arg.wi, arg.n, 4}, - {arg.ci / 4, arg.f, arg.f, arg.co, 4}, - {arg.co / 4, 1, 1, 1, 4}, - {}, - {}}) / - RUNS; - param.format = Format::NCHW4; - benchmarker_cudnn.set_param(param); - auto time_in_ms_cudnn = - benchmarker_cudnn.execs( - {{arg.n, arg.ci / 4, arg.hi, arg.wi, 4}, - {arg.co, arg.ci / 4, arg.f, arg.f, 4}, - {1, arg.co / 4, 1, 1, 4}, - {}, - {}}) / + TensorShape src{arg.n, arg.ci, arg.hi, arg.wi}, + filter{arg.co, arg.ci, arg.f, arg.f}, bias{1, arg.co, 1, 1}, + z{arg.n, arg.co, ho, wo}, dst = z; + float time_in_ms = 0.f; + if (algo) { + time_in_ms = + algo_benchmark<ConvBiasForward, OprProxy<ConvBiasForward>, + CUTimer>(benchmarker, + {get_tensor_shape(src, format), + get_tensor_shape(filter, format), + get_tensor_shape(bias, format), + {}, + {}}, + algo) / RUNS; - float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f * - arg.f / (1e12); - TensorShape src{arg.n, arg.ci, arg.hi, arg.wi}, - filter{arg.co, arg.ci, arg.f, arg.f}; - printf("src=%s, filter=%s, time(algo=%s)=%.2f %.2fTops, " - "time(cudnn)=%.2f %.2fTops, " - "perf(algo=%s)/perf(cudnn)=%.2f\n", - src.to_string().c_str(), filter.to_string().c_str(), algo, - time_in_ms, (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn, - (flo / (time_in_ms_cudnn * 1e-3)), algo, - time_in_ms_cudnn / time_in_ms); + } else { + time_in_ms = benchmarker.execs({get_tensor_shape(src, format), + get_tensor_shape(filter, format), + get_tensor_shape(bias, format), + {}, + {}}) / + RUNS; } + Format format_cudnn = Format::NCHW4; + param.format = format_cudnn; + benchmarker_cudnn.set_param(param); + auto time_in_ms_cudnn = + benchmarker_cudnn.execs({get_tensor_shape(src, format_cudnn), + get_tensor_shape(filter, format_cudnn), + get_tensor_shape(bias, format_cudnn), + {}, + {}}) / + RUNS; + float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f * arg.f / + (1e12); + printf("src=%s, filter=%s, dst=%s, time(algo=%s)=%.2f %.2fTops, " + "time(cudnn)=%.2f %.2fTops, " + "perf(algo=%s)/perf(cudnn)=%.2f\n", + src.to_string().c_str(), filter.to_string().c_str(), + dst.to_string().c_str(), algo, time_in_ms, + (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn, + (flo / (time_in_ms_cudnn * 1e-3)), algo, + time_in_ms_cudnn / time_in_ms); printf("bench with z tensor\n"); - for (auto&& arg : args) { - Param param; - param.pad_h = param.pad_w = arg.f / 2; - param.stride_h = param.stride_w = arg.s; - param.format = Format::CHWN4; - - size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2); - size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2); - - benchmarker.set_param(param); - auto time_in_ms = - benchmarker.execs({{arg.ci / 4, arg.hi, arg.wi, arg.n, 4}, - {arg.ci / 4, arg.f, arg.f, arg.co, 4}, - {arg.co / 4, 1, 1, 1, 4}, - {arg.co / 4, ho, wo, arg.n, 4}, - {}}) / + if (algo) { + time_in_ms = + algo_benchmark<ConvBiasForward, OprProxy<ConvBiasForward>, + CUTimer>(benchmarker, + {get_tensor_shape(src, format), + get_tensor_shape(filter, format),
get_tensor_shape(bias, format), + get_tensor_shape(z, format), + {}}, + algo) / RUNS; - param.format = Format::NCHW4; - benchmarker_cudnn.set_param(param); - param.nonlineMode = Param::NonlineMode::IDENTITY; - auto time_in_ms_cudnn = - benchmarker_cudnn.execs( - {{arg.n, arg.ci / 4, arg.hi, arg.wi, 4}, - {arg.co, arg.ci / 4, arg.f, arg.f, 4}, - {1, arg.co / 4, 1, 1, 4}, - {arg.n, arg.co / 4, ho, wo, 4}, - {}}) / - RUNS; - float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f * - arg.f / (1e12); - TensorShape src{arg.n, arg.ci, arg.hi, arg.wi}, - filter{arg.co, arg.ci, arg.f, arg.f}; - printf("src=%s, filter=%s, time(algo=%s)=%.2f %.2fTops, " - "time(cudnn)=%.2f %.2fTops, " - "perf(algo=%s)/perf(cudnn)=%.2f\n", - src.to_string().c_str(), filter.to_string().c_str(), algo, - time_in_ms, (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn, - (flo / (time_in_ms_cudnn * 1e-3)), algo, - time_in_ms_cudnn / time_in_ms); + } else { + time_in_ms = benchmarker.execs({get_tensor_shape(src, format), + get_tensor_shape(filter, format), + get_tensor_shape(bias, format), + get_tensor_shape(z, format), + {}}) / + RUNS; } + time_in_ms_cudnn = + benchmarker_cudnn.execs({get_tensor_shape(src, format_cudnn), + get_tensor_shape(filter, format_cudnn), + get_tensor_shape(bias, format_cudnn), + get_tensor_shape(z, format_cudnn), + {}}) / + RUNS; + printf("src=%s, filter=%s, dst=%s, time(algo=%s)=%.2f %.2fTops, " + "time(cudnn)=%.2f %.2fTops, " + "perf(algo=%s)/perf(cudnn)=%.2f\n", + src.to_string().c_str(), filter.to_string().c_str(), + dst.to_string().c_str(), algo, time_in_ms, + (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn, + (flo / (time_in_ms_cudnn * 1e-3)), algo, + time_in_ms_cudnn / time_in_ms); } } @@ -313,10 +259,7 @@ void benchmark_target_algo_with_cudnn_tsc( std::unique_ptr<OprProxy<ConvBiasForward>> proxy{ new OprProxy<ConvBiasForward>{true}}; - if (algo) { - benchmarker.set_before_exec_callback( - conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo)); - } else { + if (!algo) { benchmarker.set_proxy(proxy); } @@ -340,163 +283,132 @@ using Param = ConvBias::Param; using Format = Param::Format; - if (format == Format::NCHW4) { - for (auto&& arg : args) { - Param param; - param.pad_h = param.pad_w = arg.f / 2; - param.stride_h = param.stride_w = arg.s; - param.format = Format::NCHW4; - - size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2); - size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2); - - benchmarker.set_param(param); - if (!algo) { - benchmarker.proxy()->target_algo = nullptr; - } - auto time_in_ms = - benchmarker.execs({{arg.n, arg.ci / 4, arg.hi, arg.wi, 4}, - {arg.co, arg.ci / 4, arg.f, arg.f, 4}, - {1, arg.co / 4, 1, 1, 4}, - {}, - {}}) / - RUNS; - param.format = Format::NCHW32; - benchmarker_cudnn.set_param(param); - auto time_in_ms_cudnn = - benchmarker_cudnn.execs( - {{arg.n, arg.ci / 32, arg.hi, arg.wi, 32}, - {arg.co, arg.ci / 32, arg.f, arg.f, 32}, - {1, arg.co / 32, 1, 1, 32}, - {}, - {}}) / - RUNS; - float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f * - arg.f / (1e12); - TensorShape src{arg.n, arg.ci, arg.hi, arg.wi}, - filter{arg.co, arg.ci, arg.f, arg.f}; - printf("src=%s, filter=%s, time(algo=%s)=%.2f %.2fTops, " - "time(cudnn)=%.2f %.2fTops, " - "perf(algo=%s)/perf(cudnn)=%.2f\n", - src.to_string().c_str(), filter.to_string().c_str(), algo, - time_in_ms, (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn, - (flo / (time_in_ms_cudnn * 1e-3)), algo, - time_in_ms_cudnn / time_in_ms); + // helper function to change format + auto get_tensor_shape = [](TensorShape shape,
+
+    for (auto&& arg : args) {
+        Param param;
+        param.pad_h = param.pad_w = arg.f / 2;
+        param.stride_h = param.stride_w = arg.s;
+        param.format = format;
+
+        size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2);
+        size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2);
+
+        benchmarker.set_param(param);
+        if (!algo) {
+            benchmarker.proxy()->target_algo = nullptr;
+        }
+        TensorShape src{arg.n, arg.ci, arg.hi, arg.wi},
+                filter{arg.co, arg.ci, arg.f, arg.f}, bias{1, arg.co, 1, 1},
+                z{arg.n, arg.co, ho, wo}, dst = z;
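+        // note: the two guards below exist because the tensor core kernels
+        // pack channels into fixed-size blocks, so the channel counts must
+        // be divisible by the block size for the packed layout to exist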
+        // skip test cases that cannot use the nchw32 tensor core kernels
+        if (format == Format::NCHW32 && (arg.co % 32 != 0 || arg.ci % 32 != 0))
+            continue;
+        // skip test cases that cannot use the nchw4/chwn4 tensor core kernels
+        if ((format == Format::CHWN4 || format == Format::NCHW4) &&
+            (arg.ci % 16 != 0))
+            continue;
+        float time_in_ms = 0.f;
+        if (algo) {
+            time_in_ms =
+                    algo_benchmark<ConvBiasForward, OprProxy<ConvBiasForward>,
+                                   CUTimer>(benchmarker,
+                                            {get_tensor_shape(src, format),
+                                             get_tensor_shape(filter, format),
+                                             get_tensor_shape(bias, format),
+                                             {},
+                                             {}},
+                                            algo) /
                     RUNS;
-            float time_in_ms_cudnn = 0.f;
-            if (arg.ci % 32 == 0 && arg.co % 32 == 0) {
-                param.format = Format::NCHW32;
-                benchmarker_cudnn.set_param(param);
-                time_in_ms_cudnn =
-                        benchmarker_cudnn.execs(
-                                {{arg.n, arg.ci / 32, arg.hi, arg.wi, 32},
-                                 {arg.co, arg.ci / 32, arg.f, arg.f, 32},
-                                 {1, arg.co / 32, 1, 1, 32},
-                                 {},
-                                 {}}) /
-                        RUNS;
-            } else {
-                param.format = Format::NCHW4;
-                benchmarker_cudnn.set_param(param);
-                time_in_ms_cudnn =
-                        benchmarker_cudnn.execs(
-                                {{arg.n, arg.ci / 4, arg.hi, arg.wi, 4},
-                                 {arg.co, arg.ci / 4, arg.f, arg.f, 4},
-                                 {1, arg.co / 4, 1, 1, 4},
-                                 {},
-                                 {}}) /
-                        RUNS;
-            }
-            float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f *
-                        arg.f / (1e12);
-            TensorShape src{arg.n, arg.ci, arg.hi, arg.wi},
-                    filter{arg.co, arg.ci, arg.f, arg.f};
-            printf("src=%s, filter=%s, time(algo=%s)=%.2f %.2fTops, "
-                   "time(cudnn)=%.2f %.2fTops, "
-                   "perf(algo=%s)/perf(cudnn)=%.2f\n",
-                   src.to_string().c_str(), filter.to_string().c_str(), algo,
-                   time_in_ms, (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn,
-                   (flo / (time_in_ms_cudnn * 1e-3)), algo,
-                   time_in_ms_cudnn / time_in_ms);
+        } else {
+            time_in_ms = benchmarker.execs({get_tensor_shape(src, format),
+                                            get_tensor_shape(filter, format),
+                                            get_tensor_shape(bias, format),
+                                            {},
+                                            {}}) /
+                         RUNS;
         }
+        Format format_cudnn = arg.ci % 32 == 0 && arg.co % 32 == 0
+                                      ? Format::NCHW32
+                                      : Format::NCHW4;
+        param.format = format_cudnn;
+        benchmarker_cudnn.set_param(param);
+        auto time_in_ms_cudnn =
+                benchmarker_cudnn.execs({get_tensor_shape(src, format_cudnn),
+                                         get_tensor_shape(filter, format_cudnn),
+                                         get_tensor_shape(bias, format_cudnn),
+                                         {},
+                                         {}}) /
+                RUNS;
+        float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f * arg.f /
+                    (1e12);
+        printf("src=%s, filter=%s, dst=%s, time(algo=%s)=%.2f %.2fTops, "
+               "time(cudnn)=%.2f %.2fTops, "
+               "perf(algo=%s)/perf(cudnn)=%.2f\n",
+               src.to_string().c_str(), filter.to_string().c_str(),
+               dst.to_string().c_str(), algo, time_in_ms,
+               (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn,
+               (flo / (time_in_ms_cudnn * 1e-3)), algo,
+               time_in_ms_cudnn / time_in_ms);
         printf("bench with z tensor\n");
-        for (auto&& arg : args) {
-            Param param;
-            param.pad_h = param.pad_w = arg.f / 2;
-            param.stride_h = param.stride_w = arg.s;
-            param.format = Format::CHWN4;
-
-            size_t ho = infer_conv_shape(arg.hi, arg.f, arg.s, arg.f / 2);
-            size_t wo = infer_conv_shape(arg.wi, arg.f, arg.s, arg.f / 2);
-
-            benchmarker.set_param(param);
-            if (!algo) {
-                benchmarker.proxy()->target_algo = nullptr;
-            }
-            auto time_in_ms =
-                    benchmarker.execs({{arg.ci / 4, arg.hi, arg.wi, arg.n, 4},
-                                       {arg.ci / 4, arg.f, arg.f, arg.co, 4},
-                                       {arg.co / 4, 1, 1, 1, 4},
-                                       {arg.co / 4, ho, wo, arg.n, 4},
-                                       {}}) /
+        if (algo) {
+            time_in_ms =
+                    algo_benchmark<ConvBiasForward, OprProxy<ConvBiasForward>,
+                                   CUTimer>(benchmarker,
+                                            {get_tensor_shape(src, format),
+                                             get_tensor_shape(filter, format),
+                                             get_tensor_shape(bias, format),
+                                             get_tensor_shape(z, format),
+                                             {}},
+                                            algo) /
                     RUNS;
-            float time_in_ms_cudnn = 0.f;
-            if (arg.ci % 32 == 0 && arg.co % 32 == 0) {
-                param.format = Format::NCHW32;
-                benchmarker_cudnn.set_param(param);
-                time_in_ms_cudnn =
-                        benchmarker_cudnn.execs(
-                                {{arg.n, arg.ci / 32, arg.hi, arg.wi, 32},
-                                 {arg.co, arg.ci / 32, arg.f, arg.f, 32},
-                                 {1, arg.co / 32, 1, 1, 32},
-                                 {arg.n, arg.co / 32, ho, wo, 32},
-                                 {}}) /
-                        RUNS;
-            } else {
-                param.format = Format::NCHW4;
-                benchmarker_cudnn.set_param(param);
-                time_in_ms_cudnn =
-                        benchmarker_cudnn.execs(
-                                {{arg.n, arg.ci / 4, arg.hi, arg.wi, 4},
-                                 {arg.co, arg.ci / 4, arg.f, arg.f, 4},
-                                 {1, arg.co / 4, 1, 1, 4},
-                                 {arg.n, arg.co / 4, ho, wo, 4},
-                                 {}}) /
-                        RUNS;
-            }
-            float flo = 2.0 * arg.n * arg.co * ho * wo * arg.ci * arg.f *
-                        arg.f / (1e12);
-            TensorShape src{arg.n, arg.ci, arg.hi, arg.wi},
-                    filter{arg.co, arg.ci, arg.f, arg.f};
-            printf("src=%s, filter=%s, time(algo=%s)=%.2f %.2fTops, "
-                   "time(cudnn)=%.2f %.2fTops, "
-                   "perf(algo=%s)/perf(cudnn)=%.2f\n",
-                   src.to_string().c_str(), filter.to_string().c_str(), algo,
-                   time_in_ms, (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn,
-                   (flo / (time_in_ms_cudnn * 1e-3)), algo,
-                   time_in_ms_cudnn / time_in_ms);
+        } else {
+            time_in_ms = benchmarker.execs({get_tensor_shape(src, format),
+                                            get_tensor_shape(filter, format),
+                                            get_tensor_shape(bias, format),
+                                            get_tensor_shape(z, format),
+                                            {}}) /
+                         RUNS;
         }
+        time_in_ms_cudnn =
+                benchmarker_cudnn.execs({get_tensor_shape(src, format_cudnn),
+                                         get_tensor_shape(filter, format_cudnn),
+                                         get_tensor_shape(bias, format_cudnn),
+                                         get_tensor_shape(z, format_cudnn),
+                                         {}}) /
+                RUNS;
+        printf("src=%s, filter=%s, dst=%s, time(algo=%s)=%.2f %.2fTops, "
+               "time(cudnn)=%.2f %.2fTops, "
+               "perf(algo=%s)/perf(cudnn)=%.2f\n",
+               src.to_string().c_str(), filter.to_string().c_str(),
+               dst.to_string().c_str(), algo, time_in_ms,
+               (flo / (time_in_ms * 1e-3)), time_in_ms_cudnn,
+               (flo / (time_in_ms_cudnn * 1e-3)), algo,
+               time_in_ms_cudnn / time_in_ms);
     }
 }
 #endif
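Note (illustrative, not part of the patch): in the five-operand ConvBias signature used throughout this file, the fourth operand `z` is an optional residual input added after the bias and before the activation, which is what the "bench with z tensor" runs above exercise. A scalar sketch of that epilogue, with RELU standing in for the configured NonlineMode:

    #include <algorithm>

    // dst = activation(conv(src, filter) + bias + z), applied per element
    float conv_bias_epilogue(float conv_result, float bias, float z) {
        return std::max(conv_result + bias + z, 0.f);  // the RELU case
    }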
@@ -1166,6 +1078,77 @@ TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_1x1_ALGO_2) {
 }
 
+#if CUDA_VERSION >= 10020
+/// \note: we only check several cases and block sizes in megdnn_test; the
+/// full test cases are maintained in the cutlass repository
+TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NCHW32_IMMA) {
+    require_compute_capability_eq(7, 5);
+    Checker<ConvBiasForward> checker(handle_cuda());
+    auto check = [&checker](const std::string& algo) {
+        checker.set_before_exec_callback(
+                conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo.c_str()));
+        UniformIntRNG rng{-8, 8};
+        UniformIntRNG bias_rng{-50, 50};
+        UniformIntRNG const_rng{1, 1};
+        // use scales that are all integers to avoid rounding error
+        checker.set_rng(0, &rng)
+                .set_rng(1, &rng)
+                .set_rng(2, &bias_rng)
+                .set_rng(3, &rng)
+                .set_dtype(0, dtype::QuantizedS8{6.0f})
+                .set_dtype(1, dtype::QuantizedS8{1.0f})
+                .set_dtype(2, dtype::QuantizedS32{6.0f})
+                .set_dtype(3, dtype::QuantizedS8{1.0f})
+                .set_dtype(4, dtype::QuantizedS8{6.0f})
+                .set_epsilon(1e-3);
+        param::ConvBias param;
+        param.pad_h = param.pad_w = 1;
+        param.stride_h = param.stride_w = 1;
+        param.format = param::ConvBias::Format::NCHW32;
+        checker.set_param(param).execs({{16, 16, 7, 7, 32},
+                                        {512, 16, 3, 3, 32},
+                                        {1, 16, 1, 1, 32},
+                                        {},
+                                        {}});
+        param.nonlineMode = param::ConvBias::NonlineMode::RELU;
+        checker.set_param(param).execs({{16, 16, 7, 7, 32},
+                                        {512, 16, 1, 1, 32},
+                                        {1, 16, 1, 1, 32},
+                                        {},
+                                        {}});
+        param.nonlineMode = param::ConvBias::NonlineMode::H_SWISH;
+        checker.set_param(param).execs({{16, 16, 7, 7, 32},
+                                        {512, 16, 3, 3, 32},
+                                        {1, 16, 1, 1, 32},
+                                        {},
+                                        {}});
+        // use non-integer scales
+        param.nonlineMode = param::ConvBias::NonlineMode::H_SWISH;
+        checker.set_dtype(0, dtype::QuantizedS8{1.1f})
+                .set_dtype(1, dtype::QuantizedS8{1.2f})
+                .set_dtype(2, dtype::QuantizedS32{1.1f * 1.2f})
+                .set_dtype(3, dtype::QuantizedS8{1.1f})
+                .set_dtype(4, dtype::QuantizedS8{6.0f})
+                .set_epsilon(1 + 1e-3)
+                .set_max_avg_error(1e-1)
+                .set_max_avg_biased_error(1e-1)
+                .execs({{16, 16, 7, 7, 32},
+                        {512, 16, 3, 3, 32},
+                        {1, 16, 1, 1, 32},
+                        {16, 16, 7, 7, 32},
+                        {}});
+    };
+    std::string algo = ConvBias::algo_name<ConvBias::DirectParam>(
+            "INT8_NCHW32_IMMA_IMPLICIT_GEMM_256X128X64_64X64X64",
+            ConvBias::DirectParam{});
+    check(algo);
+    algo = ConvBias::algo_name<ConvBias::DirectParam>(
+            "INT8_NCHW32_IMMA_IMPLICIT_GEMM_32X64X64_32X16X64",
+            ConvBias::DirectParam{});
+    check(algo);
+}
+#endif
+
 #if MEGDNN_WITH_BENCHMARK
 TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_CHWN4) {
     require_compute_capability(6, 1);
@@ -1233,6 +1216,18 @@ TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL) {
             param::ConvBias::Format::CHWN4);
 }
 
+
+#if CUDA_VERSION >= 10020
+TEST_F(CUDA, BENCHMARK_CUTLASS_CONV_BIAS_INT8_NCHW32) {
+    require_compute_capability(7, 5);
+    benchmark_target_algo_with_cudnn_tsc(
+            handle_cuda(), get_resnet50_bench_args(256),
+            dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
+            dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.0f},
+            "DIRECT:INT8_NCHW32_IMMA_IMPLICIT_GEMM",
+            param::ConvBias::Format::NCHW32);
+}
+#endif
 #endif
 
 }  // namespace test
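Note (illustrative, not part of the patch): the dtype choices in the tests above follow standard quantization bookkeeping. An int8 convolution accumulates int8 x int8 products in int32, and each product carries the combined scale of its operands, which is why the bias dtype is `QuantizedS32{1.1f * 1.2f}` when src and filter use scales 1.1f and 1.2f. A minimal sketch with invented values:

    #include <cstdio>

    int main() {
        float src_scale = 1.1f, filter_scale = 1.2f;
        int acc = 5 * 7;  // one int8 x int8 product, held in the int32 accumulator
        int bias_q = 42;  // bias quantized with scale src_scale * filter_scale
        // both terms carry the combined scale, so they can be summed directly
        float real = (acc + bias_q) * (src_scale * filter_scale);
        printf("dequantized partial sum: %f\n", real);
        return 0;
    }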
diff --git a/dnn/test/cuda/utils.h b/dnn/test/cuda/utils.h
index 299d4ec5a6cbeded96ba23d638c30394ad8fbc02..3ce2379d26fdb26ff0bf9d0420666d65b02b8236 100644
--- a/dnn/test/cuda/utils.h
+++ b/dnn/test/cuda/utils.h
@@ -34,7 +34,7 @@ bool check_compute_capability_eq(int major, int minor);
     do {                                                                    \
         if (!megdnn::test::check_compute_capability((x), (y))) {           \
             printf("skip testcase due to cuda compute capability not "     \
-                   "require.(expected:%d.%d)",                             \
+                   "satisfied (expected:%d.%d)\n",                         \
                    (x), (y));                                              \
             return;                                                        \
         }                                                                  \
@@ -44,7 +44,7 @@ bool check_compute_capability_eq(int major, int minor);
     do {                                                                    \
         if (!megdnn::test::check_compute_capability_eq((x), (y))) {        \
             printf("skip testcase due to cuda compute capability not "     \
-                   "equal to %d.%d",                                       \
+                   "equal to %d.%d\n",                                     \
                    (x), (y));                                              \
             return;                                                        \
         }                                                                  \
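Note (illustrative, not part of the patch): the two message fixes above matter because the skip macros can fire many times in a single test-binary run, and without a trailing newline consecutive messages run together in the log. A trivial standalone reproduction:

    #include <cstdio>

    int main() {
        // without the trailing '\n', two skipped tests print as one line:
        printf("skip testcase (expected:%d.%d)", 7, 5);
        printf("skip testcase (expected:%d.%d)", 7, 5);
        // output: skip testcase (expected:7.5)skip testcase (expected:7.5)
        return 0;
    }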