From 2e4b9a42f7fadba311926fb9f88d9597f3b982e7 Mon Sep 17 00:00:00 2001 From: Megvii Engine Team Date: Fri, 5 Feb 2021 14:23:25 +0800 Subject: [PATCH] fix(mgb/gopt): fix folding conv dimshuffle opt pass GitOrigin-RevId: 878b7de9deec202fa559899544d35b2ff1468d5e --- .../conv_bias/cutlass_convolution_wrapper.cu | 79 ++++++++++-------- .../conv_bias/cutlass_convolution_wrapper.cuh | 6 +- .../implicit_gemm_int8_nchw4_dp4a.cpp | 19 +++-- ...4a_ncdiv4hw4_16x128x16_16x128x16_hswish.cu | Bin 1686 -> 1914 bytes ...m_dp4a_ncdiv4hw4_16x128x16_16x128x16_id.cu | Bin 1680 -> 1908 bytes ...dp4a_ncdiv4hw4_16x128x16_16x128x16_relu.cu | Bin 1684 -> 1912 bytes ...cdiv4hw4_1x1_16x128x16_16x128x16_hswish.cu | Bin 1687 -> 1915 bytes ...4a_ncdiv4hw4_1x1_16x128x16_16x128x16_id.cu | Bin 1681 -> 1909 bytes ..._ncdiv4hw4_1x1_16x128x16_16x128x16_relu.cu | Bin 1685 -> 1913 bytes ...div4hw4_nchw_16x128x16_16x128x16_hswish.cu | Bin 0 -> 1890 bytes ...a_ncdiv4hw4_nchw_16x128x16_16x128x16_id.cu | Bin 0 -> 1884 bytes ...ncdiv4hw4_nchw_16x128x16_16x128x16_relu.cu | Bin 0 -> 1888 bytes ...hw4_nchw_1x1_16x128x16_16x128x16_hswish.cu | Bin 0 -> 1891 bytes ...div4hw4_nchw_1x1_16x128x16_16x128x16_id.cu | Bin 0 -> 1885 bytes ...v4hw4_nchw_1x1_16x128x16_16x128x16_relu.cu | Bin 0 -> 1889 bytes src/gopt/test/inference.cpp | 6 ++ 16 files changed, 63 insertions(+), 47 deletions(-) create mode 100644 dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_hswish.cu create mode 100644 dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_id.cu create mode 100644 dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_relu.cu create mode 100644 dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_1x1_16x128x16_16x128x16_hswish.cu create mode 100644 dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_1x1_16x128x16_16x128x16_id.cu create mode 100644 dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_1x1_16x128x16_16x128x16_relu.cu diff --git a/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu index 46a65af5..667b5771 100644 --- a/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu +++ b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cu @@ -286,7 +286,8 @@ void megdnn::cuda::cutlass_wrapper:: uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, - const GemmCoord& /* warp_shape */, cudaStream_t /* stream */) {} + const GemmCoord& /* warp_shape */, int /* stages */, + cudaStream_t /* stream */) {} #else template void megdnn::cuda::cutlass_wrapper:: @@ -296,15 +297,15 @@ void megdnn::cuda::cutlass_wrapper:: int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, - const GemmCoord& warp_shape, cudaStream_t stream) { + const GemmCoord& warp_shape, int stages, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ - warp_k_, stage_, aligned_) \ + warp_k_, stage_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ - 
warp_shape.k() == warp_k_) { \ + warp_shape.k() == warp_k_ && stages == stage_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape; \ @@ -397,7 +398,8 @@ void megdnn::cuda::cutlass_wrapper:: uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ - const GemmCoord& warp_shape, cudaStream_t stream); + const GemmCoord& warp_shape, int stages, \ + cudaStream_t stream); INST(true); INST(false); #undef INST @@ -414,7 +416,8 @@ void megdnn::cuda::cutlass_wrapper:: uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, - const GemmCoord& /* warp_shape */, cudaStream_t /* stream */) {} + const GemmCoord& /* warp_shape */, int /* stages */, + cudaStream_t /* stream */) {} #else template void megdnn::cuda::cutlass_wrapper:: @@ -424,15 +427,15 @@ void megdnn::cuda::cutlass_wrapper:: int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, - const GemmCoord& warp_shape, cudaStream_t stream) { + const GemmCoord& warp_shape, int stages, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ - warp_k_, aligned_) \ + warp_k_, stages_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ - warp_shape.k() == warp_k_) { \ + warp_shape.k() == warp_k_ && stages == stages_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape; \ @@ -449,7 +452,7 @@ void megdnn::cuda::cutlass_wrapper:: cutlass::convolution::threadblock:: \ ConvolutionNCxHWxThreadblockSwizzle< \ cutlass::convolution::ConvType::kConvolution>, \ - 2, 4, aligned_, NeedLoadFromConstMem, \ + stages_, 4, aligned_, NeedLoadFromConstMem, \ cutlass::arch::OpMultiplyAdd>; \ typename Convolution::ConvolutionParameter conv_param{ \ param.n, param.ci, param.co, param.hi, param.wi, \ @@ -460,16 +463,17 @@ void megdnn::cuda::cutlass_wrapper:: epilogue, stream); \ } #define DISPATCH_KERNEL \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 64, 8, 16, 64, 8, 4); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 2, 16); \ + 
DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 128, 16, 16, 128, 16, 1, 8); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 64, 8, 16, 64, 8, 2, 4); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ @@ -525,7 +529,8 @@ void megdnn::cuda::cutlass_wrapper:: uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ - const GemmCoord& warp_shape, cudaStream_t stream); + const GemmCoord& warp_shape, int stages, \ + cudaStream_t stream); INST(true); INST(false); #undef INST @@ -542,7 +547,8 @@ void megdnn::cuda::cutlass_wrapper:: uint32_t /* nonlinear_mode */, float /* alpha */, float /* beta */, float /* gamma */, float /* scale */, const GemmCoord& /* threadblock_shape */, - const GemmCoord& /* warp_shape */, cudaStream_t /* stream */) {} + const GemmCoord& /* warp_shape */, int /* stages */, + cudaStream_t /* stream */) {} #else template void megdnn::cuda::cutlass_wrapper:: @@ -552,15 +558,15 @@ void megdnn::cuda::cutlass_wrapper:: int* workspace, const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, - const GemmCoord& warp_shape, cudaStream_t stream) { + const GemmCoord& warp_shape, int stages, cudaStream_t stream) { #define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \ threadblock_k_, warp_m_, warp_n_, \ - warp_k_, aligned_) \ + warp_k_, stages_, aligned_) \ if (threadblock_shape.m() == threadblock_m_ && \ threadblock_shape.n() == threadblock_n_ && \ threadblock_shape.k() == threadblock_k_ && \ warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \ - warp_shape.k() == warp_k_) { \ + warp_shape.k() == warp_k_ && stages == stages_) { \ using ThreadBlockShape = \ cutlass::gemm::GemmShape; \ @@ -577,7 +583,7 @@ void megdnn::cuda::cutlass_wrapper:: cutlass::convolution::threadblock:: \ ConvolutionNCxHWxThreadblockSwizzle< \ cutlass::convolution::ConvType::kConvolution>, \ - 2, 4, aligned_, NeedLoadFromConstMem>; \ + stages_, 4, aligned_, NeedLoadFromConstMem>; \ typename Convolution::ConvolutionParameter conv_param{ \ param.n, param.ci, param.co, param.hi, param.wi, \ param.fh, param.fw, param.ho, param.wo, param.sh, \ @@ -587,15 +593,15 @@ void megdnn::cuda::cutlass_wrapper:: epilogue, stream); \ } #define DISPATCH_KERNEL \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 16); \ - DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 32, 32, 32, 32, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 128, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 128, 32, 32, 64, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 64, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 64, 32, 32, 64, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(64, 32, 32, 64, 32, 32, 2, 16); \ + DISPATCH_KERNEL_WITH_TILE_SHAPE(32, 32, 
32, 32, 32, 32, 2, 16); \ megdnn_assert(false, \ "unsupported threadblock shape (%dx%dx%d) and warp shape " \ "(%dx%dx%d)", \ @@ -651,7 +657,8 @@ void megdnn::cuda::cutlass_wrapper:: uint32_t nonlinear_mode, float alpha, float beta, \ float gamma, float scale, \ const GemmCoord& threadblock_shape, \ - const GemmCoord& warp_shape, cudaStream_t stream); + const GemmCoord& warp_shape, int stages, \ + cudaStream_t stream); INST(true); INST(false); #undef INST diff --git a/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh index 71c15856..0a9511d2 100644 --- a/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh +++ b/dnn/src/cuda/conv_bias/cutlass_convolution_wrapper.cuh @@ -56,7 +56,7 @@ void do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4( const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, - cudaStream_t stream); + int stages, cudaStream_t stream); template void do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( @@ -65,7 +65,7 @@ void do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, - cudaStream_t stream); + int stages, cudaStream_t stream); template void do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32( @@ -74,7 +74,7 @@ void do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_ncdiv32hw32( const convolution::ConvParam& param, uint32_t nonlinear_mode, float alpha, float beta, float gamma, float scale, const GemmCoord& threadblock_shape, const GemmCoord& warp_shape, - cudaStream_t stream); + int stages, cudaStream_t stream); } // namespace cutlass_wrapper } // namespace cuda diff --git a/dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw4_dp4a.cpp b/dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw4_dp4a.cpp index 76cec7d2..ace4620a 100644 --- a/dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw4_dp4a.cpp +++ b/dnn/src/cuda/conv_bias/implicit_gemm_int8_nchw4_dp4a.cpp @@ -32,8 +32,11 @@ bool ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::is_available( if (!conv_bias::check_bias_share_in_channel(*(args.bias_layout), param.format)) return false; - if (param.format != Format::NCHW4 && param.format != Format::NCHW4_NCHW && - param.format != Format::NCHW4_NCHW32) + if (param.format == Format::NCHW4_NCHW32) { + if (m_algo_param.threadblock_m % 32 != 0) + return false; + } else if (param.format != Format::NCHW4_NCHW && + param.format != Format::NCHW4) return false; size_t n = args.src_layout->operator[](0), ci = args.src_layout->operator[](1) * 4, @@ -187,7 +190,7 @@ void ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::exec( cutlass_wrapper::GemmCoord{m_algo_param.warp_m, m_algo_param.warp_n, m_algo_param.warp_k}, - stream); + m_algo_param.stage, stream); } else if (param.format == Format::NCHW4_NCHW) { cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( @@ -205,7 +208,7 @@ void ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::exec( cutlass_wrapper::GemmCoord{m_algo_param.warp_m, m_algo_param.warp_n, m_algo_param.warp_k}, - stream); + m_algo_param.stage, stream); } else { megdnn_assert(param.format == Format::NCHW4_NCHW32); cutlass_wrapper:: @@ -225,7 +228,7 @@ void ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::exec( cutlass_wrapper::GemmCoord{m_algo_param.warp_m, m_algo_param.warp_n, 
m_algo_param.warp_k}, - stream); + m_algo_param.stage, stream); } } else { if (param.format == Format::NCHW4) { @@ -242,7 +245,7 @@ void ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::exec( cutlass_wrapper::GemmCoord{m_algo_param.warp_m, m_algo_param.warp_n, m_algo_param.warp_k}, - stream); + m_algo_param.stage, stream); } else if (param.format == Format::NCHW4_NCHW) { cutlass_wrapper:: do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw( @@ -260,7 +263,7 @@ void ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::exec( cutlass_wrapper::GemmCoord{m_algo_param.warp_m, m_algo_param.warp_n, m_algo_param.warp_k}, - stream); + m_algo_param.stage, stream); } else { megdnn_assert(param.format == Format::NCHW4_NCHW32); @@ -281,7 +284,7 @@ void ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::exec( cutlass_wrapper::GemmCoord{m_algo_param.warp_m, m_algo_param.warp_n, m_algo_param.warp_k}, - stream); + m_algo_param.stage, stream); } } after_kernel_launch(); diff --git a/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x128x16_16x128x16_hswish.cu b/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x128x16_16x128x16_hswish.cu index ab01f989dadf003d7073fb228910bdc6050460b8..0c779fe15d9856d56b82073d75e6c284fdf267af 100644 GIT binary patch delta 200 zcmbQn`-^YGIz~>P#LE2A5|`qV$v%wQo7XY!ViX5*bQCi4N{o%-feg5U&GpQVj9NMh zTna#tTw0QoSX^vnl~|OVVP)lC;9Htgl39>b>6nrdoLEv?lvpx(KTGE1bF5-qt~sf> zsd**AMah%(SS3J0yig&x%$$yb97yZ`_I diff --git a/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x128x16_16x128x16_id.cu b/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x128x16_16x128x16_id.cu index 9f901437080cba0a608bbc613716d45356c1c870..85fdecd79fda6911989011249fdb9b5c2f050410 100644 GIT binary patch delta 258 zcmbQh`-N}AIz~>P#LE2A5|`qV$v%wQo7XXJVH5{)bQCi4N{o%-feg5U&6UiKj9NMh zTna#tTw0QoSX^vnl~|OVVP)lC;9Htgl39>b>6nrdoLEv?lvpx(CrjpJel~H1oYdUZ zyprIeWG#i1_~N2us1aae^7DW;^RY>&K@_=V=9Husfz+jeS=f{+L6kaWCKiJfCIM+| a%J?A4fSv)Vs+zo)Rgu#bCN_Bv>lXmAc2f)h delta 131 zcmeyuH-UGyb97`HHr2NxylC}if97#qcx=qUIkR_2$M0L3=1Wp-qoEXkU|29&di zFPXfMRRYGm!zyhJ)uE-35}%ZrSgfPKr2qxV`FX`9AQOSQK+;u6a$wQP4QyWkYa=Pi diff --git a/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x128x16_16x128x16_relu.cu b/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x128x16_16x128x16_relu.cu index 5dfd371c8bdcd07754f633083d69edd5c36850ef..c088e6f6ec533ac73e814d8f65b420a89b129b67 100644 GIT binary patch delta 273 zcmbQj`-5-8Iz~>P#LE2A5|`qV$v%wQo7XY!U=#;(bQCi4N{o%-feg5U&9%&qj9NMh zTna#tTw0QoSX^vnl~|OVVP)lC;9Htgl39>b>6nrdoLEv?lvpx(FH5FENo7H5USe*l zf^&XeS$yb97P#LE2A5|`qV$v%wQo7XY!ViX5*bQCi4N{o%-feg5U&GpPqj9NMh zTna#tTw0QoSX^vnl~|OVVP)lC;9Htgl39>b>6nrdoLEv?lvpzP088fN^Q>ZAt~sf> zsd**AMah%(StUS1yig&x%$$yb97P#LE2A5|`qV$v%wQo7XXJVH5{)bQCi4N{o%-feg5U&6Uhfj9NMh zTna#tTw0QoSX^vnl~|OVVP)lC;9Htgl39>b>6nrdoLEv?lvpx(7fa@30XA`koYdUZ zyprIeWG#i1_~N2us1aae^7DW;^Rr2)K@_=V=9Husfz+jeS=f{+L6kaWCKiJfCIM+| a%J?A4fSv)Vs+zoyRgu#bCN_C4>lXmFcvBVt delta 150 zcmey$H<5S4I>yb97`HHr2NxylC}if97#qcx=qUIkR_2$M0L3=1Wp-kmEXA6k0+h3e zFVRv+i7y5z AhyVZp diff --git a/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_1x1_16x128x16_16x128x16_relu.cu b/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_1x1_16x128x16_16x128x16_relu.cu index 76ecaad0dd28cb71a79c83b7470737e24741710e..0ed7466912ebf3b72a88f4555ecc03afeb3652b1 100644 GIT binary patch delta 272 
zcmbQr`;%|OIz~>P#LE2A5|`qV$v%wQo7XY!U=#;(bQCi4N{o%-feg5U&9%%b>6nrdoLEv?lvpx(A4{e}No7H5USe*l zf^&XeS$yb97kw diff --git a/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_hswish.cu b/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_hswish.cu new file mode 100644 index 0000000000000000000000000000000000000000..8865080e356c1386e72b302a6160bf6c88d97ebe GIT binary patch literal 1890 zcmbVNZBN@U5dPj@;T37xP@sZM2$>EELRZkBOtez>EplzQwdy#QW4DxFzq9kwl(icy zAM$ecbNAfc@;JjC%d2tK`>7tJ6UZSxQhpju8_C_T^Skq+LGWw^le~ zsC_hc%H^CLX<|sqEjR5qU~+2&wMRq(Qy>Enp{R6>qjqj+$EVV%Q|Sn0&o`fm{|uQ4 zDr_?2X$rZAOp=u$T61$uolvsfYB}#6-II0Z?9fp3t;!i=8H^Z@xXLp#h@WrA&)u(= z7h9O`LPAFSVe#|R@W-F9o80cl4C32y6=gIxoUm(|(Zz61G;gAOVi@<~9+tW%XAwF% zJ%`&_&2mhP{x9I2f)+SxMW+6ZhmaB4l%}VJ%w~>1>Ly)YS~0dEDU7e7xmS$c3&n|v zvveX9@d0m#E0NE;EwtmB%8b|$54!y2G=cZh6drZrG3Y##&bw=p@R}*j7&E>Ulw;i3 zTLcXP0?)NBBD8HP+FpkaOpDFX+1lK&)MUffQjHjI(eX4!YYl`J4Mx#Mk;KqB#*aGo zhjJ)Vh*Wzj)`saBbRxuft(*weT8Pl=?nY?Svl3r>-r`uaZsPU0Rkz}J6fwgpwD9Fc z@}i!*eeKTovW)K)6(LYF$dJG0R8``^Nh`EmgNmT@mculIiHDY%U?}A?rWDi|W`)0R z4SlCf9t8*qi5Jaifc z%KOm%9C@rC>+eH?T0QWkf%S>&3~s#QMm-L4vS`&s=GiW+_N zk7W{m6b=8o-PD40Ry+HsF%I7H%2hRP8jidYh86~Dnpgzm4PHK9?E4w%Ot%iWVq)6* E4Z|yki2wiq literal 0 HcmV?d00001 diff --git a/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_id.cu b/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..6a93ab6171cf7b4163c863df6d4bc59a588bcab7 GIT binary patch literal 1884 zcmbVNZBN@U5dPj@;T37xP@sZM2$>EELRZkBOtez>EplzQwdy#QW4DxFzq9kwl(icy zAM$ecb9c|(@;JjC%d2tK`>7tJ6UZSxQhpju8_C_T^Skq+LGWw^le~ zsC_hc%H^CLX<|sqEjR5qU~+2&wMRrkrjQIkgrd?hj@r4Q9Z#iGQ|Sn0&o`fm{|w9o z6*igiGzIQ~NwP9TYi^FI6H2yQE$6+Xd$P`)9U6+hRh%)F!HDsQt2{G<`1yAH-2Hla zv4#0ABrw_!i=Ur{KmLT>%6=NGnVSEkEy<+TMC{9eA zr4ylu57=#?9oJN5#D+-F-!G>LyqBi%s42&w@Ju@Is2Sd?r8HyA_)<`gabs^0G)MO1!*CUet59 zuibnv%lKYP5ds~90Qu{js!BXKX@!<+&<}Lpa+qT<>(DY245fU=l!6+=l<@bhqrWjU z=v8x}+bI9#r&@-v6Ar9}4k;L6sIdv-p5!T4es~-rmcVc-GxiF81Fq1Mhfc#Nc^}%J zAdmF}{e4JKs|UU`P=DoD?q^=)bzMN)hiH|VMXm|0TIGrFcGalc&+2DT)aavsER*ny zX!zglrWUNT+Sx~qaqyN`uBt)PaO9ORv@lB3#3C4M@bdX$-)~4~x^=)66VujjF=d4_ literal 0 HcmV?d00001 diff --git a/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_relu.cu b/dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nchw_16x128x16_16x128x16_relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..6e01aaaf1e058b2ca59ba90945c422d4a2a9c9cc GIT binary patch literal 1888 zcmbVNZBN@U5dPj@;T37xP@sZM2$>EELRZkBOtez>EplzQwdy#QW4DxFzq9kwl(icy zAM$ecbNAfc@;JjC%d2tK`>7tJ6UZSxQhpju8_C_T^Skq+LGWw^le~ zsC_hc%H^CLX<|sqEjR5qU~+2&wMRq(Qy>Enp{R6>qjqj+$EVV%Q|Sn0&o`fm{|uQ4 zDr_?2X$rZAOp=u$T61$uolvsfYB}#6-II0Z?9fp3t;!i=8H^Z@xXLp#h@WrA&)u(= z7h9O`LPAFSVe#|R@W-F9o80cl4C32y6=gIxoUm(|(Zz61G;gAOVi@<~9+tW%XAwF% zJ%`&_&2mhP{x9I2f)+SxMW+6ZhmaB4l%}VJ%w~>1>Ly)YS~0dEDU7e7xmS$c3&n|v zvveX9@d2N>bln!(aZP1LY={Ou{&JeYdua-fI_(%#o=NAOH7R(-lxB<>Ukb`GZtN|B z2Kj*JS``u6HWf{;LkFhCX6S2eZdhuvVQZ;HjJN1`8l$xaLW>5YXro7B=p5rm9s46W z6e&ciJr!%i^bGnCV!YN(glZ*3=yh)+wCPxhuRU*ZEE+fQYTT+@aX5;YVRg6?FE5f8 z_1x`icfOZpe6ObnftEpp{57Yl5)V#Vq2(Gh1f91WW*N*ow9EuUDW5T=pvEvM{C(^A zZw!s}s@c$Ol>hQmEkoD|ht@)e6pS&{*o1LU@{}t-Kn@X0U_h0Oy@KC>EA-@{(=bln zhxVt)WBpKn9}?8+fiDfzU-K*XGcWSGE}-p0w93pP*MwHB@`>+u)u`Lg>W5I&=%aru 
zlkl5p_}}fO7Ob<{*+-3W@RnDus$tV`AxIwB~v=BingiMMAOBzriR3uXRwsLGIvEubwz9fXN?|5(7(4>X# z!(PTdGiPRwXAi^_eR$~gZw7;8)ayT8CBIHuoer9D#SL+sp~*UYB-AsK&`d3pi6Ayv zaHA5DYH5%4Isn8>Wrm{_F-oWz%BF~VCyBMK18vAGC8!_AhzSDwtP>PzZ|D2g3TF(p zkH%hk%h{18hNRqa(|!ZSIwPn%A`+Ma8Gr~yrDq(qt)ZQeN~cbxBa}Ped?w*D&rDF^ zk{M4^$UV;_Ss9`=H^fd+>8KF&SdRoYA=J}&;(&eQUW0xm|@ijE}im`j4I5BaS zPJ|*M;O%fF?7Z7TJFcnBh|A+am%p4Q@LrnoN1b>KI?tpJ?wTZmW=b>0j4uV{7&rD7 zL306t=UNvL+BOw!uS4fdi_Os4+T5_zWb>`18Zq9Y6KIUq8VD^KjG~PqiJ^0hA9WlK z}X{aw1e~AwsXa8=+0lN__2ki(}EcNzmg~-HPK;#LQQrg)c9X z7xmojYj?hvW&EJ12!WcXL|XouR8@)xFP+eG4Kjk(I}YOvMjl#bf}xbpm{L$=7#88a zH32q;Mt;?J=r-zq`Ki_+?1XdcphXHM8ES08xF>naRhS`%h$S$iO2$FLZ@?9T3eagj zQQn6R2Pt5~Tz?-DEELRZkBOtez>EplzQwdy#QW4DxFzq9kwl(icy zAM$ecb9c|(@;JjC%d2tK`>7tJ6UZSxQhpju8_C_T^Skq+LGWw^le~ zsC_hc%H^CLX<|sqEjR5qU~+2&wMRrkrjQIkgrd?hj@r4Q9Z#iGQ|Sn0&o`fm{|w9o z6*igiGzIQ~NwP9TYi^FI6H2yQE$6+Xd$P`)9U6+hRh%)F!HDsQt2{G<`1yAH-2Hla zv4#0ABrw_!i=Ur{KmLT>%6=NGnVSEkEy<+TMC{9eA zr4ylu57=#?9oJN5#D+-F-!G>LyqBi%s42&w@Ju@Is2Sd?r8HyA_)<`gabs^0G)MO1!*CUet59 zuibnv%lKYP5ds}giOl)yq^eRpIBA8JYY-5$-f|dZFznDW6AYz%#*~60!PQuoF(Kg%&B8VW_bQEELRZkBOtez>EplzQwdy#QW4DxFzq9kwl(icy zAM$ecbNAfc@;JjC%d2tK`>7tJ6UZSxQhpju8_C_T^Skq+LGWw^le~ zsC_hc%H^CLX<|sqEjR5qU~+2&wMRq(Qy>Enp{R6>qjqj+$EVV%Q|Sn0&o`fm{|uQ4 zDr_?2X$rZAOp=u$T61$uolvsfYB}#6-II0Z?9fp3t;!i=8H^Z@xXLp#h@WrA&)u(= z7h9O`LPAFSVe#|R@W-F9o80cl4C32y6=gIxoUm(|(Zz61G;gAOVi@<~9+tW%XAwF% zJ%`&_&2mhP{x9I2f)+SxMW+6ZhmaB4l%}VJ%w~>1>Ly)YS~0dEDU7e7xmS$c3&n|v zvveX9@d2N>bln!(aZP1LY={Ou{&JeYdua-fI_(%#o=NAOH7R(-lxB<>Ukb`GZtN|B z2Kj*JS``u6HWf{;LkFhCX6S2eZdhuvVQZ;HjJN1`8l$xaLW>5YXro7B=p5rm9s46W z6e&ciJr!%i^bGnCV!YN(glZ*3=yh)+wCPxhuRU*ZEE+fQYTT+@aX5;YVRg6?FE5f8 z_1x`icfOZpe6ObnftII4=KM9OsuT}STA}3{L{bT38&UVixkW;)Yyb^Px6#2KS2%=OJG8kjJ<^4fGY&$q0=x= z-iP+b$YcFfe;*R$>VYo})L-)}_cO2Zx-KB@L$u1wBG-ggt@4TQcGalc&+3m*)aavs zER*n`X!zglrdF)8+Sx~qaqyN`uBvI%aO9ORv@lQ8#3Gn(@bdX$-@iy_x^=)66Vujj D&>x2u literal 0 HcmV?d00001 diff --git a/src/gopt/test/inference.cpp b/src/gopt/test/inference.cpp index f6e0f540..c6e040b2 100644 --- a/src/gopt/test/inference.cpp +++ b/src/gopt/test/inference.cpp @@ -3895,6 +3895,9 @@ TEST(TestGoptInference, FoldingConvDimshuffle) { .apply({{y}}) .endpoint_vars(), y_fuse); + gopt::modify_opr_algo_strategy_inplace( + {y_fuse}, + opr::mixin::AlgoChooserHelper::ExecutionPolicy::Strategy::PROFILE); graph->compile({{y_fuse, {}}}) ->to_json() ->writeto_fpath(output_file( @@ -3976,6 +3979,9 @@ TEST(TestGoptInference, FoldingConvDimshuffleNCHW4NCHW32) { .apply({{y}}) .endpoint_vars(), y_fuse); + gopt::modify_opr_algo_strategy_inplace( + {y_fuse}, + opr::mixin::AlgoChooserHelper::ExecutionPolicy::Strategy::PROFILE); graph->compile({{y_fuse, {}}}) ->to_json() ->writeto_fpath(output_file( -- GitLab
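Reviewer note: the functional core of this patch is that the CUTLASS dispatch macros in cutlass_convolution_wrapper.cu now key on the pipeline stage count in addition to the threadblock and warp tile shapes (the added "stages == stage_" / "stages == stages_" condition), the previously hard-coded "2" in the Convolution template becomes "stages_", and m_algo_param.stage is threaded through the wrapper signatures from implicit_gemm_int8_nchw4_dp4a.cpp. That is what lets the newly added single-stage 16x128x16_16x128x16 kernels for the NCHW4 -> NCHW output path be selected at all.

Below is a minimal, self-contained host-side sketch of that dispatch pattern. GemmCoord, the macro name, and the stage-matching condition mirror the diff; launch_kernel, do_conv_bias_dispatch, and main are hypothetical stand-ins for illustration, not the real CUTLASS instantiation.

    // Hypothetical model of the dispatch change; GemmCoord and the macro
    // parameters mirror cutlass_convolution_wrapper.cu, everything else is
    // a simplified stand-in.
    #include <cassert>
    #include <cstdio>

    struct GemmCoord {
        int m_, n_, k_;
        int m() const { return m_; }
        int n() const { return n_; }
        int k() const { return k_; }
    };

    // Stand-in for the real CUTLASS kernel launch: just reports which
    // template instantiation the dispatch selected.
    template <int TBM, int TBN, int TBK, int WM, int WN, int WK, int Stages>
    void launch_kernel() {
        std::printf("threadblock %dx%dx%d, warp %dx%dx%d, %d stage(s)\n",
                    TBM, TBN, TBK, WM, WN, WK, Stages);
    }

    // As in the patch, the tile-shape match now also requires the requested
    // stage count to equal the instantiation's stage count (aligned_ is
    // accepted but elided in this sketch).
    #define DISPATCH_KERNEL_WITH_TILE_SHAPE(tbm_, tbn_, tbk_, wm_, wn_,     \
                                            wk_, stage_, aligned_)          \
        if (threadblock_shape.m() == tbm_ &&                                \
            threadblock_shape.n() == tbn_ &&                                \
            threadblock_shape.k() == tbk_ &&                                \
            warp_shape.m() == wm_ && warp_shape.n() == wn_ &&               \
            warp_shape.k() == wk_ && stages == stage_) {                    \
            return launch_kernel<tbm_, tbn_, tbk_, wm_, wn_, wk_,           \
                                 stage_>();                                 \
        }

    void do_conv_bias_dispatch(const GemmCoord& threadblock_shape,
                               const GemmCoord& warp_shape, int stages) {
        DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 32, 64, 32, 32, 2, 16);
        DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 128, 16, 16, 128, 16, 1, 8);
        DISPATCH_KERNEL_WITH_TILE_SHAPE(16, 64, 8, 16, 64, 8, 2, 4);
        assert(false && "unsupported threadblock/warp shape or stage count");
    }

    int main() {
        // Before the patch the wrapper hard-coded 2 stages, so a 1-stage
        // instantiation could not be requested; now the stage count
        // (m_algo_param.stage in the real code) flows through and must
        // match the compiled kernel exactly.
        do_conv_bias_dispatch({16, 128, 16}, {16, 128, 16}, /*stages=*/1);
        return 0;
    }

Matching on the stage count keeps the dispatch unambiguous: the profiled algo parameter selects exactly the kernel instantiation that was compiled instead of silently falling through to the megdnn_assert, which is also why the gopt test now forces the PROFILE execution strategy before compiling the fused graph.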