Commit 319436dd authored by Megvii Engine Team

feat(dnn/cuda): add cutlass impls for uint4 x int4 conv bias

GitOrigin-RevId: cf4536855ac3faf5a929b1077dac91092b2f008f
Parent d28eba4e
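A minimal sketch (not part of this commit) of the dtype combination the new
algorithm accepts, following the Checker conventions from the test suite
below; tensor indices 0/1/2/4 denote src/filter/bias/dst, and all scale and
zero-point values are placeholders:

    checker.set_dtype(0, dtype::Quantized4Asymm(1.2f, (uint8_t)8))
            .set_dtype(1, dtype::QuantizedS4(1.3f))
            .set_dtype(2, dtype::QuantizedS32(1.2f * 1.3f))
            .set_dtype(4, dtype::Quantized4Asymm(1.1f, (uint8_t)4));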
......@@ -87,6 +87,9 @@ ConvBiasForwardImpl::AlgoPack::AlgoPack() {
for (auto&& algo : int4_int4_nchw64_imma) {
all_algos.push_back(&algo);
}
for (auto&& algo : uint4_int4_nchw64_imma) {
all_algos.push_back(&algo);
}
#endif
#endif
fill_dp4a_algos();
......@@ -231,8 +234,17 @@ void ConvBiasForwardImpl::AlgoPack::fill_imma_algos() {
{
using AlgoParam = AlgoInt4Int4NCHW64IMMAImplicitGemm::AlgoParam;
int4_int4_nchw64_imma.emplace_back(
AlgoParam{128, 128, 128, 64, 64, 128});
int4_int4_nchw64_imma.emplace_back(
AlgoParam{256, 128, 128, 64, 64, 128});
}
{
using AlgoParam = AlgoUInt4Int4NCHW64IMMAImplicitGemm::AlgoParam;
uint4_int4_nchw64_imma.emplace_back(
AlgoParam{128, 128, 128, 64, 64, 128});
uint4_int4_nchw64_imma.emplace_back(
AlgoParam{256, 128, 128, 64, 64, 128});
}
#endif
}
......
......@@ -62,6 +62,7 @@ public:
CUDA_IMPLICIT_GEMM_UNROLL_WIDTH_CHWN4_IMMA_INT8,
CUDA_IMPLICIT_GEMM_IMMA_NCHW32_INT8,
CUDA_IMPLICIT_GEMM_IMMA_NCHW64_INT4_INT4,
CUDA_IMPLICIT_GEMM_IMMA_NCHW64_UINT4_INT4,
CUDA_BFLOAT16,
CUDA_IMPLICIT_GEMM_SASS_NCHW4_DOTPROD_INT8,
CUDA_IMPLICIT_GEMM_1X1_SASS_NCHW4_DOTPROD_INT8,
......@@ -810,6 +811,55 @@ private:
AlgoParam m_algo_param;
std::string m_name;
};
class ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm final
: public AlgoBase {
public:
struct AlgoParam {
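        // CUTLASS tile sizes: threadblock_{m,n,k} describe the CTA-level
        // GEMM tile, warp_{m,n,k} the per-warp tile within it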
int threadblock_m;
int threadblock_n;
int threadblock_k;
int warp_m;
int warp_n;
int warp_k;
};
AlgoUInt4Int4NCHW64IMMAImplicitGemm(AlgoParam algo_param)
: m_algo_param{algo_param} {
m_name = ConvBias::algo_name<ConvBias::DirectParam>(
ssprintf("UINT4_INT4_NCHW64_IMMA_IMPLICIT_GEMM_%s",
to_string(m_algo_param).c_str()),
ConvBias::DirectParam{});
}
bool is_available(const SizeArgs& args) const override;
size_t get_workspace_in_bytes(const SizeArgs& args) const override;
void exec(const ExecArgs& args) const override;
const char* name() const override { return m_name.c_str(); }
AlgoAttribute attribute() const override {
return AlgoAttribute::REPRODUCIBLE;
}
static std::string to_string(AlgoParam algo_param);
size_t get_preprocess_workspace_in_bytes(
const SizeArgs& args) const override;
SmallVector<TensorLayout> deduce_preprocessed_filter_layout(
const SizeArgs& args) const override;
void exec_preprocess(const ExecArgs& args) const override;
MEGDNN_DECL_ALGO_TYPE(CUDA_IMPLICIT_GEMM_IMMA_NCHW64_UINT4_INT4)
std::string param() const override {
std::string ret;
serialize_write_pod(m_algo_param, ret);
return ret;
}
private:
WorkspaceBundle get_workspace_bundle(dt_byte* raw_ptr,
const SizeArgs& args) const;
void reorder_filter_bias(const ExecArgs& args, void* reduce_filter,
void* reordered_filter,
void* reordered_bias) const;
AlgoParam m_algo_param;
std::string m_name;
};
#endif
class ConvBiasForwardImpl::AlgoBFloat16 final : public AlgoBase {
......@@ -868,6 +918,7 @@ public:
#if CUDA_VERSION >= 10020
std::vector<AlgoInt8NCHW32IMMAImplicitGemm> int8_nchw32_imma;
std::vector<AlgoInt4Int4NCHW64IMMAImplicitGemm> int4_int4_nchw64_imma;
std::vector<AlgoUInt4Int4NCHW64IMMAImplicitGemm> uint4_int4_nchw64_imma;
#endif
std::vector<std::unique_ptr<AlgoGroupConvGeneral>> gconv_refhold;
AlgoBFloat16 bfloat16;
......
......@@ -662,7 +662,7 @@ INST(true);
INST(false);
#undef INST
/* ====== cutlass kernel wrapper for int4 x int4 nchw64 layout ====== */
#if MEGDNN_TEGRA_X1
template <bool NeedLoadFromConstMem>
......@@ -783,4 +783,132 @@ void megdnn::cuda::cutlass_wrapper::
INST(true);
#undef INST
/* ====== cutlass kernel wrapper for uint4 x int4 nchw64 layout ====== */
#if MEGDNN_TEGRA_X1
template <bool NeedLoadFromConstMem>
void megdnn::cuda::cutlass_wrapper::
do_conv_bias_uint4_int4_implicit_gemm_imma_ncdiv64hw64(
const uint8_t* /* d_src */, const int8_t* /* d_filter */,
const int32_t* /* d_bias */, const uint8_t* /* d_z */,
uint8_t* /* d_dst */, int* /* workspace */,
const convolution::ConvParam& /* param */,
uint32_t /* nonlinear_mode */, float /* alpha */,
float /* beta */, float /* gamma */, float /* delta */,
float /* theta */, float /* scale */,
uint8_t /* src_zero_point */,
const GemmCoord& /* threadblock_shape */,
const GemmCoord& /* warp_shape */, cudaStream_t /* stream */) {}
#else
template <bool NeedLoadFromConstMem>
void megdnn::cuda::cutlass_wrapper::
do_conv_bias_uint4_int4_implicit_gemm_imma_ncdiv64hw64(
const uint8_t* d_src, const int8_t* d_filter,
const int32_t* d_bias, const uint8_t* d_z, uint8_t* d_dst,
int* workspace, const convolution::ConvParam& param,
uint32_t nonlinear_mode, float alpha, float beta, float gamma,
float delta, float theta, float scale, uint8_t src_zero_point,
const GemmCoord& threadblock_shape, const GemmCoord& warp_shape,
cudaStream_t stream) {
#define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \
threadblock_k_, warp_m_, warp_n_, \
warp_k_) \
if (threadblock_shape.m() == threadblock_m_ && \
threadblock_shape.n() == threadblock_n_ && \
threadblock_shape.k() == threadblock_k_ && \
warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \
warp_shape.k() == warp_k_) { \
using ThreadBlockShape = \
cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \
threadblock_k_>; \
using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; \
using Convolution = cutlass::conv::device::Convolution< \
cutlass::uint4b_t, cutlass::layout::TensorNCxHWx<64>, \
cutlass::int4b_t, cutlass::layout::TensorCxRSKx<64>, \
ElementOutput, cutlass::layout::TensorNCxHWx<64>, int32_t, \
cutlass::layout::TensorNCxHWx<64>, int32_t, \
cutlass::conv::ConvType::kConvolution, \
cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \
cutlass::conv::threadblock:: \
ConvolutionFpropNCxHWxThreadblockSwizzle, \
2, 32, 32, NeedLoadFromConstMem>; \
typename Convolution::ConvolutionParameter conv_param( \
param.n, param.hi, param.wi, param.ci, param.co, param.fh, \
param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \
param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \
return cutlass_convolution_wrapper<Convolution>( \
reinterpret_cast<const cutlass::uint4b_t*>(d_src), \
reinterpret_cast<const cutlass::int4b_t*>(d_filter), d_bias, \
reinterpret_cast<const cutlass::uint4b_t*>(d_z), \
reinterpret_cast<cutlass::uint4b_t*>(d_dst), workspace, \
conv_param, epilogue, stream, {src_zero_point}); \
}
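// the trailing {src_zero_point} initializer fills Convolution::ExtraParam,
// which (in MegEngine's CUTLASS fork) carries the asymmetric source zero
// point into the kernel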
#define DISPATCH_KERNEL \
DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 128, 128, 64, 64, 128); \
DISPATCH_KERNEL_WITH_TILE_SHAPE(256, 128, 128, 64, 64, 128); \
megdnn_assert(false, \
"unsupported threadblock shape (%dx%dx%d) and warp shape " \
"(%dx%dx%d)", \
threadblock_shape.m(), threadblock_shape.n(), \
threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \
warp_shape.k());
using ElementOutput = cutlass::uint4b_t;
using ElementAccumulator = int32_t;
using ElementBias = int32_t;
using ElementCompute = float;
using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode;
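    // Parameter order follows the BiasAddLinearCombination* functors:
    // {alpha, beta, gamma, <mode-specific scale/threshold>, delta, theta};
    // IDENTITY folds delta and theta into a single constant term.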
switch (nonlinear_mode) {
case NonlineMode::IDENTITY: {
using EpilogueOp =
cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
ElementOutput, 16, ElementAccumulator, ElementBias,
ElementCompute>;
typename EpilogueOp::Params epilogue{alpha, beta, gamma,
delta + theta};
DISPATCH_KERNEL;
}
case NonlineMode::RELU: {
using EpilogueOp = cutlass::epilogue::thread::
BiasAddLinearCombinationReluClamp<
ElementOutput, 16, ElementAccumulator, ElementBias,
ElementCompute>;
typename EpilogueOp::Params epilogue{alpha, beta, gamma,
0, delta, theta};
DISPATCH_KERNEL;
}
case NonlineMode::H_SWISH: {
using EpilogueOp = cutlass::epilogue::thread::
BiasAddLinearCombinationHSwishClamp<
ElementOutput, 16, ElementAccumulator, ElementBias,
ElementCompute>;
typename EpilogueOp::Params epilogue{alpha, beta, gamma,
scale, delta, theta};
DISPATCH_KERNEL;
}
default:
megdnn_assert(false,
"unsupported nonlinear mode for conv bias operator");
}
#undef DISPATCH_KERNEL_WITH_TILE_SHAPE
#undef DISPATCH_KERNEL
}
#endif
#define INST(need_load_from_const_mem) \
template void megdnn::cuda::cutlass_wrapper:: \
do_conv_bias_uint4_int4_implicit_gemm_imma_ncdiv64hw64< \
need_load_from_const_mem>( \
const uint8_t* d_src, const int8_t* d_filter, \
const int32_t* d_bias, const uint8_t* d_z, uint8_t* d_dst, \
int* workspace, const convolution::ConvParam& param, \
uint32_t nonlinear_mode, float alpha, float beta, \
float gamma, float delta, float theta, float scale, \
uint8_t src_zero_point, \
const GemmCoord& threadblock_shape, \
const GemmCoord& warp_shape, cudaStream_t stream);
INST(true);
#undef INST
// vim: syntax=cuda.doxygen
......@@ -29,7 +29,7 @@ void cutlass_convolution_wrapper(
typename Convolution::ElementDst* d_dst, int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, typename Convolution::ExtraParam extra_param = {});
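// extra_param threads operand metadata (here, the uint4 source zero point)
// through to the CUTLASS kernel; the empty default keeps the existing
// int8/int4 call sites unchanged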
template <bool NeedLoadFromConstMem>
void do_conv_bias_int8_implicit_gemm_imma_ncdiv32hw32(
......@@ -85,6 +85,15 @@ void do_conv_bias_int4_int4_implicit_gemm_imma_ncdiv64hw64(
const GemmCoord& threadblock_shape, const GemmCoord& warp_shape,
cudaStream_t stream);
template <bool NeedLoadFromConstMem>
void do_conv_bias_uint4_int4_implicit_gemm_imma_ncdiv64hw64(
const uint8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
const uint8_t* d_z, uint8_t* d_dst, int* workspace,
const convolution::ConvParam& param, uint32_t nonlinear_mode,
float alpha, float beta, float gamma, float delta, float theta,
float scale, uint8_t src_zero_point, const GemmCoord& threadblock_shape,
const GemmCoord& warp_shape, cudaStream_t stream);
} // namespace cutlass_wrapper
} // namespace cuda
} // namespace megdnn
......
/**
* \file dnn/src/cuda/conv_bias/implicit_gemm_int4_int4_nchw64_imma.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
......@@ -77,7 +77,6 @@ ConvBiasForwardImpl::AlgoInt4Int4NCHW64IMMAImplicitGemm::get_workspace_in_bytes(
void ConvBiasForwardImpl::AlgoInt4Int4NCHW64IMMAImplicitGemm::exec(
const ExecArgs& args) const {
using Format = Param::Format;
auto&& param = args.opr->param();
auto&& fm = args.filter_meta;
size_t n = args.src_layout->operator[](0),
......
/**
* \file dnn/src/cuda/conv_bias/implicit_gemm_uint4_int4_nchw64_imma.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "./algo.h"
#include "src/common/conv_bias.h"
#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh"
#include "src/cuda/conv_bias/reduce_filter.cuh"
#include "src/cuda/convolution_helper/parameter.cuh"
#include "src/cuda/utils.h"
using namespace megdnn;
using namespace cuda;
using namespace convolution;
#if CUDA_VERSION >= 10020
bool ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::is_available(
const SizeArgs& args) const {
if (args.bias_layout->ndim <= 0)
return false;
using Param = param::ConvBias;
using Format = Param::Format;
using Sparse = Param::Sparse;
using Mode = Param::Mode;
using NonlineMode = megdnn::param::ConvBias::NonlineMode;
auto&& param = args.opr->param();
if (!check_bias_share_in_channel(*(args.bias_layout), param.format))
return false;
if (param.format != Format::NCHW64 || param.sparse != Sparse::DENSE ||
param.mode != Mode::CROSS_CORRELATION)
return false;
if (param.nonlineMode != NonlineMode::IDENTITY &&
param.nonlineMode != NonlineMode::RELU &&
param.nonlineMode != NonlineMode::H_SWISH)
return false;
if (args.src_layout->dtype.enumv() != DTypeEnum::Quantized4Asymm ||
args.filter_layout->dtype.enumv() != DTypeEnum::QuantizedS4 ||
args.bias_layout->dtype.enumv() != DTypeEnum::QuantizedS32 ||
args.dst_layout->dtype.enumv() != DTypeEnum::Quantized4Asymm)
return false;
if (!is_compute_capability_required(7, 5))
return false;
return true;
}
WorkspaceBundle
ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::get_workspace_bundle(
dt_byte* raw_ptr, const SizeArgs& args) const {
if (args.preprocessed_filter) {
return WorkspaceBundle{raw_ptr, {}};
} else {
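        // one contiguous chunk laid out as
        // [reordered filter | reordered bias | filter-reduce scratch],
        // matching the offsets computed in exec()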
size_t ws_filter = args.filter_layout->span().dist_byte(),
ws_bias = args.bias_layout->span().dist_byte(),
ws_reduce_filter = get_preprocess_workspace_in_bytes(args);
return WorkspaceBundle{raw_ptr,
{ws_filter + ws_bias + ws_reduce_filter}};
}
}
size_t ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::
get_workspace_in_bytes(const SizeArgs& args) const {
return get_workspace_bundle(nullptr, args).total_size_in_bytes();
}
void ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::exec(
const ExecArgs& args) const {
auto&& param = args.opr->param();
auto&& fm = args.filter_meta;
size_t n = args.src_layout->operator[](0),
ci = args.src_layout->operator[](1) * 64,
hi = args.src_layout->operator[](2),
wi = args.src_layout->operator[](3);
size_t co = args.dst_layout->operator[](1) * 64,
ho = args.dst_layout->operator[](2),
wo = args.dst_layout->operator[](3);
UNPACK_CONV_PARAMETER(fm, param);
MARK_USED_VAR
auto&& stream = cuda_stream(args.opr->handle());
void* filter_ptr = nullptr;
void* bias_ptr = nullptr;
if (args.preprocessed_filter) {
megdnn_assert(args.preprocessed_filter->tensors.size() == 2);
filter_ptr = args.preprocessed_filter->tensors[0].raw_ptr;
bias_ptr = args.preprocessed_filter->tensors[1].raw_ptr;
} else {
// reorder filter and bias
filter_ptr = reinterpret_cast<void*>(args.workspace.raw_ptr);
bias_ptr =
reinterpret_cast<void*>(args.workspace.raw_ptr +
args.filter_layout->span().dist_byte());
void* reduce_filter_ptr =
reinterpret_cast<void*>(args.workspace.raw_ptr +
args.filter_layout->span().dist_byte() +
args.bias_layout->span().dist_byte());
reorder_filter_bias(args, reduce_filter_ptr, filter_ptr, bias_ptr);
}
ConvParam kern_param;
kern_param.n = n, kern_param.co = co, kern_param.ci = ci,
kern_param.hi = hi, kern_param.wi = wi, kern_param.ho = ho,
kern_param.wo = wo, kern_param.ph = ph, kern_param.pw = pw,
kern_param.sh = sh, kern_param.sw = sw, kern_param.fh = fh,
kern_param.fw = fw;
float src_scale =
args.src_layout->dtype.param<dtype::Quantized4Asymm>().scale,
filter_scale =
args.filter_layout->dtype.param<dtype::QuantizedS4>().scale,
bias_scale =
args.bias_layout->dtype.param<dtype::QuantizedS32>().scale,
dst_scale =
args.dst_layout->dtype.param<dtype::Quantized4Asymm>().scale;
uint8_t src_zero = args.src_layout->dtype.param<dtype::Quantized4Asymm>()
.zero_point,
dst_zero = args.dst_layout->dtype.param<dtype::Quantized4Asymm>()
.zero_point;
float alpha = src_scale * filter_scale / dst_scale;
float beta = bias_scale / dst_scale;
float gamma = 0.f;
float delta = 0.f;
float theta = dst_zero;
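    // Sketch of the epilogue math for asymmetric uint4 I/O:
    //   dst = round(alpha * accum + beta * bias + gamma * z + delta + theta)
    // alpha/beta/gamma rescale the int32 accumulator, bias and z into the
    // dst scale, theta adds the dst zero point, and the -src_zero *
    // sum(filter) correction is folded into the bias by reorder_filter_bias()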
uint8_t* z_dev_ptr = nullptr;
if (args.z_layout->ndim > 0) {
z_dev_ptr = reinterpret_cast<uint8_t*>(args.z_tensor->raw_ptr);
float z_scale =
args.z_layout->dtype.param<dtype::Quantized4Asymm>().scale;
uint8_t z_zero =
args.z_layout->dtype.param<dtype::Quantized4Asymm>().zero_point;
gamma = z_scale / dst_scale;
delta = -z_zero * gamma;
}
uint32_t nonlinear_mode = static_cast<uint32_t>(param.nonlineMode);
cutlass_wrapper::do_conv_bias_uint4_int4_implicit_gemm_imma_ncdiv64hw64<
true>(
reinterpret_cast<uint8_t*>(args.src_tensor->raw_ptr),
reinterpret_cast<int8_t*>(filter_ptr),
reinterpret_cast<int32_t*>(bias_ptr), z_dev_ptr,
reinterpret_cast<uint8_t*>(args.dst_tensor->raw_ptr), nullptr,
kern_param, nonlinear_mode, alpha, beta, gamma, delta, theta,
dst_scale, src_zero,
cutlass_wrapper::GemmCoord{m_algo_param.threadblock_m,
m_algo_param.threadblock_n,
m_algo_param.threadblock_k},
cutlass_wrapper::GemmCoord{m_algo_param.warp_m, m_algo_param.warp_n,
m_algo_param.warp_k},
stream);
}
std::string ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::to_string(
AlgoParam algo_param) {
return ssprintf("%uX%uX%u_%uX%uX%u", algo_param.threadblock_m,
algo_param.threadblock_n, algo_param.threadblock_k,
algo_param.warp_m, algo_param.warp_n, algo_param.warp_k);
}
size_t ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::
get_preprocess_workspace_in_bytes(const SizeArgs& args) const {
size_t co = args.filter_layout->operator[](0),
ci = args.filter_layout->operator[](1) * 64,
fh = args.filter_layout->operator[](2),
fw = args.filter_layout->operator[](3);
size_t ws_size_reduce_filter = co * sizeof(int32_t);
size_t A = co, B = ci * fh * fw / 8, C = 1;
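    // assuming the reduce kernel consumes 32-bit words of packed nibbles,
    // B counts eight 4-bit weights per word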
ws_size_reduce_filter += do_dispatch_reduce_workspace_in_bytes(A, B, C);
return ws_size_reduce_filter;
}
SmallVector<TensorLayout> ConvBiasForwardImpl::
AlgoUInt4Int4NCHW64IMMAImplicitGemm::deduce_preprocessed_filter_layout(
const SizeArgs& args) const {
return {args.filter_layout->collapse_contiguous(),
args.bias_layout->collapse_contiguous()};
}
void ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::exec_preprocess(
const ExecArgs& args) const {
megdnn_assert(args.preprocessed_filter->tensors.size() == 2);
reorder_filter_bias(args, args.workspace.raw_ptr,
args.preprocessed_filter->tensors[0].raw_ptr,
args.preprocessed_filter->tensors[1].raw_ptr);
}
void ConvBiasForwardImpl::AlgoUInt4Int4NCHW64IMMAImplicitGemm::
reorder_filter_bias(const ExecArgs& args, void* reduce_filter,
void* reordered_filter,
void* reordered_bias) const {
auto&& param = args.opr->param();
auto&& fm = args.filter_meta;
size_t n = args.src_layout->operator[](0),
ci = args.src_layout->operator[](1) * 64,
hi = args.src_layout->operator[](2),
wi = args.src_layout->operator[](3);
size_t co = args.dst_layout->operator[](1) * 64,
ho = args.dst_layout->operator[](2),
wo = args.dst_layout->operator[](3);
UNPACK_CONV_PARAMETER(fm, param);
MARK_USED_VAR;
auto&& stream = cuda_stream(args.opr->handle());
// filter: KCRS64 => CRSK64
TensorLayout src{{co, ci / 64, fh, fw, 64}, dtype::QuantizedS4()};
src.init_contiguous_stride();
TensorLayout dst = src;
dst.stride[0] = 64;
dst.stride[1] = co * fh * fw * 64;
dst.stride[2] = co * fw * 64;
dst.stride[3] = co * 64;
dst.stride[4] = 1;
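    // the relayout below realizes the KCRS64 -> CRSK64 transpose purely
    // through these destination strides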
TensorND ts_src, ts_dst;
ts_src.raw_ptr = args.filter_tensor->raw_ptr;
ts_src.layout = src;
ts_dst.raw_ptr = reordered_filter;
ts_dst.layout = dst;
auto&& transpose = args.opr->handle()->create_operator<RelayoutForward>();
transpose->exec(ts_src, ts_dst);
// reduce filter and update bias
    // scratch for the reduction lives in the dedicated reduce_filter buffer
    // (the original byte offset applied to an int32_t* overshot by 4x and
    // ignored this parameter)
    int32_t* workspace = reinterpret_cast<int32_t*>(reduce_filter);
int src_zero_point =
args.src_tensor->layout.dtype.param<dtype::Quantized4Asymm>()
.zero_point;
do_dispatch_reduce_filter_and_update_bias_4bit<true>(
reinterpret_cast<uint8_t*>(args.filter_tensor->raw_ptr),
args.bias_tensor->compatible_ptr<int32_t>(), co, ci * fh * fw / 8,
reinterpret_cast<int32_t*>(reordered_bias), workspace,
src_zero_point, stream);
}
#endif
// vim: syntax=cpp.doxygen
/**
* \file
* dnn/src/cuda/conv_bias/int4/conv_bias_int4_implicit_gemm_cutlass_wrapper.cuinl
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "cutlass/convolution/device/convolution.h"
#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh"
using namespace megdnn;
using namespace cuda;
using namespace cutlass_wrapper;
template <typename Convolution>
void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst, int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, typename Convolution::ExtraParam extra_param) {
typename Convolution::TensorRefSrc tensor_src{
const_cast<typename Convolution::ElementSrc*>(d_src),
Convolution::LayoutSrc::packed(
{conv_param.N, conv_param.H, conv_param.W, conv_param.C})};
typename Convolution::TensorRefFilter tensor_filter{
const_cast<typename Convolution::ElementFilter*>(d_filter),
Convolution::LayoutFilter::packed(
{conv_param.K, conv_param.R, conv_param.S, conv_param.C})};
typename Convolution::TensorRefBias tensor_bias{
const_cast<typename Convolution::ElementBias*>(d_bias),
Convolution::LayoutBias::packed({1, 1, 1, conv_param.K})};
typename Convolution::TensorRefDst tensor_z{
const_cast<typename Convolution::ElementDst*>(d_z),
Convolution::LayoutDst::packed(
{conv_param.N, conv_param.P, conv_param.Q, conv_param.K})};
typename Convolution::TensorRefDst tensor_dst{
d_dst,
Convolution::LayoutDst::packed(
{conv_param.N, conv_param.P, conv_param.Q, conv_param.K})};
typename Convolution::Arguments arguments{conv_param,
tensor_src.non_const_ref(),
tensor_filter.non_const_ref(),
tensor_bias.non_const_ref(),
tensor_z.non_const_ref(),
tensor_dst.non_const_ref(),
epilogue,
{},
{},
extra_param};
Convolution conv_op;
cutlass_check(conv_op.initialize(arguments, workspace));
cutlass_check(conv_op(stream));
after_kernel_launch();
}
// vim: syntax=cuda.doxygen
......@@ -26,7 +26,7 @@ void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper(
typename Convolution::ElementDst* d_dst, int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, typename Convolution::ExtraParam extra_param) {
typename Convolution::TensorRefSrc tensor_src{
const_cast<typename Convolution::ElementSrc*>(d_src),
Convolution::LayoutSrc::packed(
......@@ -52,7 +52,10 @@ void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper(
tensor_bias.non_const_ref(),
tensor_z.non_const_ref(),
tensor_dst.non_const_ref(),
epilogue,
{},
{},
extra_param};
Convolution conv_op;
cutlass_check(conv_op.initialize(arguments, workspace));
cutlass_check(conv_op(stream));
......
......@@ -65,6 +65,7 @@ public:
class AlgoInt8CHWN4IMMAImplicitGemmUnrollWidth;
class AlgoInt8NCHW32IMMAImplicitGemm;
class AlgoInt4Int4NCHW64IMMAImplicitGemm;
class AlgoUInt4Int4NCHW64IMMAImplicitGemm;
class AlgoBFloat16;
class AlgoPack;
......
......@@ -689,7 +689,7 @@ TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_1x1_ALGO_2) {
}
TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_WEIGHT_PREPROCESS) {
require_compute_capability(6, 1);
Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
handle_cuda());
......