diff --git a/dnn/src/cuda/convolution/backward_data/chanwise.cpp b/dnn/src/cuda/convolution/backward_data/chanwise.cpp
index d9bbc1a78c6a7f21e181601438ad374155ef3c54..d156f605de66319ee542e1362a28c8c17450e270 100644
--- a/dnn/src/cuda/convolution/backward_data/chanwise.cpp
+++ b/dnn/src/cuda/convolution/backward_data/chanwise.cpp
@@ -20,6 +20,12 @@ using namespace convolution;
 
 bool ConvolutionBackwardDataImpl::AlgoChanwise::is_available(
         const SizeArgs& args) const {
+    auto kparam = chanwise::Param::from_fwd_args(args.as_fwd_args());
+    auto&& device_prop = cuda::current_device_prop();
+    if (device_prop.sharedMemPerBlock <
+        kparam.chl_mul * kparam.flt_h * kparam.flt_w * args.diff_layout->dtype.size()) {
+        return false;
+    }
     if (!args.grad_layout->is_contiguous() || !args.diff_layout->is_contiguous()) {
         return false;
     }
diff --git a/dnn/src/cuda/utils.cuh b/dnn/src/cuda/utils.cuh
index a4b9386019f888d887bbf3a582707650c54cd35b..732f531ad853c611aa9a7674e05efaa2eace71c6 100644
--- a/dnn/src/cuda/utils.cuh
+++ b/dnn/src/cuda/utils.cuh
@@ -24,12 +24,18 @@
 #include "src/cuda/atomic_add.cuh"
 #include "src/cuda/cudnn_with_check.h"
 
-#define cuda_check(_x)                                       \
-    do {                                                     \
-        cudaError_t _err = (_x);                             \
-        if (_err != cudaSuccess) {                           \
-            ::megdnn::cuda::__throw_cuda_error__(_err, #_x); \
-        }                                                    \
+#define cuda_check(_x)                                                        \
+    do {                                                                      \
+        cudaError_t _err = (_x);                                              \
+        if (_err != cudaSuccess) {                                            \
+            std::string x = std::string(#_x);                                 \
+            char line[10];                                                    \
+            sprintf(line, "%d", __LINE__);                                    \
+            ::megdnn::cuda::__throw_cuda_error__(                             \
+                    _err, (x + " error file:" + std::string(__FILE__) + ":" + \
+                           std::string(line))                                 \
+                                  .c_str());                                  \
+        }                                                                     \
     } while (0)
 
 #define cublas_check(_x)                                     \
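The first hunk makes `AlgoChanwise::is_available` reject configurations whose per-block shared-memory footprint (one filter of `chl_mul * flt_h * flt_w` elements of the diff dtype) exceeds the device's `sharedMemPerBlock`, so the dispatcher can fall back to another algorithm instead of failing at kernel launch. A minimal standalone sketch of the same check, using the public CUDA runtime API rather than MegEngine's internal `cuda::current_device_prop()`; the free function and its name here are illustrative, not part of MegDNN:

```cpp
#include <cuda_runtime.h>
#include <cstddef>

// Illustrative helper (not MegDNN API): returns true when one filter of
// chl_mul * flt_h * flt_w elements, each dtype_size bytes, fits into the
// per-block shared memory of the given device.
bool filter_fits_in_shared_mem(
        size_t chl_mul, size_t flt_h, size_t flt_w, size_t dtype_size,
        int device = 0) {
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, device) != cudaSuccess)
        return false;  // conservatively report unavailable on query failure
    size_t required = chl_mul * flt_h * flt_w * dtype_size;
    return required <= prop.sharedMemPerBlock;
}
```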
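The second hunk extends `cuda_check` so a failing CUDA call reports the stringized expression together with `__FILE__` and `__LINE__` instead of the expression alone. A self-contained sketch of the same idea, assuming a plain `std::runtime_error` in place of the internal `::megdnn::cuda::__throw_cuda_error__`, and using `std::to_string` instead of the fixed-size `sprintf` buffer from the patch:

```cpp
#include <cuda_runtime.h>
#include <stdexcept>
#include <string>

// Illustrative stand-in for the patched cuda_check: on failure, the
// stringized expression, the source location, and the CUDA error string
// are all folded into the thrown message.
#define CUDA_CHECK(_x)                                                   \
    do {                                                                 \
        cudaError_t _err = (_x);                                         \
        if (_err != cudaSuccess) {                                       \
            throw std::runtime_error(                                    \
                    std::string(#_x) + " error file:" + __FILE__ + ":" + \
                    std::to_string(__LINE__) + " (" +                    \
                    cudaGetErrorString(_err) + ")");                     \
        }                                                                \
    } while (0)

// Usage: CUDA_CHECK(cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice));
```

`std::to_string` sidesteps the fixed 10-byte `sprintf` buffer used in the diff, and `cudaGetErrorString(_err)` adds the human-readable reason (the patched macro presumably leaves that to `__throw_cuda_error__`). One review note: as written, the patched macro requires `<string>` and `<cstdio>` to be visible at every expansion site of `cuda_check`.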