diff --git a/dnn/src/common/pooling.cpp b/dnn/src/common/pooling.cpp
index ef74bfd6bce06b8ec7ebacb143808e9ce9af7e03..634aed4d8a7b6d5c61ce917306031c0bcd03e4c5 100644
--- a/dnn/src/common/pooling.cpp
+++ b/dnn/src/common/pooling.cpp
@@ -92,6 +92,10 @@ void PoolingBase::deduce_layout_fwd(const TensorLayout& src,
     size_t sw = this->param().stride_w;
     size_t ph = this->param().pad_h;
     size_t pw = this->param().pad_w;
+    megdnn_assert(ph < fh && pw < fw,
+            "pooling padding size (%zu %zu) should be smaller than "
+            "window size (%zu %zu)",
+            pw, ph, fw, fh);
     infer_conv_shape2d(ih, iw, fh, fw, sh, sw, ph, pw, oh, ow);
     if (param().format == Param::Format::NCHW) {
         dst = TensorLayout(TensorShape({n, c, oh, ow}), src.dtype);
diff --git a/src/opr/test/dnn/pooling.cpp b/src/opr/test/dnn/pooling.cpp
index 8defe15b226be8df3e3d4bbb4b5dc33e487d031c..16ffe1a3544e8fd12331fa69c82096ddb1b31ca9 100644
--- a/src/opr/test/dnn/pooling.cpp
+++ b/src/opr/test/dnn/pooling.cpp
@@ -104,6 +104,17 @@ TEST(TestOprDNN, PoolingBackward)
     }
 }
 
+TEST(TestOprDNN, PoolingForwardPadding) {
+    auto graph = ComputingGraph::make();
+    Param param(Param::Mode::MAX, 2, 2, 2, 2, 2, 2);
+    SymbolVarArray symbol_inputs;
+    HostTensorGenerator<> gen;
+    auto host_tensor = gen({2, 3, 23, 24});
+    symbol_inputs.push_back(
+            mgb::opr::Host2DeviceCopy::make(*graph, host_tensor));
+    ASSERT_THROW(opr::Pooling::make(symbol_inputs[0], param), MegDNNError);
+}
+
 } // anonymous namespace
 
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
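
Note (context, not part of the patch): the new assert requires each padding extent to be strictly smaller than the corresponding window extent, because with the usual pooling shape rule out = (in + 2 * pad - window) / stride + 1, a pad >= window means the first window lies entirely inside the padded region and never touches real input. The standalone C++ sketch below walks through that arithmetic for the same h-axis settings the new test uses; infer_extent is a hypothetical helper written for illustration, not MegDNN's infer_conv_shape2d.

    #include <cstddef>
    #include <cstdio>

    // Hypothetical helper mirroring the common pooling output-shape rule;
    // in MegDNN the real computation happens inside infer_conv_shape2d.
    static size_t infer_extent(size_t in, size_t window, size_t stride,
                               size_t pad) {
        return (in + 2 * pad - window) / stride + 1;
    }

    int main() {
        // Height settings matching the new test: ih=23, window=2, stride=2,
        // pad=2 (pad == window, which deduce_layout_fwd now rejects).
        size_t oh = infer_extent(23, 2, 2, 2);  // (23 + 4 - 2) / 2 + 1 == 13
        // With pad == window, the first window covers rows [-2, 0): it is
        // made up entirely of padding, so its "max" is meaningless -- the
        // degenerate case the new megdnn_assert guards against.
        printf("oh = %zu\n", oh);
        return 0;
    }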