diff --git a/modules/dnn/include/opencv2/dnn/shape_utils.hpp b/modules/dnn/include/opencv2/dnn/shape_utils.hpp
index c975fcff0476012ad3ba7b94279e1c1fcc5d97f2..609809e110eb5e655c216ad80f642791b8b4aaa0 100644
--- a/modules/dnn/include/opencv2/dnn/shape_utils.hpp
+++ b/modules/dnn/include/opencv2/dnn/shape_utils.hpp
@@ -205,21 +205,33 @@ static inline std::ostream& operator<<(std::ostream &out, const MatShape& shape)
     return out;
 }
 
-inline int clamp(int ax, int dims)
+/// @brief Converts axis from `[-dims; dims)` (similar to Python's slice notation) to `[0; dims)` range.
+static inline
+int normalize_axis(int axis, int dims)
 {
-    return ax < 0 ? ax + dims : ax;
+    CV_Check(axis, axis >= -dims && axis < dims, "");
+    axis = (axis < 0) ? (dims + axis) : axis;
+    CV_DbgCheck(axis, axis >= 0 && axis < dims, "");
+    return axis;
 }
 
-inline int clamp(int ax, const MatShape& shape)
+static inline
+int normalize_axis(int axis, const MatShape& shape)
 {
-    return clamp(ax, (int)shape.size());
+    return normalize_axis(axis, (int)shape.size());
 }
 
-inline Range clamp(const Range& r, int axisSize)
+static inline
+Range normalize_axis_range(const Range& r, int axisSize)
 {
-    Range clamped(std::max(r.start, 0),
+    if (r == Range::all())
+        return Range(0, axisSize);
+    CV_CheckGE(r.start, 0, "");
+    Range clamped(r.start,
                   r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1);
-    CV_Assert_N(clamped.start < clamped.end, clamped.end <= axisSize);
+    CV_DbgCheckGE(clamped.start, 0, "");
+    CV_CheckLT(clamped.start, clamped.end, "");
+    CV_CheckLE(clamped.end, axisSize, "");
     return clamped;
 }
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index efafd5d325ff61b2b21a369a9746915218aeb4c2..34222b9547960e3a2e1d31071a87612a5ffcf1e4 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2598,7 +2598,7 @@ struct Net::Impl : public detail::NetImplBase
             // the concatenation optimization is applied with batch_size > 1.
             // so, for now, we only apply this optimization in the most popular
             // case batch_size == 1.
-            int axis = clamp(concatLayer->axis, output.dims);
+            int axis = normalize_axis(concatLayer->axis, output.dims);
             if( output.total(0, axis) == 1 )
             {
                 size_t i, ninputs = ld.inputBlobsId.size();
diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp
index d85d9e4b01c03a0cfb82213bdfb36612fe725cc3..2f92c69e05641b93f7a232ac9e3dc624c88edc46 100644
--- a/modules/dnn/src/layers/concat_layer.cpp
+++ b/modules/dnn/src/layers/concat_layer.cpp
@@ -72,7 +72,7 @@ public:
     {
         CV_Assert(inputs.size() > 0);
         outputs.resize(1, inputs[0]);
-        int cAxis = clamp(axis, inputs[0]);
+        int cAxis = normalize_axis(axis, inputs[0]);
 
         int axisSum = 0;
         for (size_t i = 0; i < inputs.size(); i++)
@@ -192,7 +192,7 @@ public:
         inps.getUMatVector(inputs);
         outs.getUMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
         if (padding)
             return false;
 
@@ -246,7 +246,7 @@ public:
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
         Mat& outMat = outputs[0];
 
         if (padding)
@@ -306,7 +306,7 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axis, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
@@ -319,7 +319,7 @@ public:
     {
         InferenceEngine::DataPtr data = ngraphDataNode(inputs[0]);
         const int numDims = data->getDims().size();
-        const int cAxis = clamp(axis, numDims);
+        const int cAxis = normalize_axis(axis, numDims);
         std::vector<size_t> maxDims(numDims, 0);
 
         CV_Assert(inputs.size() == nodes.size());
diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp
index f3434a57518599398d7b211dc93f026baa3abd7a..c59b71248e5a4eaac72dc2b0f96421f5fbd7567f 100644
--- a/modules/dnn/src/layers/flatten_layer.cpp
+++ b/modules/dnn/src/layers/flatten_layer.cpp
@@ -82,8 +82,8 @@ public:
         }
 
         int numAxes = inputs[0].size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
         CV_Assert(startAxis >= 0);
         CV_Assert(endAxis >= startAxis && endAxis < (int)numAxes);
@@ -113,8 +113,8 @@ public:
         inputs_arr.getMatVector(inputs);
 
         int numAxes = inputs[0].dims;
-        _startAxis = clamp(_startAxis, numAxes);
-        _endAxis = clamp(_endAxis, numAxes);
+        _startAxis = normalize_axis(_startAxis, numAxes);
+        _endAxis = normalize_axis(_endAxis, numAxes);
     }
 
 #ifdef HAVE_OPENCL
@@ -186,8 +186,8 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
         std::vector<size_t> dims = ieInpNode->get_shape();
 
         int numAxes = dims.size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
         CV_Assert(startAxis >= 0);
         CV_Assert(endAxis >= startAxis && endAxis < numAxes);
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index d9c6f6edb18b359147f5ff2a74405e0d675f1eff..e25ca5a68f90f365d677f9f51e05de68dbf71c8f 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -129,7 +129,7 @@ public:
             CV_CheckEQ(blobs[0].dims, 2, "");
             numOutput = blobs[0].size[0];
             CV_Assert(!bias || (size_t)numOutput == blobs[1].total());
-            cAxis = clamp(axis, inputs[0]);
+            cAxis = normalize_axis(axis, inputs[0]);
         }
 
         MatShape outShape(cAxis + 1);
@@ -352,7 +352,7 @@ public:
             return true;
         }
 
-        int axisCan = clamp(axis, inputs[0].dims);
+        int axisCan = normalize_axis(axis, inputs[0].dims);
         int numOutput = blobs[0].size[0];
         int innerSize = blobs[0].size[1];
         int outerSize = total(shape(inputs[0]), 0, axisCan);
@@ -473,7 +473,7 @@ public:
 
         if (!blobs.empty())
         {
-            int axisCan = clamp(axis, input[0].dims);
+            int axisCan = normalize_axis(axis, input[0].dims);
             int outerSize = input[0].total(0, axisCan);
 
             for (size_t i = 0; i < input.size(); i++)
diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp
index 7ce7b37d1efdf208c6251fbc33c07b7f13a4ffa8..5def78f22103b84b9c2a7e221820e72bb8cfad57 100644
--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -118,8 +118,8 @@ public:
         const UMat& inp0 = inputs[0];
         UMat& buffer = internals[0];
 
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
         size_t num = total(shape(inp0.size), 0, startAxis);
         size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
@@ -203,8 +203,8 @@ public:
         const Mat& inp0 = inputs[0];
         Mat& buffer = internals[0];
 
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
         const float* inpData = inp0.ptr<float>();
         float* outData = outputs[0].ptr<float>();
diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp
index 642e7c52f695b2be66af8035199c3a73d9394329..2b7cb40eb5a72788f05f3e6f5a14d875cac06295 100644
--- a/modules/dnn/src/layers/reshape_layer.cpp
+++ b/modules/dnn/src/layers/reshape_layer.cpp
@@ -60,14 +60,7 @@ static void computeShapeByReshapeMask(const MatShape &srcShape,
     int srcShapeSize = (int)srcShape.size();
     int maskShapeSize = (int)maskShape.size();
 
-    if (srcRange == Range::all())
-        srcRange = Range(0, srcShapeSize);
-    else
-    {
-        int sz = srcRange.size();
-        srcRange.start = clamp(srcRange.start, srcShapeSize);
-        srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
-    }
+    srcRange = normalize_axis_range(srcRange, srcShapeSize);
 
     bool explicitMask = !maskShape.empty();  // All mask values are positive.
     for (int i = 0, n = maskShape.size(); i < n && explicitMask; ++i)
diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp
index 9452e4e85ea846fe493cc2cebe8facf505ac782b..058140235bdc3177336a2cbde0f78318d6cb3e68 100644
--- a/modules/dnn/src/layers/scale_layer.cpp
+++ b/modules/dnn/src/layers/scale_layer.cpp
@@ -240,7 +240,7 @@ public:
             numChannels = blobs[0].total();
 
         std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
-        int cAxis = clamp(axis, shape.size());
+        int cAxis = normalize_axis(axis, shape.size());
         shape[cAxis] = numChannels;
 
         auto node = ieInpNode0;
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index fd314b7c57d78e0b989c628efd702e128b65442b..52236015d2264f553b1498457bd39139cf1368ff 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -146,7 +146,7 @@ public:
             for (int j = 0; j < sliceRanges[i].size(); ++j)
             {
                 if (shapesInitialized || inpShape[j] > 0)
-                    outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size();
+                    outputs[i][j] = normalize_axis_range(sliceRanges[i][j], inpShape[j]).size();
             }
         }
     }
@@ -209,7 +209,7 @@ public:
             // Clamp.
            for (int j = 0; j < finalSliceRanges[i].size(); ++j)
             {
-                finalSliceRanges[i][j] = clamp(finalSliceRanges[i][j], inpShape[j]);
+                finalSliceRanges[i][j] = normalize_axis_range(finalSliceRanges[i][j], inpShape[j]);
             }
         }
 
@@ -601,7 +601,7 @@ public:
         CV_Assert(inputs.size() == 2);
 
         MatShape dstShape = inputs[0];
-        int start = clamp(axis, dstShape);
+        int start = normalize_axis(axis, dstShape);
         for (int i = start; i < dstShape.size(); i++)
         {
             dstShape[i] = inputs[1][i];
@@ -620,7 +620,7 @@ public:
         const Mat &inpSzBlob = inputs[1];
         int dims = inpBlob.dims;
 
-        int start_axis = clamp(axis, dims);
+        int start_axis = normalize_axis(axis, dims);
 
         std::vector<int> offset_final(dims, 0);
         if (offset.size() == 1)
diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp
index a0e2b420205bbef3d363f789aa2a81e9b351f99c..29acd06402f53b35c90a20e276ad4892d29f9d57 100644
--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -82,7 +82,7 @@ public:
     {
         bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
         MatShape shape = inputs[0];
-        int cAxis = clamp(axisRaw, shape.size());
+        int cAxis = normalize_axis(axisRaw, shape.size());
         shape[cAxis] = 1;
         internals.assign(1, shape);
         return inplace;
@@ -115,7 +115,7 @@ public:
         UMat& src = inputs[0];
         UMat& dstMat = outputs[0];
 
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
 
         if (softmaxOp.empty())
         {
@@ -207,7 +207,7 @@ public:
         const Mat &src = inputs[0];
         Mat &dst = outputs[0];
 
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
         size_t outerSize = src.total(0, axis),
                channels = src.size[axis],
                innerSize = src.total(axis + 1);
@@ -318,7 +318,7 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
@@ -329,7 +329,7 @@ public:
                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        int axis = clamp(axisRaw, ieInpNode->get_shape().size());
+        int axis = normalize_axis(axisRaw, ieInpNode->get_shape().size());
         auto softmax = std::make_shared<ngraph::op::v1::Softmax>(ieInpNode, axis);
         if (logSoftMax)
             return Ptr<BackendNode>(new InfEngineNgraphNode(std::make_shared<ngraph::op::v0::Log>(softmax)));
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index fd01ad147889b83169ff4c478cc2f08d8cc638b1..93497a6126e18b4787cd5f0050a20f3b4698c272 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -503,7 +503,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 MatShape targetShape;
                 std::vector<bool> shouldDelete(inpShape.size(), false);
                 for (int i = 0; i < axes.size(); i++) {
-                    int axis = clamp(axes.get<int>(i), inpShape.size());
+                    int axis = normalize_axis(axes.get<int>(i), inpShape.size());
                     shouldDelete[axis] = true;
                 }
                 for (int axis = 0; axis < inpShape.size(); ++axis){
@@ -515,7 +515,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
 
                 if (inpShape.size() == 3 && axes.size() <= 2)
                 {
-                    int axis = clamp(axes.get<int>(0), inpShape.size());
+                    int axis = normalize_axis(axes.get<int>(0), inpShape.size());
                     CV_CheckNE(axis, 0, "");
 
                     LayerParams reshapeLp;
@@ -539,8 +539,8 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                     avgLp.set("pool", pool);
                     if (axes.size() == 2)
                     {
-                        CV_CheckEQ(clamp(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
-                        CV_CheckEQ(clamp(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
+                        CV_CheckEQ(normalize_axis(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
+                        CV_CheckEQ(normalize_axis(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
                         avgLp.set("global_pooling", true);
                     }
                     else
@@ -560,9 +560,9 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
 
                 CV_Assert(axes.size() <= inpShape.size() - 2);
                 std::vector<int> kernel_size(inpShape.size() - 2, 1);
-                if (axes.size() == 1 && (clamp(axes.get<int>(0), inpShape.size()) <= 1))
+                if (axes.size() == 1 && (normalize_axis(axes.get<int>(0), inpShape.size()) <= 1))
                 {
-                    int axis = clamp(axes.get<int>(0), inpShape.size());
+                    int axis = normalize_axis(axes.get<int>(0), inpShape.size());
                     MatShape newShape = inpShape;
                     newShape[axis + 1] = total(newShape, axis + 1);
                     newShape.resize(axis + 2);
@@ -584,7 +584,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 else
                 {
                     for (int i = 0; i < axes.size(); i++) {
-                        int axis = clamp(axes.get<int>(i), inpShape.size());
+                        int axis = normalize_axis(axes.get<int>(i), inpShape.size());
                         CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
                         kernel_size[axis - 2] = inpShape[axis];
                     }
@@ -1376,7 +1376,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
             {
                 Mat input = getBlob(node_proto, 0);
-                int axis = clamp(layerParams.get<int>("axis", 1), input.dims);
+                int axis = normalize_axis(layerParams.get<int>("axis", 1), input.dims);
 
                 std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
                 out_size.push_back(input.total(axis));
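
Usage note (not part of the patch): a minimal sketch of the semantics of the renamed helpers from shape_utils.hpp above. The helper names and their behavior come from the patch; the surrounding harness (main, the example shape and printing) is illustrative only.

    #include <opencv2/dnn/shape_utils.hpp>
    #include <iostream>

    int main()
    {
        using namespace cv;
        using namespace cv::dnn;

        MatShape shape = {2, 3, 4, 5};  // a 4-D blob shape

        // Negative axes count from the end, as in Python slicing:
        std::cout << normalize_axis(1, shape)  << "\n";  // prints 1
        std::cout << normalize_axis(-1, shape) << "\n";  // prints 3 (last axis)
        // Out-of-range axes (e.g. 4 or -5 here) now fail the CV_Check
        // instead of passing through unvalidated as with the old clamp().

        Range r = normalize_axis_range(Range::all(), 4);  // -> [0, 4)
        Range s = normalize_axis_range(Range(1, -1), 4);  // -> [1, 4): negative end counts from the back
        std::cout << r.start << ".." << r.end << "\n";
        std::cout << s.start << ".." << s.end << "\n";
        return 0;
    }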