Commit 544908d0 authored by Alexander Alekhin

dnn: some minor fixes in docs, indentation, unused code

Parent 520da7aa
@@ -44,7 +44,7 @@
// This is an umbrella header to include into your project.
// We are free to change headers layout in dnn subfolder, so please include
// this header for future compartibility
// this header for future compatibility
/** @defgroup dnn Deep Neural Network module
......
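A minimal usage sketch for the umbrella header described above; the include path is the one installed by a typical OpenCV build:

#include <opencv2/dnn.hpp>   // include the umbrella header, not headers under the dnn/ subfolder
using namespace cv::dnn;     // optional; the dnn classes live in this namespace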
@@ -152,7 +152,19 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
int outputNameToIndex(String outputName);
};
//! Classical recurrent layer
/** @brief Classical recurrent layer
Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and computes two outputs @f$o_t@f$ and @f$h_t@f$.
- input: should contain packed input @f$x_t@f$.
- output: should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).
input[0] should have shape [`T`, `N`, `data_dims`], where `T` and `N` are the number of timestamps and the number of independent samples of @f$x_t@f$, respectively.
output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is the number of rows in the @f$ W_{xo} @f$ matrix.
If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is the number of rows in the @f$ W_{hh} @f$ matrix.
*/
class CV_EXPORTS RNNLayer : public Layer
{
public:
@@ -180,17 +192,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
*/
virtual void setProduceHiddenOutput(bool produce = false) = 0;
/** Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and compute two outputs @f$o_t@f$ and @f$h_t@f$.
@param input should contain packed input @f$x_t@f$.
@param output should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).
@p input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` is number of timestamps and number of independent samples of @f$x_t@f$ respectively.
@p output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is number of rows in @f$ W_{xo} @f$ matrix.
If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is number of rows in @f$ W_{hh} @f$ matrix.
*/
};
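A shape-oriented sketch of the RNNLayer contract documented above; T, N and data_dims are example values, not fixed by the API:

#include <opencv2/core.hpp>

void rnnInputShapeExample()
{
    const int T = 10, N = 4, data_dims = 16;      // timestamps, samples, feature length (placeholders)
    int sz[] = { T, N, data_dims };
    cv::Mat x(3, sz, CV_32F, cv::Scalar(0));      // input[0]: packed x_t of shape [T, N, data_dims]

    // output[0] will have shape [T, N, N_o], where N_o is the number of rows of W_xo;
    // with setProduceHiddenOutput(true), output[1] will have shape [T, N, N_h] (rows of W_hh).
}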
class CV_EXPORTS BaseConvolutionLayer : public Layer
......
@@ -371,28 +371,28 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Runs forward pass to compute output of layer with name @p outputName.
* @param outputName name of the layer whose output is needed
* @return blob for first output of specified layer.
* @details By default runs forward pass for the whole network.
*/
* @details By default runs forward pass for the whole network.
*/
CV_WRAP Mat forward(const String& outputName = String());
/** @brief Runs forward pass to compute output of layer with name @p outputName.
* @param outputBlobs contains all output blobs for specified layer.
* @param outputName name of the layer whose output is needed
* @details If @p outputName is empty, runs forward pass for the whole network.
*/
* @details If @p outputName is empty, runs forward pass for the whole network.
*/
CV_WRAP void forward(std::vector<Mat>& outputBlobs, const String& outputName = String());
/** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
* @param outputBlobs contains blobs for first outputs of specified layers.
* @param outBlobNames names of the layers whose outputs are needed
*/
*/
CV_WRAP void forward(std::vector<Mat>& outputBlobs,
const std::vector<String>& outBlobNames);
/** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
* @param outputBlobs contains all output blobs for each layer specified in @p outBlobNames.
* @param outBlobNames names of the layers whose outputs are needed
*/
*/
CV_WRAP void forward(std::vector<std::vector<Mat> >& outputBlobs,
const std::vector<String>& outBlobNames);
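A usage sketch for the forward() overloads above, assuming a Caffe model; the file names and the layer names "prob" and "conv1" are placeholders:

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

void forwardExample()
{
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("model.prototxt", "model.caffemodel");
    cv::Mat img = cv::imread("image.jpg");
    net.setInput(cv::dnn::blobFromImage(img));

    cv::Mat prob = net.forward("prob");              // first output of the layer named "prob"

    std::vector<cv::Mat> outs;
    std::vector<cv::String> names;
    names.push_back("conv1");
    names.push_back("prob");
    net.forward(outs, names);                        // first outputs of the listed layers
}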
@@ -460,103 +460,103 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
*/
CV_WRAP std::vector<int> getUnconnectedOutLayers() const;
/** @brief Returns input and output shapes for all layers in loaded model;
* preliminary inferencing isn't necessary.
* @param netInputShapes shapes for all input blobs in net input layer.
* @param layersIds output parameter for layer IDs.
* @param inLayersShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayersShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;
/** @overload */
CV_WRAP void getLayersShapes(const MatShape& netInputShape,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;
/** @brief Returns input and output shapes for layer with specified
* id in loaded model; preliminary inferencing isn't necessary.
* @param netInputShape shape of the input blob in net input layer.
* @param layerId id for layer.
* @param inLayerShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayerShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayerShapes(const MatShape& netInputShape,
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
* preliminary inferencing isn't necessary.
* @param netInputShapes shapes for all input blobs in net input layer.
* @param layersIds output parameter for layer IDs.
* @param inLayersShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayersShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;
/** @overload */
CV_WRAP void getLayersShapes(const MatShape& netInputShape,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;
/** @brief Returns input and output shapes for layer with specified
* id in loaded model; preliminary inferencing isn't necessary.
* @param netInputShape shape of the input blob in net input layer.
* @param layerId id for layer.
* @param inLayerShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayerShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayerShapes(const MatShape& netInputShape,
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
/** @overload */
CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
/** @overload */
CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
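A sketch of querying shapes without running inference, using the getLayersShapes() overloads above; `net` is assumed to be an already loaded Net and the 1x3x224x224 input shape is a placeholder:

#include <opencv2/dnn.hpp>
#include <vector>

void shapesExample(const cv::dnn::Net& net)
{
    cv::dnn::MatShape inputShape;                    // MatShape is a std::vector<int>
    inputShape.push_back(1); inputShape.push_back(3);
    inputShape.push_back(224); inputShape.push_back(224);

    std::vector<int> layerIds;
    std::vector<std::vector<cv::dnn::MatShape> > inShapes, outShapes;
    net.getLayersShapes(inputShape, &layerIds, &inShapes, &outShapes);
    // inShapes[i] and outShapes[i] belong to the layer with id layerIds[i].
}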
/** @brief Computes FLOP for whole loaded model with specified input shapes.
* @param netInputShapes vector of shapes for all net inputs.
* @returns computed FLOP.
*/
CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const MatShape& netInputShape) const;
/** @brief Returns list of types for layers used in the model.
* @param layersTypes output parameter for returning types.
*/
CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;
/** @brief Returns count of layers of specified type.
* @param layerType type.
* @returns count of layers
*/
CV_WRAP int getLayersCount(const String& layerType) const;
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for the model.
* @param netInputShapes vector of shapes for all net inputs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for each layer.
* @param netInputShapes vector of shapes for all net inputs.
* @param layerIds output vector to save layer IDs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
CV_OUT std::vector<size_t>& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
CV_OUT std::vector<size_t>& blobs) const;
/** @brief Computes FLOP for whole loaded model with specified input shapes.
* @param netInputShapes vector of shapes for all net inputs.
* @returns computed FLOP.
*/
CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const MatShape& netInputShape) const;
/** @brief Returns list of types for layers used in the model.
* @param layersTypes output parameter for returning types.
*/
CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;
/** @brief Returns count of layers of specified type.
* @param layerType type.
* @returns count of layers
*/
CV_WRAP int getLayersCount(const String& layerType) const;
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for the model.
* @param netInputShapes vector of shapes for all net inputs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for each layer.
* @param netInputShapes vector of shapes for all net inputs.
* @param layerIds output vector to save layer IDs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
CV_OUT std::vector<size_t>& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
CV_OUT std::vector<size_t>& blobs) const;
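A sketch combining the complexity and memory queries above; again `net` is assumed to be an already loaded Net and the input shape is a placeholder:

#include <opencv2/dnn.hpp>
#include <vector>

void costExample(const cv::dnn::Net& net)
{
    cv::dnn::MatShape inputShape;
    inputShape.push_back(1); inputShape.push_back(3);
    inputShape.push_back(224); inputShape.push_back(224);

    long long flops = net.getFLOPS(inputShape);      // FLOP estimate for the whole model

    std::vector<cv::String> types;
    net.getLayerTypes(types);                        // e.g. "Convolution", "ReLU", ...
    int convLayers = net.getLayersCount("Convolution");

    size_t weightBytes = 0, blobBytes = 0;
    net.getMemoryConsumption(inputShape, weightBytes, blobBytes);
    (void)flops; (void)convLayers;                   // silence unused-variable warnings
}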
private:
struct Impl;
......
@@ -969,9 +969,6 @@ struct Net::Impl
}
}
#define CV_RETHROW_ERROR(err, newmsg)\
cv::error(err.code, newmsg, err.func.c_str(), err.file.c_str(), err.line)
void allocateLayer(int lid, const LayersShapesMap& layersShapes)
{
CV_TRACE_FUNCTION();
......