From 544908d06c7a9788950b3ee5f2f8eb88fe88cd70 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Fri, 30 Jun 2017 18:46:00 +0300
Subject: [PATCH] dnn: some minor fixes in docs, indentation, unused code

---
 modules/dnn/include/opencv2/dnn.hpp            |   2 +-
 modules/dnn/include/opencv2/dnn/all_layers.hpp |  25 ++--
 modules/dnn/include/opencv2/dnn/dnn.hpp        | 198 ++++++++++++-------------
 modules/dnn/src/dnn.cpp                        |   3 -
 4 files changed, 113 insertions(+), 115 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn.hpp b/modules/dnn/include/opencv2/dnn.hpp
index 7bad750..690a82a 100644
--- a/modules/dnn/include/opencv2/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn.hpp
@@ -44,7 +44,7 @@
 // This is an umbrealla header to include into you project.
 // We are free to change headers layout in dnn subfolder, so please include
-// this header for future compartibility
+// this header for future compatibility

 /** @defgroup dnn Deep Neural Network module

diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index 3e1fbae..4f01227 100644
--- a/modules/dnn/include/opencv2/dnn/all_layers.hpp
+++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp
@@ -152,7 +152,19 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         int outputNameToIndex(String outputName);
     };

-    //! Classical recurrent layer
+    /** @brief Classical recurrent layer
+
+    Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and compute two outputs @f$o_t@f$ and @f$h_t@f$.
+
+    - input: should contain packed input @f$x_t@f$.
+    - output: should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).
+
+    input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` is number of timestamps and number of independent samples of @f$x_t@f$ respectively.
+
+    output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is number of rows in @f$ W_{xo} @f$ matrix.
+
+    If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is number of rows in @f$ W_{hh} @f$ matrix.
+    */
     class CV_EXPORTS RNNLayer : public Layer
     {
     public:
@@ -180,17 +192,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
          */
         virtual void setProduceHiddenOutput(bool produce = false) = 0;

-        /** Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and compute two outputs @f$o_t@f$ and @f$h_t@f$.
-
-        @param input should contain packed input @f$x_t@f$.
-        @param output should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).
-
-        @p input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` is number of timestamps and number of independent samples of @f$x_t@f$ respectively.
-
-        @p output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is number of rows in @f$ W_{xo} @f$ matrix.
-
-        If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is number of rows in @f$ W_{hh} @f$ matrix.
-        */
     };

     class CV_EXPORTS BaseConvolutionLayer : public Layer
diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 432bcf8..f4369ee 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -371,28 +371,28 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         /** @brief Runs forward pass to compute output of layer with name @p outputName.
          * @param outputName name for layer which output is needed to get
          * @return blob for first output of specified layer.
-         * @details By default runs forward pass for the whole network.
-         */
+         * @details By default runs forward pass for the whole network.
+         */
         CV_WRAP Mat forward(const String& outputName = String());

         /** @brief Runs forward pass to compute output of layer with name @p outputName.
          * @param outputBlobs contains all output blobs for specified layer.
          * @param outputName name for layer which output is needed to get
-         * @details If @p outputName is empty, runs forward pass for the whole network.
-         */
+         * @details If @p outputName is empty, runs forward pass for the whole network.
+         */
         CV_WRAP void forward(std::vector<Mat>& outputBlobs, const String& outputName = String());

         /** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
          * @param outputBlobs contains blobs for first outputs of specified layers.
          * @param outBlobNames names for layers which outputs are needed to get
-         */
+         */
         CV_WRAP void forward(std::vector<Mat>& outputBlobs, const std::vector<String>& outBlobNames);

         /** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
          * @param outputBlobs contains all output blobs for each layer specified in @p outBlobNames.
          * @param outBlobNames names for layers which outputs are needed to get
-         */
+         */
         CV_WRAP void forward(std::vector<std::vector<Mat> >& outputBlobs,
                              const std::vector<String>& outBlobNames);

@@ -460,103 +460,103 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
          */
         CV_WRAP std::vector<int> getUnconnectedOutLayers() const;

         /** @brief Returns input and output shapes for all layers in loaded model;
-         * preliminary inferencing isn't necessary.
-         * @param netInputShapes shapes for all input blobs in net input layer.
-         * @param layersIds output parameter for layer IDs.
-         * @param inLayersShapes output parameter for input layers shapes;
-         * order is the same as in layersIds
-         * @param outLayersShapes output parameter for output layers shapes;
-         * order is the same as in layersIds
-         */
-        CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
-                                     std::vector<int>* layersIds,
-                                     std::vector<std::vector<MatShape> >* inLayersShapes,
-                                     std::vector<std::vector<MatShape> >* outLayersShapes) const;
-
-        /** @overload */
-        CV_WRAP void getLayersShapes(const MatShape& netInputShape,
-                                     std::vector<int>* layersIds,
-                                     std::vector<std::vector<MatShape> >* inLayersShapes,
-                                     std::vector<std::vector<MatShape> >* outLayersShapes) const;
-
-        /** @brief Returns input and output shapes for layer with specified
-         * id in loaded model; preliminary inferencing isn't necessary.
-         * @param netInputShape shape input blob in net input layer.
-         * @param layerId id for layer.
-         * @param inLayerShapes output parameter for input layers shapes;
-         * order is the same as in layersIds
-         * @param outLayerShapes output parameter for output layers shapes;
-         * order is the same as in layersIds
-         */
-        CV_WRAP void getLayerShapes(const MatShape& netInputShape,
-                                    const int layerId,
-                                    std::vector<MatShape>* inLayerShapes,
-                                    std::vector<MatShape>* outLayerShapes) const;
+         * preliminary inferencing isn't necessary.
+         * @param netInputShapes shapes for all input blobs in net input layer.
+         * @param layersIds output parameter for layer IDs.
+         * @param inLayersShapes output parameter for input layers shapes;
+         * order is the same as in layersIds
+         * @param outLayersShapes output parameter for output layers shapes;
+         * order is the same as in layersIds
+         */
+        CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
+                                     std::vector<int>* layersIds,
+                                     std::vector<std::vector<MatShape> >* inLayersShapes,
+                                     std::vector<std::vector<MatShape> >* outLayersShapes) const;
+
+        /** @overload */
+        CV_WRAP void getLayersShapes(const MatShape& netInputShape,
+                                     std::vector<int>* layersIds,
+                                     std::vector<std::vector<MatShape> >* inLayersShapes,
+                                     std::vector<std::vector<MatShape> >* outLayersShapes) const;
+
+        /** @brief Returns input and output shapes for layer with specified
+         * id in loaded model; preliminary inferencing isn't necessary.
+         * @param netInputShape shape input blob in net input layer.
+         * @param layerId id for layer.
+         * @param inLayerShapes output parameter for input layers shapes;
+         * order is the same as in layersIds
+         * @param outLayerShapes output parameter for output layers shapes;
+         * order is the same as in layersIds
+         */
+        CV_WRAP void getLayerShapes(const MatShape& netInputShape,
+                                    const int layerId,
+                                    std::vector<MatShape>* inLayerShapes,
+                                    std::vector<MatShape>* outLayerShapes) const;

-        /** @overload */
-        CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
+        /** @overload */
+        CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
                                     const int layerId,
                                     std::vector<MatShape>* inLayerShapes,
                                     std::vector<MatShape>* outLayerShapes) const;

-        /** @brief Computes FLOP for whole loaded model with specified input shapes.
-         * @param netInputShapes vector of shapes for all net inputs.
-         * @returns computed FLOP.
-         */
-        CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
-        /** @overload */
-        CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
-        /** @overload */
-        CV_WRAP int64 getFLOPS(const int layerId,
-                               const std::vector<MatShape>& netInputShapes) const;
-        /** @overload */
-        CV_WRAP int64 getFLOPS(const int layerId,
-                               const MatShape& netInputShape) const;
-
-        /** @brief Returns list of types for layer used in model.
-         * @param layersTypes output parameter for returning types.
-         */
-        CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;
-
-        /** @brief Returns count of layers of specified type.
-         * @param layerType type.
-         * @returns count of layers
-         */
-        CV_WRAP int getLayersCount(const String& layerType) const;
-
-        /** @brief Computes bytes number which are requered to store
-         * all weights and intermediate blobs for model.
-         * @param netInputShapes vector of shapes for all net inputs.
-         * @param weights output parameter to store resulting bytes for weights.
-         * @param blobs output parameter to store resulting bytes for intermediate blobs.
-         */
-        CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
-                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-        /** @overload */
-        CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
-                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-        /** @overload */
-        CV_WRAP void getMemoryConsumption(const int layerId,
-                                          const std::vector<MatShape>& netInputShapes,
-                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-        /** @overload */
-        CV_WRAP void getMemoryConsumption(const int layerId,
-                                          const MatShape& netInputShape,
-                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-
-        /** @brief Computes bytes number which are requered to store
-         * all weights and intermediate blobs for each layer.
-         * @param netInputShapes vector of shapes for all net inputs.
-         * @param layerIds output vector to save layer IDs.
-         * @param weights output parameter to store resulting bytes for weights.
-         * @param blobs output parameter to store resulting bytes for intermediate blobs.
-         */
-        CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
-                                          CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
-                                          CV_OUT std::vector<size_t>& blobs) const;
-        /** @overload */
-        CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
-                                          CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
-                                          CV_OUT std::vector<size_t>& blobs) const;
+        /** @brief Computes FLOP for whole loaded model with specified input shapes.
+         * @param netInputShapes vector of shapes for all net inputs.
+         * @returns computed FLOP.
+         */
+        CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
+        /** @overload */
+        CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
+        /** @overload */
+        CV_WRAP int64 getFLOPS(const int layerId,
+                               const std::vector<MatShape>& netInputShapes) const;
+        /** @overload */
+        CV_WRAP int64 getFLOPS(const int layerId,
+                               const MatShape& netInputShape) const;
+
+        /** @brief Returns list of types for layer used in model.
+         * @param layersTypes output parameter for returning types.
+         */
+        CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;
+
+        /** @brief Returns count of layers of specified type.
+         * @param layerType type.
+         * @returns count of layers
+         */
+        CV_WRAP int getLayersCount(const String& layerType) const;
+
+        /** @brief Computes bytes number which are requered to store
+         * all weights and intermediate blobs for model.
+         * @param netInputShapes vector of shapes for all net inputs.
+         * @param weights output parameter to store resulting bytes for weights.
+         * @param blobs output parameter to store resulting bytes for intermediate blobs.
+         */
+        CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
+                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+        /** @overload */
+        CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
+                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+        /** @overload */
+        CV_WRAP void getMemoryConsumption(const int layerId,
+                                          const std::vector<MatShape>& netInputShapes,
+                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+        /** @overload */
+        CV_WRAP void getMemoryConsumption(const int layerId,
+                                          const MatShape& netInputShape,
+                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+
+        /** @brief Computes bytes number which are requered to store
+         * all weights and intermediate blobs for each layer.
+         * @param netInputShapes vector of shapes for all net inputs.
+         * @param layerIds output vector to save layer IDs.
+         * @param weights output parameter to store resulting bytes for weights.
+         * @param blobs output parameter to store resulting bytes for intermediate blobs.
+         */
+        CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
+                                          CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
+                                          CV_OUT std::vector<size_t>& blobs) const;
+        /** @overload */
+        CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
+                                          CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
+                                          CV_OUT std::vector<size_t>& blobs) const;

     private:
         struct Impl;
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 200c150..a371b18 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -969,9 +969,6 @@ struct Net::Impl
         }
     }

-    #define CV_RETHROW_ERROR(err, newmsg)\
-        cv::error(err.code, newmsg, err.func.c_str(), err.file.c_str(), err.line)
-
     void allocateLayer(int lid, const LayersShapesMap& layersShapes)
     {
         CV_TRACE_FUNCTION();
-- 
2.7.4
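
Note (not part of the patch): a minimal usage sketch of the cv::dnn::Net API whose documentation is touched above, assuming an OpenCV build with the dnn module. The model files "model.prototxt"/"model.caffemodel" and the image "input.jpg" are placeholders.

    #include <opencv2/dnn.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <iostream>

    int main()
    {
        using namespace cv;
        using namespace cv::dnn;

        // Load a Caffe model (file names here are placeholders).
        Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");

        // Build a 4D NCHW blob from an image and bind it to the network input.
        Mat img = imread("input.jpg");
        Mat blob = blobFromImage(img, 1.0, Size(224, 224));
        net.setInput(blob);

        // forward() without arguments runs the whole network and returns
        // the first output blob of the last layer.
        Mat out = net.forward();
        std::cout << "output elements: " << out.total() << std::endl;

        // The shape-based queries documented above do not require inference.
        // MatShape is a std::vector<int>; here it is taken from the input blob.
        MatShape inputShape(blob.size.p, blob.size.p + blob.dims);
        size_t weights = 0, blobsMem = 0;
        net.getMemoryConsumption(inputShape, weights, blobsMem);
        std::cout << "FLOPS: " << net.getFLOPS(inputShape)
                  << ", weights bytes: " << weights
                  << ", blobs bytes: " << blobsMem << std::endl;
        return 0;
    }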