/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_DNN_DNN_HPP
#define OPENCV_DNN_DNN_HPP

#include <opencv2/core.hpp>

#include "../dnn/version.hpp"

#include <opencv2/dnn/dict.hpp>

namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN
//! @addtogroup dnn
//! @{

    typedef std::vector<int> MatShape;
    /**
     * @brief Enum of computation backends supported by layers.
     * @see Net::setPreferableBackend
     */
    enum Backend
    {
        //! DNN_BACKEND_DEFAULT equals DNN_BACKEND_INFERENCE_ENGINE if
        //! OpenCV is built with Intel's Inference Engine library, and
        //! DNN_BACKEND_OPENCV otherwise.
        DNN_BACKEND_DEFAULT,
        DNN_BACKEND_HALIDE,
        DNN_BACKEND_INFERENCE_ENGINE,
        DNN_BACKEND_OPENCV,
        DNN_BACKEND_VKCOM
    };

    /**
     * @brief Enum of target devices for computations.
     * @see Net::setPreferableTarget
     */
    enum Target
    {
        DNN_TARGET_CPU,
        DNN_TARGET_OPENCL,
        DNN_TARGET_OPENCL_FP16,
        DNN_TARGET_MYRIAD,
        DNN_TARGET_VULKAN,
        //! FPGA device with CPU fallbacks using Inference Engine's Heterogeneous plugin.
        DNN_TARGET_FPGA
    };
    CV_EXPORTS std::vector< std::pair<Backend, Target> > getAvailableBackends();
    CV_EXPORTS std::vector<Target> getAvailableTargets(Backend be);
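    /** A minimal sketch of probing the available backend/target pairs before configuring a
     *  network (printing the plain enum integers; assumes <iostream> is included):
     *  @code{.cpp}
     *  std::vector< std::pair<cv::dnn::Backend, cv::dnn::Target> > pairs = cv::dnn::getAvailableBackends();
     *  for (size_t i = 0; i < pairs.size(); ++i)
     *      std::cout << "backend " << pairs[i].first << " -> target " << pairs[i].second << std::endl;
     *  @endcode
     */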
    /** @brief This class provides all data needed to initialize a layer.
     *
     * It includes a dictionary with scalar params (which can be read using the Dict interface),
     * blob params #blobs, and optional meta information: the #name and #type of the layer instance.
     */
    class CV_EXPORTS LayerParams : public Dict
    {
    public:
        //TODO: Add ability to name blob params
        std::vector<Mat> blobs; //!< List of learned parameters stored as blobs.

        String name; //!< Name of the layer instance (optional, can be used for internal purposes).
        String type; //!< Type name which was used for creating the layer by the layer factory (optional).
    };
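    /** A minimal usage sketch for LayerParams; the layer type, parameter keys and the
     *  weightsMat variable below are illustrative and must match whatever the target
     *  layer implementation expects:
     *  @code{.cpp}
     *  cv::dnn::LayerParams lp;
     *  lp.name = "my_conv";            // optional instance name
     *  lp.type = "Convolution";        // type name known to the layer factory
     *  lp.set("kernel_size", 3);       // scalar params go through the Dict interface
     *  lp.set("num_output", 16);
     *  lp.blobs.push_back(weightsMat); // learned parameters (a weights Mat) go to blobs
     *  @endcode
     */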
    /**
     * @brief Derivatives of this class encapsulate functions of certain backends.
     */
    class BackendNode
    {
    public:
        BackendNode(int backendId);

        virtual ~BackendNode(); //!< Virtual destructor to enable polymorphism.

        int backendId; //!< Backend identifier.
    };
    /**
     * @brief Derivatives of this class wrap cv::Mat for different backends and targets.
     */
    class CV_EXPORTS BackendWrapper
    {
    public:
        BackendWrapper(int backendId, int targetId);

        /**
         * @brief Wrap cv::Mat for a specific backend and target.
         * @param[in] targetId Target identifier.
         * @param[in] m cv::Mat to wrap.
         *
         * Makes a CPU->GPU data transfer if it is required for the target.
         */
        BackendWrapper(int targetId, const cv::Mat& m);

        /**
         * @brief Make a wrapper for a reused cv::Mat.
         * @param[in] base Wrapper of the cv::Mat that will be reused.
         * @param[in] shape Specific shape.
         *
         * Initializes the wrapper from another one. It will wrap the same host CPU
         * memory and must not allocate memory on the device (i.e. GPU). It might
         * have a different shape. Use this when reusing CPU memory, in order to reuse
         * the associated device memory too.
         */
        BackendWrapper(const Ptr<BackendWrapper>& base, const MatShape& shape);

        virtual ~BackendWrapper(); //!< Virtual destructor to enable polymorphism.

        /**
         * @brief Transfer data to CPU host memory.
         */
        virtual void copyToHost() = 0;

        /**
         * @brief Indicate that the actual data is on the CPU.
         */
        virtual void setHostDirty() = 0;

        int backendId; //!< Backend identifier.
        int targetId; //!< Target identifier.
    };
    class CV_EXPORTS ActivationLayer;
    /** @brief This interface class allows building new Layers - the building blocks of networks.
     *
     * Each class derived from Layer must implement the allocate() method to declare its own outputs and forward() to compute the outputs.
     * Also, before using the new layer in networks, you must register the layer by using one of the @ref dnnLayerFactory "LayerFactory" macros.
     */
    class CV_EXPORTS_W Layer : public Algorithm
    {
    public:

        //! List of learned parameters; must be stored here to allow reading them via Net::getParam().
        CV_PROP_RW std::vector<Mat> blobs;
        /** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
         * @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
         * @param[in] input vector of already allocated input blobs
         * @param[out] output vector of already allocated output blobs
         *
         * This method is called after the network has allocated all memory for input and output blobs
         * and before inferencing.
         */
        CV_DEPRECATED_EXTERNAL
        virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);

        /** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
         * @param[in] inputs vector of already allocated input blobs
         * @param[out] outputs vector of already allocated output blobs
         *
         * This method is called after the network has allocated all memory for input and output blobs
         * and before inferencing.
         */
        CV_WRAP virtual void finalize(InputArrayOfArrays inputs, OutputArrayOfArrays outputs);
        /** @brief Given the @p input blobs, computes the output @p blobs.
         * @deprecated Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
         * @param[in] input the input blobs.
         * @param[out] output allocated output blobs, which will store the results of the computation.
         * @param[out] internals allocated internal blobs
         */
        CV_DEPRECATED_EXTERNAL
        virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals);

        /** @brief Given the @p input blobs, computes the output @p blobs.
         * @param[in] inputs the input blobs.
         * @param[out] outputs allocated output blobs, which will store the results of the computation.
         * @param[out] internals allocated internal blobs
         */
        virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals);

        /** @brief Given the @p input blobs, computes the output @p blobs.
         * @param[in] inputs the input blobs.
         * @param[out] outputs allocated output blobs, which will store the results of the computation.
         * @param[out] internals allocated internal blobs
         */
        void forward_fallback(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals);
        /** @brief @overload
         * @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
         */
        CV_DEPRECATED_EXTERNAL
        void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);

        /** @brief @overload
         * @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
         */
        CV_DEPRECATED std::vector<Mat> finalize(const std::vector<Mat> &inputs);

        /** @brief Allocates the layer and computes its output.
         * @deprecated This method will be removed in a future release.
         */
        CV_DEPRECATED CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
                                       CV_IN_OUT std::vector<Mat> &internals);
        /** @brief Returns the index of the input blob in the input array.
         * @param inputName label of the input blob
         *
         * Each layer input and output can be labeled to easily identify them using the "%<layer_name%>[.output_name]" notation.
         * This method maps the label of an input blob to its index in the input vector.
         */
        virtual int inputNameToIndex(String inputName);
        /** @brief Returns the index of the output blob in the output array.
         * @see inputNameToIndex()
         */
        CV_WRAP virtual int outputNameToIndex(const String& outputName);
        /**
         * @brief Ask the layer if it supports a specific backend for doing its computations.
         * @param[in] backendId computation backend identifier.
         * @see Backend
         */
        virtual bool supportBackend(int backendId);

        /**
         * @brief Returns a Halide backend node.
         * @param[in] inputs Input Halide buffers.
         * @see BackendNode, BackendWrapper
         *
         * Input buffers should be exactly the same ones that will be used in forward invocations.
         * Although a Halide::ImageParam based on the input shape alone would suffice,
         * this helps prevent some memory management issues (if something goes wrong,
         * Halide tests will fail).
         */
        virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);

        virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);

        virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs);
        /**
         * @brief Automatic Halide scheduling based on layer hyper-parameters.
         * @param[in] node Backend node with Halide functions.
         * @param[in] inputs Blobs that will be used in forward invocations.
         * @param[in] outputs Blobs that will be used in forward invocations.
         * @param[in] targetId Target identifier.
         * @see BackendNode, Target
         *
         * Layers don't use their own Halide::Func members because layer fusing might
         * have been applied, in which case the fused function should be scheduled.
         */
        virtual void applyHalideScheduler(Ptr<BackendNode>& node,
                                          const std::vector<Mat*> &inputs,
                                          const std::vector<Mat> &outputs,
                                          int targetId) const;
        /**
         * @brief Implements layer fusing.
         * @param[in] node Backend node of the bottom layer.
         *
         * Relevant for graph-based backends. If the layer is attached successfully,
         * returns a non-empty cv::Ptr to a node of the same backend.
         * Fusion is performed only over the last function.
         */
        virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node);
        /**
         * @brief Tries to attach the subsequent activation layer to this layer, i.e. performs layer fusion in a special case.
         * @param[in] layer The subsequent activation layer.
         *
         * Returns true if the activation layer has been attached successfully.
         */
        virtual bool setActivation(const Ptr<ActivationLayer>& layer);

        /**
         * @brief Tries to fuse the current layer with the next one.
         * @param[in] top Next layer to be fused.
         * @returns True if fusion was performed.
         */
        virtual bool tryFuse(Ptr<Layer>& top);
        /**
         * @brief Returns parameters of layers with channel-wise multiplication and addition.
         * @param[out] scale Channel-wise multipliers. The total number of values should
         *                   be equal to the number of channels.
         * @param[out] shift Channel-wise offsets. The total number of values should
         *                   be equal to the number of channels.
         *
         * Some layers can fuse their transformations with further layers.
         * For example, convolution + batch normalization: in this case the base layer
         * uses weights from the layer after it, and the fused layer is skipped.
         * By default, @p scale and @p shift are empty, which means the layer has no
         * element-wise multiplications or additions.
         */
        virtual void getScaleShift(Mat& scale, Mat& shift) const;

        /**
         * @brief "Detaches" all the layers attached to the particular layer.
         */
        virtual void unsetAttached();
        virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                     const int requiredOutputs,
                                     std::vector<MatShape> &outputs,
                                     std::vector<MatShape> &internals) const;
        virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                               const std::vector<MatShape> &outputs) const {CV_UNUSED(inputs); CV_UNUSED(outputs); return 0;}
        CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
        CV_PROP String type; //!< Type name which was used for creating the layer by the layer factory.
        CV_PROP int preferableTarget; //!< preferred target for layer forwarding

        explicit Layer(const LayerParams &params); //!< Initializes only the #name, #type and #blobs fields.
        void setParamsFrom(const LayerParams &params); //!< Initializes only the #name, #type and #blobs fields.
    };
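    /** A minimal sketch of a custom layer that simply copies its input to its output; the
     *  class name and the registered type string "MyCopy" are hypothetical. Real layers
     *  would typically also override getMemoryShapes() when the output shapes differ from
     *  the input shapes:
     *  @code{.cpp}
     *  class MyCopyLayer : public cv::dnn::Layer
     *  {
     *  public:
     *      MyCopyLayer(const cv::dnn::LayerParams& params) { setParamsFrom(params); }
     *
     *      static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
     *      {
     *          return cv::Ptr<cv::dnn::Layer>(new MyCopyLayer(params));
     *      }
     *
     *      virtual void forward(cv::InputArrayOfArrays inputs_arr, cv::OutputArrayOfArrays outputs_arr,
     *                           cv::OutputArrayOfArrays internals_arr) CV_OVERRIDE
     *      {
     *          std::vector<cv::Mat> inputs, outputs;
     *          inputs_arr.getMatVector(inputs);   // unwrap the input blobs
     *          outputs_arr.getMatVector(outputs); // outputs are preallocated by the network
     *          inputs[0].copyTo(outputs[0]);
     *      }
     *  };
     *  // registration via the LayerFactory macro from <opencv2/dnn/layer.details.hpp>:
     *  // CV_DNN_REGISTER_LAYER_CLASS(MyCopy, MyCopyLayer);
     *  @endcode
     */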
    /** @brief This class allows creating and manipulating comprehensive artificial neural networks.
     *
     * A neural network is presented as a directed acyclic graph (DAG), where vertices are Layer instances,
     * and edges specify relationships between layer inputs and outputs.
     *
     * Each network layer has a unique integer id and a unique string name inside its network.
     * LayerId can store either a layer name or a layer id.
     *
     * This class supports reference counting of its instances, i.e. copies point to the same instance.
     */
    class CV_EXPORTS_W_SIMPLE Net
    {
    public:

        CV_WRAP Net(); //!< Default constructor.
        CV_WRAP ~Net(); //!< Destructor frees the net only if there are no other references to it.
        /** @brief Create a network from Intel's Model Optimizer intermediate representation.
         * @param[in] xml XML configuration file with network's topology.
         * @param[in] bin Binary file with trained weights.
         * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
         * backend.
         */
        CV_WRAP static Net readFromModelOptimizer(const String& xml, const String& bin);
        /** Returns true if there are no layers in the network. */
        CV_WRAP bool empty() const;
        /** @brief Adds a new layer to the net.
         * @param name unique name of the layer being added.
         * @param type typename of the layer being added (the type must be registered in LayerRegister).
         * @param params parameters which will be used to initialize the created layer.
         * @returns unique identifier of the created layer, or -1 if a failure happened.
         */
        int addLayer(const String &name, const String &type, LayerParams &params);
        /** @brief Adds a new layer and connects its first input to the first output of the previously added layer.
         * @see addLayer()
         */
        int addLayerToPrev(const String &name, const String &type, LayerParams &params);
        /** @brief Converts a string name of the layer to the integer identifier.
         * @returns id of the layer, or -1 if the layer wasn't found.
         */
        CV_WRAP int getLayerId(const String &layer);

        CV_WRAP std::vector<String> getLayerNames() const;
        /** @brief Container for strings and integers. */
        typedef DictValue LayerId;

        /** @brief Returns a pointer to the layer with the specified id or name which the network uses. */
        CV_WRAP Ptr<Layer> getLayer(LayerId layerId);

        /** @brief Returns pointers to the input layers of a specific layer. */
        std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId); // FIXIT: CV_WRAP
        /** @brief Connects an output of the first layer to an input of the second layer.
         * @param outPin descriptor of the first layer output.
         * @param inpPin descriptor of the second layer input.
         *
         * Descriptors have the following template <DFN><layer_name>[.input_number]</DFN>:
         * - the first part of the template <DFN>layer_name</DFN> is the string name of the added layer.
         *   If this part is empty then the network input pseudo layer will be used;
         * - the second optional part of the template <DFN>input_number</DFN>
         *   is either a number of the layer input or its label.
         *   If this part is omitted then the first layer input will be used.
         *
         * @see setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
         */
        CV_WRAP void connect(String outPin, String inpPin);
        /** @brief Connects the #@p outNum output of the first layer to the #@p inpNum input of the second layer.
         * @param outLayerId identifier of the first layer
         * @param outNum number of the first layer output
         * @param inpLayerId identifier of the second layer
         * @param inpNum number of the second layer input
         */
        void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);
        /** @brief Sets output names of the network input pseudo layer.
         *
         * Each net always has its own special network input pseudo layer with id=0.
         * This layer stores the user blobs only and doesn't make any computations.
         * In fact, this layer provides the only way to pass user data into the network.
         * As any other layer, this layer can label its outputs, and this function provides an easy way to do this.
         */
        CV_WRAP void setInputsNames(const std::vector<String> &inputBlobNames);
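        /** A minimal sketch of assembling a trivial one-layer net by hand; the layer type
         *  "Identity" is assumed to be registered in the layer factory, and the integer-id
         *  overload of connect() is used to avoid the pin-descriptor syntax:
         *  @code{.cpp}
         *  cv::dnn::Net net;
         *  net.setInputsNames(std::vector<cv::String>(1, "data"));
         *  cv::dnn::LayerParams lp;                         // no scalar params needed here
         *  int id = net.addLayer("ident", "Identity", lp);
         *  net.connect(0, 0, id, 0);                        // input pseudo layer (id=0) -> our layer
         *  @endcode
         */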
        /** @brief Runs a forward pass to compute the output of the layer with name @p outputName.
         * @param outputName name of the layer whose output is needed
         * @return blob for the first output of the specified layer.
         * @details By default runs a forward pass for the whole network.
         */
        CV_WRAP Mat forward(const String& outputName = String());

        /** @brief Runs a forward pass to compute the output of the layer with name @p outputName.
         * @param outputBlobs contains all output blobs for the specified layer.
         * @param outputName name of the layer whose output is needed
         * @details If @p outputName is empty, runs a forward pass for the whole network.
         */
        CV_WRAP void forward(OutputArrayOfArrays outputBlobs, const String& outputName = String());

        /** @brief Runs a forward pass to compute the outputs of the layers listed in @p outBlobNames.
         * @param outputBlobs contains blobs for the first outputs of the specified layers.
         * @param outBlobNames names of the layers whose outputs are needed
         */
        CV_WRAP void forward(OutputArrayOfArrays outputBlobs,
                             const std::vector<String>& outBlobNames);

        /** @brief Runs a forward pass to compute the outputs of the layers listed in @p outBlobNames.
         * @param outputBlobs contains all output blobs for each layer specified in @p outBlobNames.
         * @param outBlobNames names of the layers whose outputs are needed
         */
        CV_WRAP_AS(forwardAndRetrieve) void forward(CV_OUT std::vector<std::vector<Mat> >& outputBlobs,
                                                    const std::vector<String>& outBlobNames);
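        /** A minimal sketch of fetching several outputs in one pass (common for detection
         *  models with multiple output layers); assumes @p net is already loaded and has
         *  its input set:
         *  @code{.cpp}
         *  std::vector<cv::String> outNames = net.getUnconnectedOutLayersNames();
         *  std::vector<cv::Mat> outs;
         *  net.forward(outs, outNames);   // outs[i] is the first output of layer outNames[i]
         *  @endcode
         */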
        /**
         * @brief Compile Halide layers.
         * @param[in] scheduler Path to a YAML file with scheduling directives.
         * @see setPreferableBackend
         *
         * Schedules layers that support the Halide backend, then compiles them for a
         * specific target. For layers that are not represented in the scheduling file,
         * or if no manual scheduling is used at all, automatic scheduling will be applied.
         */
        CV_WRAP void setHalideScheduler(const String& scheduler);
        /**
         * @brief Ask the network to use a specific computation backend where it is supported.
         * @param[in] backendId backend identifier.
         * @see Backend
         *
         * If OpenCV is compiled with Intel's Inference Engine library, DNN_BACKEND_DEFAULT
         * means DNN_BACKEND_INFERENCE_ENGINE. Otherwise it equals DNN_BACKEND_OPENCV.
         */
        CV_WRAP void setPreferableBackend(int backendId);
        /**
         * @brief Ask the network to make its computations on a specific target device.
         * @param[in] targetId target identifier.
         * @see Target
         *
         * List of supported backend / target combinations:
         * |                        | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE |
         * |------------------------|--------------------|------------------------------|--------------------|
         * | DNN_TARGET_CPU         |         +          |              +               |         +          |
         * | DNN_TARGET_OPENCL      |         +          |              +               |         +          |
         * | DNN_TARGET_OPENCL_FP16 |         +          |              +               |                    |
         * | DNN_TARGET_MYRIAD      |                    |              +               |                    |
         * | DNN_TARGET_FPGA        |                    |              +               |                    |
         */
        CV_WRAP void setPreferableTarget(int targetId);
        /** @brief Sets the new input value for the network.
         * @param blob A new blob. Should have CV_32F or CV_8U depth.
         * @param name A name of the input layer.
         * @param scalefactor An optional normalization scale.
         * @param mean Optional mean subtraction values.
         * @see connect(String, String) to know the format of the descriptor.
         *
         * If scale or mean values are specified, a final input blob is computed as:
         * \f[input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\f]
         */
        CV_WRAP void setInput(InputArray blob, const String& name = "",
                              double scalefactor = 1.0, const Scalar& mean = Scalar());
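        /** A minimal end-to-end sketch of the usual configure/input/forward cycle; the model
         *  and image file names, input size and mean values are illustrative only:
         *  @code{.cpp}
         *  cv::dnn::Net net = cv::dnn::readNetFromCaffe("model.prototxt", "model.caffemodel");
         *  net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
         *  net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
         *  cv::Mat img = cv::imread("image.jpg");  // requires the imgcodecs module
         *  cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(224, 224), cv::Scalar(104, 117, 123));
         *  net.setInput(blob);
         *  cv::Mat prob = net.forward();           // first output of the last layer
         *  @endcode
         */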
        /** @brief Sets the new value for the learned parameters of the layer.
         * @param layer name or id of the layer.
         * @param numParam index of the layer parameter in the Layer::blobs array.
         * @param blob the new value.
         * @see Layer::blobs
         * @note If the shape of the new blob differs from the previous shape,
         * then the following forward pass may fail.
         */
        CV_WRAP void setParam(LayerId layer, int numParam, const Mat &blob);

        /** @brief Returns the parameter blob of the layer.
         * @param layer name or id of the layer.
         * @param numParam index of the layer parameter in the Layer::blobs array.
         * @see Layer::blobs
         */
        CV_WRAP Mat getParam(LayerId layer, int numParam = 0);
        /** @brief Returns indexes of layers with unconnected outputs.
         */
        CV_WRAP std::vector<int> getUnconnectedOutLayers() const;

        /** @brief Returns names of layers with unconnected outputs.
         */
        CV_WRAP std::vector<String> getUnconnectedOutLayersNames() const;
        /** @brief Returns input and output shapes for all layers in the loaded model;
         * preliminary inferencing isn't necessary.
         * @param netInputShapes shapes for all input blobs in the net input layer.
         * @param layersIds output parameter for layer IDs.
         * @param inLayersShapes output parameter for input layers shapes;
         * order is the same as in layersIds
         * @param outLayersShapes output parameter for output layers shapes;
         * order is the same as in layersIds
         */
        CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
                                     CV_OUT std::vector<int>& layersIds,
                                     CV_OUT std::vector<std::vector<MatShape> >& inLayersShapes,
                                     CV_OUT std::vector<std::vector<MatShape> >& outLayersShapes) const;

        /** @overload */
        CV_WRAP void getLayersShapes(const MatShape& netInputShape,
                                     CV_OUT std::vector<int>& layersIds,
                                     CV_OUT std::vector<std::vector<MatShape> >& inLayersShapes,
                                     CV_OUT std::vector<std::vector<MatShape> >& outLayersShapes) const;
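        /** A minimal sketch of querying shapes without running inference; the NCHW input
         *  shape values are illustrative, and @p net is assumed to be loaded:
         *  @code{.cpp}
         *  cv::dnn::MatShape inpShape(4);
         *  inpShape[0] = 1; inpShape[1] = 3; inpShape[2] = 224; inpShape[3] = 224;  // NCHW
         *  std::vector<int> ids;
         *  std::vector<std::vector<cv::dnn::MatShape> > inShapes, outShapes;
         *  net.getLayersShapes(inpShape, ids, inShapes, outShapes);
         *  @endcode
         */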
        /** @brief Returns input and output shapes for the layer with the specified
         * id in the loaded model; preliminary inferencing isn't necessary.
         * @param netInputShape shape of the input blob in the net input layer.
         * @param layerId id of the layer.
         * @param inLayerShapes output parameter for input layers shapes;
         * order is the same as in layersIds
         * @param outLayerShapes output parameter for output layers shapes;
         * order is the same as in layersIds
         */
        void getLayerShapes(const MatShape& netInputShape,
                            const int layerId,
                            CV_OUT std::vector<MatShape>& inLayerShapes,
                            CV_OUT std::vector<MatShape>& outLayerShapes) const; // FIXIT: CV_WRAP

        /** @overload */
        void getLayerShapes(const std::vector<MatShape>& netInputShapes,
                            const int layerId,
                            CV_OUT std::vector<MatShape>& inLayerShapes,
                            CV_OUT std::vector<MatShape>& outLayerShapes) const; // FIXIT: CV_WRAP
        /** @brief Computes FLOPs for the whole loaded model with the specified input shapes.
         * @param netInputShapes vector of shapes for all net inputs.
         * @returns computed FLOPs.
         */
        CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
        /** @overload */
        CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
        /** @overload */
        CV_WRAP int64 getFLOPS(const int layerId,
                               const std::vector<MatShape>& netInputShapes) const;
        /** @overload */
        CV_WRAP int64 getFLOPS(const int layerId,
                               const MatShape& netInputShape) const;
        /** @brief Returns the list of types for layers used in the model.
         * @param layersTypes output parameter for returning types.
         */
        CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;

        /** @brief Returns the count of layers of the specified type.
         * @param layerType type.
         * @returns count of layers
         */
        CV_WRAP int getLayersCount(const String& layerType) const;
        /** @brief Computes the number of bytes required to store
         * all weights and intermediate blobs for the model.
         * @param netInputShapes vector of shapes for all net inputs.
         * @param weights output parameter to store resulting bytes for weights.
         * @param blobs output parameter to store resulting bytes for intermediate blobs.
         */
        void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
                                  CV_OUT size_t& weights, CV_OUT size_t& blobs) const; // FIXIT: CV_WRAP
        /** @overload */
        CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
        /** @overload */
        CV_WRAP void getMemoryConsumption(const int layerId,
                                          const std::vector<MatShape>& netInputShapes,
                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
        /** @overload */
        CV_WRAP void getMemoryConsumption(const int layerId,
                                          const MatShape& netInputShape,
                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;

        /** @brief Computes the number of bytes required to store
         * all weights and intermediate blobs for each layer.
         * @param netInputShapes vector of shapes for all net inputs.
         * @param layerIds output vector to save layer IDs.
         * @param weights output parameter to store resulting bytes for weights.
         * @param blobs output parameter to store resulting bytes for intermediate blobs.
         */
        void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
                                  CV_OUT std::vector<int>& layerIds,
                                  CV_OUT std::vector<size_t>& weights,
                                  CV_OUT std::vector<size_t>& blobs) const; // FIXIT: CV_WRAP
        /** @overload */
        void getMemoryConsumption(const MatShape& netInputShape,
                                  CV_OUT std::vector<int>& layerIds,
                                  CV_OUT std::vector<size_t>& weights,
                                  CV_OUT std::vector<size_t>& blobs) const; // FIXIT: CV_WRAP
        /** @brief Enables or disables layer fusion in the network.
         * @param fusion true to enable the fusion, false to disable. The fusion is enabled by default.
         */
        CV_WRAP void enableFusion(bool fusion);
        /** @brief Returns the overall time for inference and timings (in ticks) for layers.
         * Indexes in the returned vector correspond to layer ids. Some layers can be fused with others,
         * in which case a zero tick count will be returned for those skipped layers.
         * @param timings vector for tick timings for all layers.
         * @return overall ticks for model inference.
         */
        CV_WRAP int64 getPerfProfile(CV_OUT std::vector<double>& timings);
    };
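    /** A minimal sketch of reading per-layer timings after at least one forward() call,
     *  converting ticks to milliseconds with cv::getTickFrequency():
     *  @code{.cpp}
     *  std::vector<double> timings;
     *  int64 total = net.getPerfProfile(timings);
     *  double ms = total / cv::getTickFrequency() * 1000.0;  // overall inference time
     *  @endcode
     */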
    /** @brief Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
     * @param cfgFile path to the .cfg file with the text description of the network architecture.
     * @param darknetModel path to the .weights file with the learned network.
     * @returns Net object ready to do forward; throws an exception in failure cases.
     */
    CV_EXPORTS_W Net readNetFromDarknet(const String &cfgFile, const String &darknetModel = String());
    /** @brief Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
     * @param bufferCfg A buffer containing the content of the .cfg file with the text description of the network architecture.
     * @param bufferModel A buffer containing the content of the .weights file with the learned network.
     * @returns Net object.
     */
    CV_EXPORTS_W Net readNetFromDarknet(const std::vector<uchar>& bufferCfg,
                                        const std::vector<uchar>& bufferModel = std::vector<uchar>());
    /** @brief Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
     * @param bufferCfg A buffer containing the content of the .cfg file with the text description of the network architecture.
     * @param lenCfg Number of bytes to read from bufferCfg
     * @param bufferModel A buffer containing the content of the .weights file with the learned network.
     * @param lenModel Number of bytes to read from bufferModel
     * @returns Net object.
     */
    CV_EXPORTS Net readNetFromDarknet(const char *bufferCfg, size_t lenCfg,
                                      const char *bufferModel = NULL, size_t lenModel = 0);
    /** @brief Reads a network model stored in <a href="http://caffe.berkeleyvision.org">Caffe</a> framework's format.
     * @param prototxt path to the .prototxt file with the text description of the network architecture.
     * @param caffeModel path to the .caffemodel file with the learned network.
     * @returns Net object.
     */
    CV_EXPORTS_W Net readNetFromCaffe(const String &prototxt, const String &caffeModel = String());
    /** @brief Reads a network model stored in Caffe model format in memory.
     * @param bufferProto buffer containing the content of the .prototxt file
     * @param bufferModel buffer containing the content of the .caffemodel file
     * @returns Net object.
     */
    CV_EXPORTS_W Net readNetFromCaffe(const std::vector<uchar>& bufferProto,
                                      const std::vector<uchar>& bufferModel = std::vector<uchar>());
    /** @brief Reads a network model stored in Caffe model format in memory.
     * @details This is an overloaded member function, provided for convenience.
     * It differs from the above function only in what argument(s) it accepts.
     * @param bufferProto buffer containing the content of the .prototxt file
     * @param lenProto length of bufferProto
     * @param bufferModel buffer containing the content of the .caffemodel file
     * @param lenModel length of bufferModel
     * @returns Net object.
     */
    CV_EXPORTS Net readNetFromCaffe(const char *bufferProto, size_t lenProto,
                                    const char *bufferModel = NULL, size_t lenModel = 0);
    /** @brief Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
     * @param model path to the .pb file with the binary protobuf description of the network architecture
     * @param config path to the .pbtxt file that contains the text graph definition in protobuf format.
     * The resulting Net object is built from the text graph using weights from the binary one,
     * which lets us make it more flexible.
     * @returns Net object.
     */
    CV_EXPORTS_W Net readNetFromTensorflow(const String &model, const String &config = String());
    /** @brief Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
     * @param bufferModel buffer containing the content of the pb file
     * @param bufferConfig buffer containing the content of the pbtxt file
     * @returns Net object.
     */
    CV_EXPORTS_W Net readNetFromTensorflow(const std::vector<uchar>& bufferModel,
                                           const std::vector<uchar>& bufferConfig = std::vector<uchar>());
    /** @brief Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
     * @details This is an overloaded member function, provided for convenience.
     * It differs from the above function only in what argument(s) it accepts.
     * @param bufferModel buffer containing the content of the pb file
     * @param lenModel length of bufferModel
     * @param bufferConfig buffer containing the content of the pbtxt file
     * @param lenConfig length of bufferConfig
     */
    CV_EXPORTS Net readNetFromTensorflow(const char *bufferModel, size_t lenModel,
                                         const char *bufferConfig = NULL, size_t lenConfig = 0);
    /**
     * @brief Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
     * @param model    path to the file, dumped from Torch by using the torch.save() function.
     * @param isBinary specifies whether the network was serialized in ASCII mode or in binary.
     * @param evaluate specifies the testing phase of the network. If true, it's similar to the evaluate() method in Torch.
     * @returns Net object.
     *
     * @note The ASCII mode of the Torch serializer is preferable, because the binary mode extensively uses the `long` type of the C language,
     * which has varying bit-length on different systems.
     *
     * The file being loaded must contain a serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
     * with the network being imported. Try to eliminate custom objects from the serialized data to avoid import errors.
     *
     * List of supported layers (i.e. object instances derived from the Torch nn.Module class):
     * - nn.Sequential
     * - nn.Parallel
     * - nn.Concat
     * - nn.Linear
     * - nn.SpatialConvolution
     * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
     * - nn.ReLU, nn.TanH, nn.Sigmoid
     * - nn.Reshape
     * - nn.SoftMax, nn.LogSoftMax
     *
     * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
     */
    CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true, bool evaluate = true);
    /**
     * @brief Read deep learning network represented in one of the supported formats.
     * @param[in] model Binary file containing trained weights. The following file
     *                  extensions are expected for models from different frameworks:
     *                  * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
     *                  * `*.pb` (TensorFlow, https://www.tensorflow.org/)
     *                  * `*.t7` | `*.net` (Torch, http://torch.ch/)
     *                  * `*.weights` (Darknet, https://pjreddie.com/darknet/)
     *                  * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
     * @param[in] config Text file containing the network configuration. It could be a
     *                   file with one of the following extensions:
     *                  * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
     *                  * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
     *                  * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
     *                  * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
     * @param[in] framework Explicit framework name tag to determine the format.
     * @returns Net object.
     *
     * This function automatically detects the origin framework of the trained model
     * and calls an appropriate function, such as @ref readNetFromCaffe, @ref readNetFromTensorflow,
     * @ref readNetFromTorch or @ref readNetFromDarknet. The order of the @p model and @p config
     * arguments does not matter.
     */
    CV_EXPORTS_W Net readNet(const String& model, const String& config = "", const String& framework = "");
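    /** A minimal sketch of the format auto-detection (file names are illustrative);
     *  swapping the two path arguments gives the same result:
     *  @code{.cpp}
     *  cv::dnn::Net tfNet = cv::dnn::readNet("graph.pb", "graph.pbtxt");
     *  cv::dnn::Net dkNet = cv::dnn::readNet("net.weights", "net.cfg");
     *  @endcode
     */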
    /**
     * @brief Read deep learning network represented in one of the supported formats.
     * @details This is an overloaded member function, provided for convenience.
     * It differs from the above function only in what argument(s) it accepts.
     * @param[in] framework Name of the origin framework.
     * @param[in] bufferModel A buffer with the content of the binary file with weights.
     * @param[in] bufferConfig A buffer with the content of the text file containing the network configuration.
     * @returns Net object.
     */
    CV_EXPORTS_W Net readNet(const String& framework, const std::vector<uchar>& bufferModel,
                             const std::vector<uchar>& bufferConfig = std::vector<uchar>());
    /** @brief Loads a blob which was serialized as a torch.Tensor object of the Torch7 framework.
     * @warning This function has the same limitations as readNetFromTorch().
     */
    CV_EXPORTS_W Mat readTorchBlob(const String &filename, bool isBinary = true);
    /** @brief Load a network from Intel's Model Optimizer intermediate representation.
     * @param[in] xml XML configuration file with network's topology.
     * @param[in] bin Binary file with trained weights.
     * @returns Net object.
     * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
     * backend.
     */
    CV_EXPORTS_W Net readNetFromModelOptimizer(const String &xml, const String &bin);
    /** @brief Reads a network model stored in <a href="https://onnx.ai/">ONNX</a> format.
     * @param onnxFile path to the .onnx file with the text description of the network architecture.
     * @returns Net object ready to do forward; throws an exception in failure cases.
     */
    CV_EXPORTS_W Net readNetFromONNX(const String &onnxFile);
    /** @brief Creates a blob from a .pb file.
     * @param path path to the .pb file with the input tensor.
     * @returns Mat.
     */
    CV_EXPORTS_W Mat readTensorFromONNX(const String& path);
    /** @brief Creates a 4-dimensional blob from an image. Optionally resizes and crops @p image from the center,
     * subtracts @p mean values, scales values by @p scalefactor, and swaps Blue and Red channels.
     * @param image input image (with 1-, 3- or 4-channels).
     * @param size spatial size for the output image
     * @param mean scalar with mean values which are subtracted from the channels. Values are intended
     * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
     * @param scalefactor multiplier for @p image values.
     * @param swapRB flag which indicates that the first and last channels
     * in a 3-channel image should be swapped.
     * @param crop flag which indicates whether the image will be cropped after resize or not
     * @param ddepth Depth of the output blob. Choose CV_32F or CV_8U.
     * @details if @p crop is true, the input image is resized so that one side after resize is equal to the corresponding
     * dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
     * If @p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
     * @returns 4-dimensional Mat with NCHW dimensions order.
     */
    CV_EXPORTS_W Mat blobFromImage(InputArray image, double scalefactor=1.0, const Size& size = Size(),
                                   const Scalar& mean = Scalar(), bool swapRB=false, bool crop=false,
                                   int ddepth=CV_32F);
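    /** A minimal sketch of the usual BGR-image-to-NCHW-blob conversion; the scale, size and
     *  swapRB choices below are illustrative and model-dependent:
     *  @code{.cpp}
     *  cv::Mat img = cv::imread("image.jpg");  // 8UC3, BGR order
     *  cv::Mat blob = cv::dnn::blobFromImage(img, 1.0 / 255.0, cv::Size(224, 224),
     *                                        cv::Scalar(), true, false);  // swapRB=true, crop=false
     *  // blob has dims 1 x 3 x 224 x 224 (NCHW) and depth CV_32F
     *  @endcode
     */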
    /** @brief Creates a 4-dimensional blob from an image.
     * @details This is an overloaded member function, provided for convenience.
     * It differs from the above function only in what argument(s) it accepts.
     */
    CV_EXPORTS void blobFromImage(InputArray image, OutputArray blob, double scalefactor=1.0,
                                  const Size& size = Size(), const Scalar& mean = Scalar(),
                                  bool swapRB=false, bool crop=false, int ddepth=CV_32F);
    /** @brief Creates a 4-dimensional blob from a series of images. Optionally resizes and
     * crops @p images from the center, subtracts @p mean values, scales values by @p scalefactor,
     * and swaps Blue and Red channels.
     * @param images input images (all with 1-, 3- or 4-channels).
     * @param size spatial size for the output image
     * @param mean scalar with mean values which are subtracted from the channels. Values are intended
     * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
     * @param scalefactor multiplier for @p images values.
     * @param swapRB flag which indicates that the first and last channels
     * in a 3-channel image should be swapped.
     * @param crop flag which indicates whether the image will be cropped after resize or not
     * @param ddepth Depth of the output blob. Choose CV_32F or CV_8U.
     * @details if @p crop is true, the input image is resized so that one side after resize is equal to the corresponding
     * dimension in @p size and the other one is equal or larger. Then, a crop from the center is performed.
     * If @p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
     * @returns 4-dimensional Mat with NCHW dimensions order.
     */
    CV_EXPORTS_W Mat blobFromImages(InputArrayOfArrays images, double scalefactor=1.0,
                                    Size size = Size(), const Scalar& mean = Scalar(), bool swapRB=false, bool crop=false,
                                    int ddepth=CV_32F);
    /** @brief Creates a 4-dimensional blob from a series of images.
     * @details This is an overloaded member function, provided for convenience.
     * It differs from the above function only in what argument(s) it accepts.
     */
    CV_EXPORTS void blobFromImages(InputArrayOfArrays images, OutputArray blob,
                                   double scalefactor=1.0, Size size = Size(),
                                   const Scalar& mean = Scalar(), bool swapRB=false, bool crop=false,
                                   int ddepth=CV_32F);
    /** @brief Parses a 4D blob and outputs the images it contains as 2D arrays through a simpler data structure
     * (std::vector<cv::Mat>).
     * @param[in] blob_ 4-dimensional array (images, channels, height, width) in floating point precision (CV_32F) from
     * which you would like to extract the images.
     * @param[out] images_ array of 2D Mat containing the images extracted from the blob in floating point precision
     * (CV_32F). They are neither normalized nor mean-added. The number of returned images equals the first dimension
     * of the blob (batch size). Every image has a number of channels equal to the second dimension of the blob (depth).
     */
    CV_EXPORTS_W void imagesFromBlob(const cv::Mat& blob_, OutputArrayOfArrays images_);
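    /** A minimal sketch of the inverse direction: unpacking a batch blob back into
     *  per-image 2D Mats (assumes @p blob came from blobFromImages or a network output
     *  with the same layout):
     *  @code{.cpp}
     *  std::vector<cv::Mat> images;
     *  cv::dnn::imagesFromBlob(blob, images);  // images.size() == batch size of the blob
     *  @endcode
     */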
    /** @brief Convert all weights of a Caffe network to half precision floating point.
     * @param src Path to the original model from the Caffe framework, containing single-
     * precision floating point weights (usually has a `.caffemodel` extension).
     * @param dst Path to the destination model with updated weights.
     * @param layersTypes Set of layer types whose parameters will be converted.
     * By default, converts only the weights of Convolutional and Fully-Connected layers.
     *
     * @note The shrunk model has no original float32 weights, so it can't be used
     * in the original Caffe framework anymore. However the structure of data
     * is taken from NVidia's Caffe fork: https://github.com/NVIDIA/caffe.
     * So the resulting model may be used there.
     */
    CV_EXPORTS_W void shrinkCaffeModel(const String& src, const String& dst,
                                       const std::vector<String>& layersTypes = std::vector<String>());
    /** @brief Create a text representation for a binary network stored in protocol buffer format.
     * @param[in] model A path to the binary network.
     * @param[in] output A path to the output text file to be created.
     *
     * @note To reduce the output file size, trained weights are not included.
     */
    CV_EXPORTS_W void writeTextGraph(const String& model, const String& output);
    /** @brief Performs non maximum suppression given boxes and corresponding scores.
     *
     * @param bboxes a set of bounding boxes to apply NMS.
     * @param scores a set of corresponding confidences.
     * @param score_threshold a threshold used to filter boxes by score.
     * @param nms_threshold a threshold used in non maximum suppression.
     * @param indices the kept indices of bboxes after NMS.
     * @param eta a coefficient in the adaptive threshold formula: \f$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i\f$.
     * @param top_k if `>0`, keep at most @p top_k picked indices.
     */
    CV_EXPORTS_W void NMSBoxes(const std::vector<Rect>& bboxes, const std::vector<float>& scores,
                               const float score_threshold, const float nms_threshold,
                               CV_OUT std::vector<int>& indices,
                               const float eta = 1.f, const int top_k = 0);

    CV_EXPORTS_W void NMSBoxes(const std::vector<Rect2d>& bboxes, const std::vector<float>& scores,
                               const float score_threshold, const float nms_threshold,
                               CV_OUT std::vector<int>& indices,
                               const float eta = 1.f, const int top_k = 0);

    CV_EXPORTS_AS(NMSBoxesRotated) void NMSBoxes(const std::vector<RotatedRect>& bboxes, const std::vector<float>& scores,
                                                 const float score_threshold, const float nms_threshold,
                                                 CV_OUT std::vector<int>& indices,
                                                 const float eta = 1.f, const int top_k = 0);
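    /** A minimal sketch of the usual post-detection filtering; the threshold values are
     *  illustrative:
     *  @code{.cpp}
     *  std::vector<cv::Rect> boxes;   // filled by a detector
     *  std::vector<float> scores;     // one confidence per box
     *  std::vector<int> keep;
     *  cv::dnn::NMSBoxes(boxes, scores, 0.5f, 0.4f, keep);
     *  for (size_t i = 0; i < keep.size(); ++i)
     *  {
     *      cv::Rect box = boxes[keep[i]];  // a retained detection
     *      // ... draw or report the box ...
     *  }
     *  @endcode
     */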
//! @}
CV__DNN_INLINE_NS_END
}
}

#include <opencv2/dnn/layer.hpp>
#include <opencv2/dnn/dnn.inl.hpp>

/// @deprecated Include this header directly from application. Automatic inclusion will be removed
#include <opencv2/dnn/utils/inference_engine.hpp>

#endif  /* OPENCV_DNN_DNN_HPP */