Merge pull request #19693 from LupusSanctus:onnx_diagnostic
author    Anastasia M <anastasia.murzova@xperience.ai>
Mon, 29 Mar 2021 16:38:28 +0000 (19:38 +0300)
committer GitHub <noreply@github.com>
Mon, 29 Mar 2021 16:38:28 +0000 (16:38 +0000)
ONNX diagnostic tool

* Final

* Add forgotten Normalize layer to the set of supported types

* ONNX diagnostic tool corrections

* Fixed CI test warnings

* Added minor code corrections

Co-authored-by: Sergey Slashchinin <sergei.slashchinin@xperience.ai>
apps/CMakeLists.txt
apps/model-diagnostics/CMakeLists.txt [new file with mode: 0644]
apps/model-diagnostics/model_diagnostics.cpp [new file with mode: 0644]
modules/dnn/include/opencv2/dnn/dnn.hpp
modules/dnn/include/opencv2/dnn/layer_reg.private.hpp [new file with mode: 0644]
modules/dnn/src/dnn.cpp
modules/dnn/src/layers/slice_layer.cpp
modules/dnn/src/onnx/onnx_graph_simplifier.cpp
modules/dnn/src/onnx/onnx_importer.cpp

index 1504fa6..e9a7be7 100644 (file)
@@ -59,3 +59,4 @@ ocv_add_app(annotation)
 ocv_add_app(visualisation)
 ocv_add_app(interactive-calibration)
 ocv_add_app(version)
+ocv_add_app(model-diagnostics)
diff --git a/apps/model-diagnostics/CMakeLists.txt b/apps/model-diagnostics/CMakeLists.txt
new file mode 100644 (file)
index 0000000..b48f826
--- /dev/null
@@ -0,0 +1,3 @@
+ocv_add_application(opencv_model_diagnostics
+    MODULES opencv_core opencv_dnn
+    SRCS model_diagnostics.cpp)
diff --git a/apps/model-diagnostics/model_diagnostics.cpp b/apps/model-diagnostics/model_diagnostics.cpp
new file mode 100644 (file)
index 0000000..ed20ec0
--- /dev/null
@@ -0,0 +1,62 @@
+/*************************************************
+USAGE:
+./opencv_model_diagnostics -m=<onnx file location>
+**************************************************/
+#include <opencv2/dnn.hpp>
+#include <opencv2/core/utils/filesystem.hpp>
+
+#include <iostream>
+
+
+using namespace cv;
+using namespace dnn;
+
+
+static int diagnosticsErrorCallback(int, const char*, const char*, const char*, int, void*)
+{
+    fflush(stdout);
+    fflush(stderr);
+    return 0;
+}
+
+static std::string checkFileExists(const std::string& fileName)
+{
+    if (fileName.empty() || utils::fs::exists(fileName))
+        return fileName;
+
+    CV_Error(Error::StsObjectNotFound, "File " + fileName + " was not found! "
+         "Please, specify a full path to the file.");
+}
+
+std::string diagnosticKeys =
+        "{ model m     | | Path to the model .onnx file. }"
+        "{ config c    | | Path to the model configuration file. }"
+        "{ framework f | | [Optional] Name of the model framework. }";
+
+
+
+int main( int argc, const char** argv )
+{
+    CommandLineParser argParser(argc, argv, diagnosticKeys);
+    argParser.about("Use this tool to run the diagnostics of a provided ONNX model "
+                    "to obtain information about its support (supported layers).");
+
+    if (argc == 1)
+    {
+        argParser.printMessage();
+        return 0;
+    }
+
+    std::string model = checkFileExists(argParser.get<std::string>("model"));
+    std::string config = checkFileExists(argParser.get<std::string>("config"));
+    std::string frameworkId = argParser.get<std::string>("framework");
+
+    CV_Assert(!model.empty());
+
+    enableModelDiagnostics(true);
+    redirectError(diagnosticsErrorCallback, NULL);
+
+    Net ocvNet = readNet(model, config, frameworkId);
+
+    return 0;
+}
index 7722494..0743de0 100644 (file)
@@ -100,6 +100,18 @@ CV__DNN_INLINE_NS_BEGIN
     CV_EXPORTS std::vector< std::pair<Backend, Target> > getAvailableBackends();
     CV_EXPORTS_W std::vector<Target> getAvailableTargets(dnn::Backend be);
 
+    /**
+     * @brief Enables detailed logging of the DNN model loading stage in the CV DNN API.
+     * @param[in] isDiagnosticsMode Indicates whether diagnostic mode should be set.
+     *
+     * Diagnostic mode provides detailed logging of the model loading stage to explore
+     * potential problems (e.g. an unimplemented layer type).
+     *
+     * @note In diagnostic mode a series of assertions is skipped, which can lead to
+     * application crashes; this is expected behavior.
+     */
+    CV_EXPORTS void enableModelDiagnostics(bool isDiagnosticsMode);
+
     /** @brief This class provides all data needed to initialize layer.
      *
      * It includes dictionary with scalar params (which can be read by using Dict interface),
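
For reference, a minimal sketch of how the new API is intended to be driven from user code, following the same sequence as apps/model-diagnostics/model_diagnostics.cpp above (the model path is a placeholder):

// Minimal sketch, assuming an ONNX model at a placeholder path "model.onnx".
#include <opencv2/dnn.hpp>

int main()
{
    cv::dnn::enableModelDiagnostics(true);              // importer logs problems and continues instead of failing
    cv::dnn::Net net = cv::dnn::readNet("model.onnx");  // unsupported layers are reported during import
    return 0;
}
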
diff --git a/modules/dnn/include/opencv2/dnn/layer_reg.private.hpp b/modules/dnn/include/opencv2/dnn/layer_reg.private.hpp
new file mode 100644 (file)
index 0000000..46a58f0
--- /dev/null
@@ -0,0 +1,23 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_DNN_LAYER_REG_HPP
+#define OPENCV_DNN_LAYER_REG_HPP
+#include <opencv2/dnn.hpp>
+
+namespace cv {
+namespace dnn {
+CV__DNN_INLINE_NS_BEGIN
+//! @addtogroup dnn
+//! @{
+
+//! Registry of DNN layer types and their registered constructors.
+typedef std::map<std::string, std::vector<LayerFactory::Constructor> > LayerFactory_Impl;
+LayerFactory_Impl& getLayerFactoryImpl();
+
+//! @}
+CV__DNN_INLINE_NS_END
+}
+}
+#endif
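
As an illustrative sketch of what this private header exposes (assuming, as for onnx_importer.cpp, compilation inside the dnn module where the header is reachable; the helper name is hypothetical), checking whether a layer type has a registered constructor looks roughly like this:

// Illustrative sketch only: layer_reg.private.hpp is internal to the dnn module.
#include <opencv2/dnn/layer_reg.private.hpp>

static bool isLayerTypeRegistered(const std::string& type)
{
    // getLayerFactoryImpl() returns the map from layer type name to registered constructors.
    cv::dnn::LayerFactory_Impl& factory = cv::dnn::getLayerFactoryImpl();
    cv::dnn::LayerFactory_Impl::const_iterator it = factory.find(type);
    return it != factory.end() && !it->second.empty();
}

The diagnostic catch block added to onnx_importer.cpp below performs the same kind of lookup to decide whether a failed node refers to a registered layer type.
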
index 06ed6d3..668cce8 100644 (file)
@@ -63,6 +63,7 @@
 #include <memory>
 #include <opencv2/dnn/shape_utils.hpp>
 #include <opencv2/imgproc.hpp>
+#include <opencv2/dnn/layer_reg.private.hpp>
 
 #include <opencv2/core/utils/configuration.private.hpp>
 #include <opencv2/core/utils/logger.hpp>
@@ -93,6 +94,13 @@ static bool DNN_CHECK_NAN_INF = utils::getConfigurationParameterBool("OPENCV_DNN
 static bool DNN_CHECK_NAN_INF_DUMP = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF_DUMP", false);
 static bool DNN_CHECK_NAN_INF_RAISE_ERROR = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF_RAISE_ERROR", false);
 
+bool DNN_DIAGNOSTICS_RUN = false;
+
+void enableModelDiagnostics(bool isDiagnosticsMode)
+{
+    DNN_DIAGNOSTICS_RUN = isDiagnosticsMode;
+}
+
 using std::vector;
 using std::map;
 using std::make_pair;
@@ -5310,15 +5318,13 @@ static Mutex& getLayerFactoryMutex()
     return *instance;
 }
 
-typedef std::map<String, std::vector<LayerFactory::Constructor> > LayerFactory_Impl;
-
 static LayerFactory_Impl& getLayerFactoryImpl_()
 {
     static LayerFactory_Impl impl;
     return impl;
 }
 
-static LayerFactory_Impl& getLayerFactoryImpl()
+LayerFactory_Impl& getLayerFactoryImpl()
 {
     static LayerFactory_Impl* volatile instance = NULL;
     if (instance == NULL)
index ff997c3..54e2340 100644 (file)
@@ -80,7 +80,7 @@ public:
             CV_Assert(!params.has("begin") && !params.has("size") && !params.has("end"));
             const DictValue &indicesValue = params.get("slice_point");
             sliceRanges.resize(indicesValue.size() + 1,
-                               std::vector<Range>(axis + 1, Range::all()));
+                               std::vector<Range>(std::max(axis,0) + 1, Range::all()));
             int prevSlice = 0;
             for (int i = 0; i < indicesValue.size(); ++i)
             {
index f2923fe..7826f2b 100644 (file)
 #include "../graph_simplifier.hpp"
 #include "onnx_graph_simplifier.hpp"
 
+#include <opencv2/core/utils/logger.hpp>
 #include <queue>
 
 namespace cv { namespace dnn {
 CV__DNN_INLINE_NS_BEGIN
 
+extern bool DNN_DIAGNOSTICS_RUN;
+
 // This wrapper can behave differently for fake input nodes and real graph nodes.
 class ONNXNodeWrapper : public ImportNodeWrapper
 {
@@ -639,8 +642,17 @@ Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto)
         }
     }
     else
-        CV_Error(Error::StsUnsupportedFormat, "Unsupported data type: " +
-                        opencv_onnx::TensorProto_DataType_Name(datatype));
+    {
+        std::string errorMsg = "Unsupported data type: " +
+                            opencv_onnx::TensorProto_DataType_Name(datatype);
+
+        if (!DNN_DIAGNOSTICS_RUN)
+        {
+            CV_Error(Error::StsUnsupportedFormat, errorMsg);
+        }
+        CV_LOG_ERROR(NULL, errorMsg);
+        return blob;
+    }
     if (tensor_proto.dims_size() == 0)
         blob.dims = 1;  // To force 1-dimensional cv::Mat for scalars.
     return blob;
index b18c999..98714bb 100644 (file)
@@ -8,6 +8,8 @@
 #include "../precomp.hpp"
 #include <opencv2/dnn/shape_utils.hpp>
 
+#include <opencv2/dnn/layer_reg.private.hpp>
+
 #include <opencv2/core/utils/logger.defines.hpp>
 #undef CV_LOG_STRIP_LEVEL
 #define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG + 1
@@ -37,6 +39,7 @@ namespace cv {
 namespace dnn {
 CV__DNN_INLINE_NS_BEGIN
 
+extern bool DNN_DIAGNOSTICS_RUN;
 
 class ONNXImporter
 {
@@ -58,11 +61,12 @@ class ONNXImporter
     void addConstant(const std::string& name, const Mat& blob);
     void addLayer(LayerParams& layerParams,
                   const opencv_onnx::NodeProto& node_proto);
+    static const std::set<String>& getSupportedTypes();
 
 public:
 
     ONNXImporter(Net& net, const char *onnxFile)
-        : dstNet(net)
+        : dstNet(net), utilNet()
     {
         hasDynamicShapes = false;
         CV_Assert(onnxFile);
@@ -83,7 +87,7 @@ public:
     }
 
     ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer)
-        : dstNet(net)
+        : dstNet(net), utilNet()
     {
         hasDynamicShapes = false;
         CV_LOG_DEBUG(NULL, "DNN/ONNX: processing in-memory ONNX model (" << sizeBuffer << " bytes)");
@@ -110,6 +114,7 @@ public:
 
 protected:
     Net& dstNet;
+    Net utilNet;
 
     opencv_onnx::GraphProto graph_proto;
     std::string framework_name;
@@ -182,6 +187,10 @@ std::map<std::string, Mat> ONNXImporter::getGraphTensors(
     tensor_proto = graph_proto.initializer(i);
     Mat mat = getMatFromTensor(tensor_proto);
     releaseONNXTensor(tensor_proto);
+
+    if (DNN_DIAGNOSTICS_RUN && mat.empty())
+        continue;
+
     layers_weights.insert(std::make_pair(tensor_proto.name(), mat));
   }
   return layers_weights;
@@ -201,118 +210,132 @@ LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_prot
         opencv_onnx::AttributeProto attribute_proto = node_proto.attribute(i);
         std::string attribute_name = attribute_proto.name();
 
-        if(attribute_name == "kernel_shape")
-        {
-            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
-            lp.set("kernel_size", parse(attribute_proto.ints()));
-        }
-        else if(attribute_name == "strides")
-        {
-            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
-            lp.set("stride", parse(attribute_proto.ints()));
-        }
-        else if(attribute_name == "pads")
+        try
         {
-            if (node_proto.op_type() == "Pad")
+            if(attribute_name == "kernel_shape")
             {
-                // Padding layer.
-                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
-                // We need to shuffle it to begin0, end0, begin1, end1, ...
-                CV_Assert(attribute_proto.ints_size() % 2 == 0);
-                const int dims = attribute_proto.ints_size() / 2;
-                std::vector<int32_t> paddings;
-                paddings.reserve(attribute_proto.ints_size());
-                for (int i = 0; i < dims; ++i)
+                CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
+                lp.set("kernel_size", parse(attribute_proto.ints()));
+            }
+            else if(attribute_name == "strides")
+            {
+                CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
+                lp.set("stride", parse(attribute_proto.ints()));
+            }
+            else if(attribute_name == "pads")
+            {
+                if (node_proto.op_type() == "Pad")
                 {
-                    paddings.push_back(attribute_proto.ints(i));
-                    paddings.push_back(attribute_proto.ints(dims + i));
+                    // Padding layer.
+                    // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
+                    // We need to shuffle it to begin0, end0, begin1, end1, ...
+                    CV_Assert(attribute_proto.ints_size() % 2 == 0);
+                    const int dims = attribute_proto.ints_size() / 2;
+                    std::vector<int32_t> paddings;
+                    paddings.reserve(attribute_proto.ints_size());
+                    for (int i = 0; i < dims; ++i)
+                    {
+                        paddings.push_back(attribute_proto.ints(i));
+                        paddings.push_back(attribute_proto.ints(dims + i));
+                    }
+                    lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
+                }
+                else
+                {
+                    // Convolution or pooling.
+                    CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
+                    lp.set("pad", parse(attribute_proto.ints()));
                 }
-                lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
             }
-            else
+            else if(attribute_name == "auto_pad")
             {
-                // Convolution or pooling.
-                CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
-                lp.set("pad", parse(attribute_proto.ints()));
+                if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
+                    lp.set("pad_mode",  "SAME");
+                }
+                else if (attribute_proto.s() == "VALID") {
+                    lp.set("pad_mode", "VALID");
+                }
             }
-        }
-        else if(attribute_name == "auto_pad")
-        {
-            if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
-                lp.set("pad_mode",  "SAME");
+            else if(attribute_name == "dilations")
+            {
+                CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
+                lp.set("dilation", parse(attribute_proto.ints()));
             }
-            else if (attribute_proto.s() == "VALID") {
-                lp.set("pad_mode", "VALID");
+            else if (attribute_proto.has_i())
+            {
+                ::google::protobuf::int64 src = attribute_proto.i();
+                if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
+                    CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
+                else
+                    lp.set(attribute_name, saturate_cast<int32_t>(src));
+            }
+            else if (attribute_proto.has_f())
+            {
+                lp.set(attribute_name, attribute_proto.f());
+            }
+            else if (attribute_proto.has_s())
+            {
+                lp.set(attribute_name, attribute_proto.s());
+            }
+            else if (attribute_proto.floats_size() > 0)
+            {
+                lp.set(attribute_name, DictValue::arrayReal(
+                    attribute_proto.floats().data(), attribute_proto.floats_size()));
+            }
+            else if (attribute_proto.ints_size() > 0)
+            {
+                lp.set(attribute_name, parse(attribute_proto.ints()));
+            }
+            else if (attribute_proto.has_t())
+            {
+                opencv_onnx::TensorProto tensor = attribute_proto.t();
+                Mat blob = getMatFromTensor(tensor);
+                lp.blobs.push_back(blob);
+            }
+            else if (attribute_proto.has_g())
+            {
+                CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: 'Graph' is not supported", attribute_name.c_str()));
+            }
+            else if (attribute_proto.graphs_size() > 0)
+            {
+                CV_Error(Error::StsNotImplemented,
+                        cv::format("DNN/ONNX/Attribute[%s]: 'Graphs' (%d) in attributes is not supported",
+                                attribute_name.c_str(), attribute_proto.graphs_size())
+                );
+            }
+            else if (attribute_proto.strings_size() > 0)
+            {
+                std::string msg = cv::format("DNN/ONNX/Attribute[%s]: 'Strings' (%d) are not supported",
+                        attribute_name.c_str(), attribute_proto.strings_size());
+                CV_LOG_ERROR(NULL, msg);
+                for (int i = 0; i < attribute_proto.strings_size(); i++)
+                {
+                    CV_LOG_ERROR(NULL, "    Attribute[" << attribute_name << "].string(" << i << ") = '" << attribute_proto.strings(i) << "'");
+                }
+                CV_Error(Error::StsNotImplemented, msg);
+            }
+            else if (attribute_proto.tensors_size() > 0)
+            {
+                CV_Error(Error::StsNotImplemented,
+                        cv::format("DNN/ONNX/Attribute[%s]: 'Tensors' (%d) in attributes are not supported",
+                                attribute_name.c_str(), attribute_proto.tensors_size())
+                );
             }
-        }
-        else if(attribute_name == "dilations")
-        {
-            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
-            lp.set("dilation", parse(attribute_proto.ints()));
-        }
-        else if (attribute_proto.has_i())
-        {
-            ::google::protobuf::int64 src = attribute_proto.i();
-            if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
-                CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
             else
-                lp.set(attribute_name, saturate_cast<int32_t>(src));
-        }
-        else if (attribute_proto.has_f())
-        {
-            lp.set(attribute_name, attribute_proto.f());
-        }
-        else if (attribute_proto.has_s())
-        {
-            lp.set(attribute_name, attribute_proto.s());
-        }
-        else if (attribute_proto.floats_size() > 0)
-        {
-            lp.set(attribute_name, DictValue::arrayReal(
-                attribute_proto.floats().data(), attribute_proto.floats_size()));
-        }
-        else if (attribute_proto.ints_size() > 0)
-        {
-            lp.set(attribute_name, parse(attribute_proto.ints()));
-        }
-        else if (attribute_proto.has_t())
-        {
-            opencv_onnx::TensorProto tensor = attribute_proto.t();
-            Mat blob = getMatFromTensor(tensor);
-            lp.blobs.push_back(blob);
-        }
-        else if (attribute_proto.has_g())
-        {
-            CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: 'Graph' is not supported", attribute_name.c_str()));
-        }
-        else if (attribute_proto.graphs_size() > 0)
-        {
-            CV_Error(Error::StsNotImplemented,
-                    cv::format("DNN/ONNX/Attribute[%s]: 'Graphs' (%d) in attributes is not supported",
-                            attribute_name.c_str(), attribute_proto.graphs_size())
-            );
-        }
-        else if (attribute_proto.strings_size() > 0)
-        {
-            std::string msg = cv::format("DNN/ONNX/Attribute[%s]: 'Strings' (%d) are not supported",
-                    attribute_name.c_str(), attribute_proto.strings_size());
-            CV_LOG_ERROR(NULL, msg);
-            for (int i = 0; i < attribute_proto.strings_size(); i++)
             {
-                CV_LOG_ERROR(NULL, "    Attribute[" << attribute_name << "].string(" << i << ") = '" << attribute_proto.strings(i) << "'");
+                CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: unsupported attribute format", attribute_name.c_str()));
             }
-            CV_Error(Error::StsNotImplemented, msg);
         }
-        else if (attribute_proto.tensors_size() > 0)
+        catch (const cv::Exception& e)
         {
-            CV_Error(Error::StsNotImplemented,
-                    cv::format("DNN/ONNX/Attribute[%s]: 'Tensors' (%d) in attributes are not supported",
-                            attribute_name.c_str(), attribute_proto.tensors_size())
-            );
-        }
-        else
-        {
-            CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: unsupported attribute format", attribute_name.c_str()));
+            CV_UNUSED(e);
+            if (DNN_DIAGNOSTICS_RUN)
+            {
+                CV_LOG_ERROR(NULL, "DNN/ONNX: Potential problem with processing attributes for node " << node_proto.name() << ", attribute " << attribute_name
+                );
+                continue;
+            }
+            throw;
         }
     }
     return lp;
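
The begin/end reordering applied to the "pads" attribute of Pad nodes near the top of the try block above is easiest to follow with concrete values; a small self-contained sketch with illustrative numbers only:

#include <cstdint>
#include <vector>

// Worked example of the "pads" reshuffle (illustrative values, not part of the patch).
int main()
{
    const std::vector<int64_t> pads = {1, 2, 3, 4};    // ONNX order: begin0, begin1, end0, end1
    const size_t dims = pads.size() / 2;
    std::vector<int32_t> paddings;
    for (size_t i = 0; i < dims; ++i)
    {
        paddings.push_back((int32_t)pads[i]);          // begin_i
        paddings.push_back((int32_t)pads[dims + i]);   // end_i
    }
    // paddings == {1, 3, 2, 4}: begin0, end0, begin1, end1, as the Padding layer expects.
    return 0;
}
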
@@ -338,7 +361,11 @@ Mat ONNXImporter::getBlob(const std::string& input_name)
 void ONNXImporter::addLayer(LayerParams& layerParams,
                             const opencv_onnx::NodeProto& node_proto)
 {
-    int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
+    int id;
+    if (DNN_DIAGNOSTICS_RUN)
+        id = utilNet.addLayer(layerParams.name, layerParams.type, layerParams);
+    else
+        id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
     for (int i = 0; i < node_proto.output_size(); ++i)
     {
         layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(id, i)));
@@ -351,7 +378,10 @@ void ONNXImporter::addLayer(LayerParams& layerParams,
         const std::string& input_name = node_proto.input(j);
         IterLayerId_t layerId = layer_id.find(input_name);
         if (layerId != layer_id.end()) {
-            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
+            if (DNN_DIAGNOSTICS_RUN)
+                utilNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
+            else
+                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
             ++inpNum;
             // Collect input shapes.
             IterShape_t shapeIt = outShapes.find(input_name);
@@ -360,7 +390,11 @@ void ONNXImporter::addLayer(LayerParams& layerParams,
         }
     }
     // Compute shape of output blob for this layer.
-    Ptr<Layer> layer = dstNet.getLayer(id);  // FIXIT: avoid instantiation of layers during the import stage
+    Ptr<Layer> layer;
+    if (DNN_DIAGNOSTICS_RUN)
+        layer = utilNet.getLayer(id);
+    else
+        layer = dstNet.getLayer(id);  // FIXIT: avoid instantiation of layers during the import stage
     layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
     for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
     {
@@ -437,8 +471,37 @@ void ONNXImporter::populateNet()
             layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
         }
     }
+    utilNet.setInputsNames(netInputs);
     dstNet.setInputsNames(netInputs);
 
+    if (DNN_DIAGNOSTICS_RUN) {
+        auto &supportedTypes = getSupportedTypes();
+        for (int li = 0; li < layersSize; li++) {
+            const opencv_onnx::NodeProto &node_proto = graph_proto.node(li);
+            std::string name = node_proto.output(0);
+            std::string layer_type = node_proto.op_type();
+            auto registered = supportedTypes.find(layer_type);
+            if (registered == supportedTypes.end()) {
+                CV_LOG_ERROR(NULL, "DNN/ONNX: NOTE: Potential problem with creating node " << name << " with type " << layer_type << ".\n Type "
+                    << layer_type << " IS NOT SUPPORTED!\n"
+                );
+            }
+        }
+        auto oldConstBlobs = constBlobs;
+        auto oldOutShapes = outShapes;
+        auto oldLayerId = layer_id;
+        CV_LOG_INFO(NULL, "DNN/ONNX: start diagnostic run!");
+        for (int li = 0; li < layersSize; li++) {
+            const opencv_onnx::NodeProto &node_proto = graph_proto.node(li);
+            handleNode(node_proto);
+        }
+        CV_LOG_INFO(NULL, "DNN/ONNX: diagnostic run completed!");
+        constBlobs = oldConstBlobs;
+        outShapes = oldOutShapes;
+        layer_id = oldLayerId;
+        enableModelDiagnostics(false);
+    }
+
     for(int li = 0; li < layersSize; li++)
     {
         const opencv_onnx::NodeProto& node_proto = graph_proto.node(li);
@@ -448,6 +511,80 @@ void ONNXImporter::populateNet()
     CV_LOG_DEBUG(NULL, "DNN/ONNX: import completed!");
 }
 
+const std::set<String>& ONNXImporter::getSupportedTypes()
+{
+    static const std::set<String> layerTypes = {
+        "MaxPool",
+        "AveragePool",
+        "GlobalAveragePool",
+        "GlobalMaxPool",
+        "ReduceMean",
+        "ReduceSum",
+        "ReduceMax",
+        "Slice",
+        "Split",
+        "Add",
+        "Sum",
+        "Sub",
+        "Pow",
+        "Max",
+        "Neg",
+        "Constant",
+        "LSTM",
+        "ImageScaler",
+        "Clip",
+        "LeakyRelu",
+        "Relu",
+        "Elu",
+        "Tanh",
+        "PRelu",
+        "LRN",
+        "InstanceNormalization",
+        "BatchNormalization",
+        "Gemm",
+        "MatMul",
+        "Mul",
+        "Div",
+        "Conv",
+        "ConvTranspose",
+        "Transpose",
+        "Squeeze",
+        "Flatten",
+        "Unsqueeze",
+        "Expand",
+        "Reshape",
+        "Pad",
+        "Shape",
+        "Cast",
+        "ConstantOfShape",
+        "ConstantFill",
+        "Gather",
+        "Concat",
+        "Resize",
+        "Upsample",
+        "SoftMax",
+        "Softmax",
+        "LogSoftmax",
+        "DetectionOutput",
+        "Interp",
+        "CropAndResize",
+        "ROIPooling",
+        "PSROIPooling",
+        "ChannelsPReLU",
+        "Sigmoid",
+        "Swish",
+        "Mish",
+        "AbsVal",
+        "BNLL",
+        "MaxUnpool",
+        "Dropout",
+        "Identity",
+        "Crop",
+        "Normalize"
+    };
+    return layerTypes;
+}
+
 void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
 {
     opencv_onnx::NodeProto node_proto = node_proto_;  // TODO FIXIT
@@ -458,11 +595,11 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
     CV_LOG_DEBUG(NULL, "DNN/ONNX: processing node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
             << cv::format("[%s]:(%s)", layer_type.c_str(), name.c_str())
     );
-
+    LayerParams layerParams;
     try
     {
         // FIXIT not all cases can be repacked into "LayerParams". Importer should handle such cases directly for each "layer_type"
-        LayerParams layerParams = getLayerParams(node_proto);
+        layerParams = getLayerParams(node_proto);
 
         layerParams.name = name;
         layerParams.type = layer_type;
@@ -798,7 +935,11 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                         constParams.name = layerParams.name + "/const";
                         constParams.type = "Const";
                         constParams.blobs.push_back((isSub ? -1 : 1) * blob);
-                        int id = dstNet.addLayer(constParams.name, constParams.type, constParams);
+                        int id;
+                        if (DNN_DIAGNOSTICS_RUN)
+                            id = utilNet.addLayer(constParams.name, constParams.type, constParams);
+                        else
+                            id = dstNet.addLayer(constParams.name, constParams.type, constParams);
                         layer_id.insert(std::make_pair(constParams.name, LayerInfo(id, 0)));
                         outShapes[constParams.name] = shape(blob);
 
@@ -843,12 +984,19 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                     powerParams.type = "Power";
                     powerParams.set("scale", -1);
 
+                    int id;
                     //Create Power layer
-                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
+                    if (DNN_DIAGNOSTICS_RUN)
+                        id = utilNet.addLayer(powerParams.name, powerParams.type, powerParams);
+                    else
+                        id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                     //Connect to input
                     IterLayerId_t layerId = layer_id.find(node_proto.input(1));
                     CV_Assert(layerId != layer_id.end());
-                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+                    if (DNN_DIAGNOSTICS_RUN)
+                        utilNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+                    else
+                        dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                     //Add shape
                     layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                     outShapes[powerParams.name] = outShapes[node_proto.input(1)];
@@ -1035,11 +1183,18 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             layerParams.erase("epsilon");
 
             //Create MVN layer
-            int id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
+            int id;
+            if (DNN_DIAGNOSTICS_RUN)
+                id = utilNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
+            else
+                id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
             //Connect to input
             IterLayerId_t layerId = layer_id.find(node_proto.input(0));
             CV_Assert(layerId != layer_id.end());
-            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+            if (DNN_DIAGNOSTICS_RUN)
+                utilNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+            else
+                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
             //Add shape
             layer_id.insert(std::make_pair(mvnParams.name, LayerInfo(id, 0)));
             outShapes[mvnParams.name] = outShapes[node_proto.input(0)];
@@ -1232,12 +1387,19 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                     powerParams.type = "Power";
                     powerParams.set("power", -1);
 
+                    int id;
                     //Create Power layer
-                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
+                    if (DNN_DIAGNOSTICS_RUN)
+                        id = utilNet.addLayer(powerParams.name, powerParams.type, powerParams);
+                    else
+                        id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                     //Connect to input
                     IterLayerId_t layerId = layer_id.find(node_proto.input(1));
                     CV_Assert(layerId != layer_id.end());
-                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+                    if (DNN_DIAGNOSTICS_RUN)
+                        utilNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+                    else
+                        dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                     //Add shape
                     layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                     outShapes[powerParams.name] = outShapes[node_proto.input(1)];
@@ -1922,9 +2084,31 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
     }
     catch (const cv::Exception& e)
     {
-        CV_LOG_ERROR(NULL, "DNN/ONNX: ERROR during processing node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
-                << cv::format("[%s]:(%s)", layer_type.c_str(), name.c_str())
-        );
+        if (DNN_DIAGNOSTICS_RUN)
+        {
+            CV_LOG_ERROR(NULL, "DNN/ONNX: Potential problem during processing node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
+                    << cv::format("[%s]:(%s)", layer_type.c_str(), name.c_str()) << "\n" << e.msg
+            );
+            auto& registeredLayers = getLayerFactoryImpl();
+            if (registeredLayers.find(layerParams.type) != registeredLayers.end())
+            {
+                try
+                {
+                    Ptr<Layer> layer = LayerFactory::createLayerInstance(layerParams.type, layerParams);
+                }
+                catch (const std::exception& e)
+                {
+                    CV_LOG_ERROR(NULL, "DNN/ONNX: Layer of type " << layerParams.type << "(" << layer_type << ") cannot be created with parameters " << layerParams << ". Error: " << e.what()
+                    );
+                }
+            }
+        }
+        else
+        {
+            CV_LOG_ERROR(NULL, "DNN/ONNX: ERROR during processing node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
+                    << cv::format("[%s]:(%s)", layer_type.c_str(), name.c_str())
+            );
+        }
         for (int i = 0; i < node_proto.input_size(); i++)
         {
             CV_LOG_INFO(NULL, "    Input[" << i << "] = '" << node_proto.input(i) << "'");
@@ -1933,7 +2117,16 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
         {
             CV_LOG_INFO(NULL, "    Output[" << i << "] = '" << node_proto.output(i) << "'");
         }
-        CV_Error(Error::StsError, cv::format("Node [%s]:(%s) parse error: %s", layer_type.c_str(), name.c_str(), e.what()));
+        if (DNN_DIAGNOSTICS_RUN)
+        {
+            for (int i = 0; i < node_proto.output_size(); ++i)
+            {
+                layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(0, i)));
+                outShapes[node_proto.output(i)] = outShapes[node_proto.input(0)];
+            }
+        }
+        else
+            CV_Error(Error::StsError, cv::format("Node [%s]:(%s) parse error: %s", layer_type.c_str(), name.c_str(), e.what()));
     }
 }