Blank and L2-normalization layers from Intel's Inference Engine
author    Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
          Thu, 12 Apr 2018 12:21:08 +0000 (15:21 +0300)
committer Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
          Thu, 12 Apr 2018 12:21:08 +0000 (15:21 +0300)
modules/dnn/src/layers/blank_layer.cpp
modules/dnn/src/layers/normalize_bbox_layer.cpp
modules/dnn/src/op_inf_engine.cpp
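
With these conversions in place, a network containing passthrough (Blank) or L2-normalization nodes, e.g. an SSD-style detector, can be routed through the Inference Engine backend from user code. A minimal usage sketch (model file names are placeholders, not part of this commit):

    // Sketch: select the Inference Engine backend so that layers whose
    // supportBackend() accepts DNN_BACKEND_INFERENCE_ENGINE run through
    // their initInfEngine() conversions; others fall back to the default path.
    #include <opencv2/dnn.hpp>

    int main()
    {
        cv::dnn::Net net = cv::dnn::readNetFromCaffe("ssd.prototxt", "ssd.caffemodel");
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

        cv::Mat img(cv::Size(300, 300), CV_8UC3, cv::Scalar(0));  // dummy input
        net.setInput(cv::dnn::blobFromImage(img, 1.0, cv::Size(300, 300)));
        cv::Mat out = net.forward();
        return 0;
    }
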

diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index 1ed17bb..0794eff 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -40,6 +40,7 @@
 //
 //M*/
 #include "../precomp.hpp"
+#include "../op_inf_engine.hpp"
 
 namespace cv
 {
@@ -53,6 +54,12 @@ public:
         setParamsFrom(params);
     }
 
+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        return backendId == DNN_BACKEND_DEFAULT ||
+               backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+    }
+
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
                          const int requiredOutputs,
                          std::vector<MatShape> &outputs,
@@ -104,6 +111,19 @@ public:
             if (outputs[i].data != inputs[i]->data)
                 inputs[i]->copyTo(outputs[i]);
     }
+
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "Split";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::SplitLayer> ieLayer(new InferenceEngine::SplitLayer(lp));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif  // HAVE_INF_ENGINE
+        return Ptr<BackendNode>();
+    }
 };
 
 Ptr<Layer> BlankLayer::create(const LayerParams& params)
diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp
index 580b6b3..5e8ed65 100644
--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -42,6 +42,7 @@
 
 #include "../precomp.hpp"
 #include "layers_common.hpp"
+#include "../op_inf_engine.hpp"
 
 namespace cv { namespace dnn {
 
@@ -60,6 +61,13 @@ public:
         CV_Assert(pnorm > 0);
     }
 
+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        return backendId == DNN_BACKEND_DEFAULT ||
+               backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
+               pnorm == 2 && !blobs.empty();
+    }
+
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
                          const int requiredOutputs,
                          std::vector<MatShape> &outputs,
@@ -228,6 +236,28 @@ public:
         }
     }
 
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "Normalize";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
+
+        CV_Assert(!blobs.empty());
+
+        ieLayer->params["eps"] = format("%f", epsilon);
+        ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
+        ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
+
+        const int numChannels = blobs[0].total();
+        ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif  // HAVE_INF_ENGINE
+        return Ptr<BackendNode>();
+    }
+
 private:
     int startAxis, endAxis;
 };
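
For reference, the operation this layer maps onto the Inference Engine "Normalize" primitive is an L2 norm over channels at each spatial position (when across_spatial is off), followed by scaling with the learned per-channel weights stored in blobs[0]. A rough standalone sketch, not the actual OpenCV implementation; the exact epsilon placement and channel_shared handling may differ:

    #include <cmath>

    // src/dst are one image's CHW planes; weights holds `channels` scales
    // (a single repeated value corresponds to channel_shared == 1).
    static void l2NormalizeSketch(const float* src, float* dst,
                                  int channels, int spatialSize,
                                  const float* weights, float epsilon)
    {
        for (int s = 0; s < spatialSize; ++s)
        {
            float sumSq = 0.f;
            for (int c = 0; c < channels; ++c)
            {
                float v = src[c * spatialSize + s];
                sumSq += v * v;
            }
            const float invNorm = 1.f / (std::sqrt(sumSq) + epsilon);
            for (int c = 0; c < channels; ++c)
                dst[c * spatialSize + s] = src[c * spatialSize + s] * invNorm * weights[c];
        }
    }
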
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index 1514573..129ed94 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -18,6 +18,11 @@ namespace cv { namespace dnn {
 
 #ifdef HAVE_INF_ENGINE
 
+static int infEngineVersion()
+{
+    return std::atoi(InferenceEngine::GetInferenceEngineVersion()->buildNumber);
+}
+
 InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& _layer)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
 
@@ -58,9 +63,23 @@ static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std:
 {
     std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
     std::reverse(reversedShape.begin(), reversedShape.end());
-    return InferenceEngine::DataPtr(
-      new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32)
-    );
+    if (infEngineVersion() > 5855)
+    {
+        InferenceEngine::Layout l = InferenceEngine::Layout::ANY;
+        if (m.dims == 4)
+            l = InferenceEngine::Layout::NCHW;
+        else if (m.dims == 2)
+            l = InferenceEngine::Layout::NC;
+        return InferenceEngine::DataPtr(
+            new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32, l)
+        );
+    }
+    else
+    {
+        return InferenceEngine::DataPtr(
+            new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32)
+        );
+    }
 }
 
 InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape,
@@ -336,10 +355,9 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
 
     InferenceEngine::StatusCode status;
     InferenceEngine::ResponseDesc resp;
-    const InferenceEngine::Version* v = InferenceEngine::GetInferenceEngineVersion();
 
     plugin = InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(targetDevice);
-    if (std::atoi(v->buildNumber) > 5855)
+    if (infEngineVersion() > 5855 && targetDevice == InferenceEngine::TargetDevice::eCPU)
     {
 #ifdef _WIN32
         InferenceEngine::IExtensionPtr extension =