Remove Shift deep learning layer
author     Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
           Mon, 28 May 2018 15:13:32 +0000 (18:13 +0300)
committer  Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
           Mon, 28 May 2018 15:18:56 +0000 (18:18 +0300)
modules/dnn/include/opencv2/dnn/all_layers.hpp
modules/dnn/src/layers/scale_layer.cpp
modules/dnn/src/layers/shift_layer.cpp [deleted file]
modules/dnn/src/tensorflow/tf_importer.cpp
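In short: the standalone Shift layer is removed. ShiftLayer::create becomes a thin factory that builds a bias-only Scale layer (bias_term=true, axis=0), the Scale layer learns to run without trainable weights, and the TensorFlow importer maps scalar Add/Sub constants to a Power layer while per-channel constants keep using Shift.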

diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index f4e93b7..f2124dd 100644
@@ -503,7 +503,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
     class CV_EXPORTS ShiftLayer : public Layer
     {
     public:
-        static Ptr<ShiftLayer> create(const LayerParams& params);
+        static Ptr<Layer> create(const LayerParams& params);
     };
 
     class CV_EXPORTS PriorBoxLayer : public Layer
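
Note: create() now returns the base Ptr<Layer>, since the object it builds is a ScaleLayer rather than a dedicated ShiftLayer (see scale_layer.cpp below). A minimal caller-side sketch, assuming a hypothetical CV_32F Mat named bias:

    LayerParams params;
    params.blobs.push_back(bias);                   // single bias blob
    Ptr<Layer> layer = ShiftLayer::create(params);  // no longer Ptr<ShiftLayer>
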
diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp
index f4b2bae..3c7d63a 100644
@@ -28,6 +28,7 @@ public:
         setParamsFrom(params);
         hasBias = params.get<bool>("bias_term", false);
         axis = params.get<int>("axis", 1);
+        hasWeights = false;
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -35,11 +36,16 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
-        CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == 1 + hasBias);
         outputs.assign(1, inputs[0]);
         return true;
     }
 
+    virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
+    {
+        hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
+        CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
+    }
+
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
         return backendId == DNN_BACKEND_DEFAULT ||
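
The rule finalize() applies can be restated as a standalone sketch (not part of the patch): a single stored blob is the weights unless bias_term was set, in which case it is the bias; two blobs mean weights plus bias; zero blobs mean the weights arrive as the second input.

    // Mirrors the finalize() logic above.
    static bool resolveHasWeights(int numBlobs, bool hasBias)
    {
        return numBlobs == 2 || (numBlobs == 1 && !hasBias);
    }
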
@@ -63,10 +69,15 @@ public:
 
         Mat &inpBlob = *inputs[0];
         Mat &outBlob = outputs[0];
-        Mat &weights = blobs.empty() ? *inputs[1] : blobs[0];
-        Mat bias = hasBias ? blobs.back() : Mat();
+        // There is a mode in which the first input blob is multiplied by the
+        // second one instead of by trainable weights.
+        Mat weights = blobs.empty() ? *inputs[1] : (hasWeights ? blobs[0] : Mat());
+        Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
+        if (!weights.empty())
+            weights = weights.reshape(1, 1);
         MatShape inpShape = shape(inpBlob);
-        const int numWeights = weights.total();
+        const int numWeights = !weights.empty() ? weights.total() : bias.total();
+        CV_Assert(numWeights != 0, !hasWeights || !hasBias || weights.total() == bias.total());
 
         int endAxis;
         for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
@@ -84,15 +95,15 @@ public:
 
         if (endAxis != inpBlob.dims)
         {
-            float* weightsData = (float*)weights.data;
+            float* weightsData = !weights.empty() ? (float*)weights.data : 0;
             float* biasesData = hasBias ? (float*)bias.data : 0;
             int spatialSize = total(inpShape, endAxis);  // spatialSize != 1
             for (int i = 0; i < numSlices; ++i)
             {
                 for (int j = 0; j < numWeights; ++j)
                 {
-                    float w = weightsData[j];
-                    float b = hasBias ? biasesData[j] : 0;
+                    float w = weightsData ? weightsData[j] : 1;
+                    float b = biasesData ? biasesData[j] : 0;
                     Mat inpSlice(1, spatialSize, CV_32F, inpData);
                     Mat outSlice(1, spatialSize, CV_32F, outData);
                     inpSlice.convertTo(outSlice, CV_32F, w, b);
@@ -105,12 +116,16 @@ public:
         {
             for (int i = 0; i < numSlices; ++i)
             {
-                Mat inpSlice(weights.dims, weights.size, CV_32F, inpData);
-                Mat outSlice(weights.dims, weights.size, CV_32F, outData);
-                multiply(inpSlice, weights, outSlice);
-                if (hasBias)
-                    add(outSlice, bias, outSlice);
-
+                Mat inpSlice(1, numWeights, CV_32F, inpData);
+                Mat outSlice(1, numWeights, CV_32F, outData);
+                if (!weights.empty())
+                {
+                    multiply(inpSlice, weights, outSlice);
+                    if (hasBias)
+                        add(outSlice, bias, outSlice);
+                }
+                else if (hasBias)
+                    add(inpSlice, bias, outSlice);
                 inpData += numWeights;
                 outData += numWeights;
             }
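
Both branches of forward() now reduce to the same elementwise rule. A freestanding sketch over contiguous float data, where a null pointer stands for an absent factor or addend (names are illustrative, not from the patch):

    static void scaleShiftSlices(const float* inp, float* out,
                                 int numSlices, int numWeights,
                                 const float* w, const float* b)
    {
        for (int i = 0; i < numSlices; ++i)
            for (int j = 0; j < numWeights; ++j)
            {
                float v = inp[i * numWeights + j];
                if (w) v *= w[j];  // skipped in the bias-only mode
                if (b) v += b[j];
                out[i * numWeights + j] = v;
            }
    }
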
@@ -157,11 +172,15 @@ public:
 
         const int numChannels = blobs[0].total();
 
-        auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
-        Halide::Expr topExpr = input * weights(c);
+        Halide::Expr topExpr = input;
+        if (hasWeights)
+        {
+            auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
+            topExpr *= weights(c);
+        }
         if (hasBias)
         {
-            auto bias = wrapToHalideBuffer(blobs[1], {numChannels});
+            auto bias = wrapToHalideBuffer(blobs.back(), {numChannels});
             topExpr += bias(c);
         }
         top(x, y, c, n) = topExpr;
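
In the Halide path the expression is now assembled incrementally: topExpr starts as the raw input, gains the per-channel multiply only when hasWeights is set, and the per-channel add only when hasBias is set, i.e. top(x, y, c, n) = input * weights(c) + bias(c) with either term optional.
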
@@ -178,10 +197,24 @@ public:
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
 
+        CV_Assert(!blobs.empty());
         const size_t numChannels = blobs[0].total();
-        ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
+        if (hasWeights)
+        {
+            ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
+        }
+        else
+        {
+            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
+                                                                    {numChannels});
+            weights->allocate();
+
+            std::vector<float> ones(numChannels, 1);
+            weights->set(ones);
+            ieLayer->_weights = weights;
+        }
         if (hasBias)
-            ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {numChannels}, InferenceEngine::Layout::C);
+            ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE
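
The ones-filled weights blob is needed because Inference Engine's ScaleShift layer has no bias-only form (the same workaround the deleted shift_layer.cpp used, see below), so a bias-only Scale is expressed as x * 1 + b.
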
@@ -190,8 +223,8 @@ public:
 
     void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
     {
-        scale = !blobs.empty() ? blobs[0] : Mat();
-        shift = hasBias ? blobs[1] : Mat();
+        scale = hasWeights ? blobs[0] : Mat();
+        shift = hasBias ? blobs.back() : Mat();
     }
 
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
@@ -205,6 +238,9 @@ public:
         }
         return flops;
     }
+
+private:
+    bool hasWeights;
 };
 
 
@@ -213,5 +249,16 @@ Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
     return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
 }
 
+Ptr<Layer> ShiftLayer::create(const LayerParams& params)
+{
+    LayerParams scaleParams;
+    scaleParams.name = params.name;
+    scaleParams.type = "Scale";
+    scaleParams.blobs = params.blobs;
+    scaleParams.set("bias_term", true);
+    scaleParams.set("axis", 0);
+    return Ptr<ScaleLayer>(new ScaleLayerImpl(scaleParams));
+}
+
 }  // namespace dnn
 }  // namespace cv
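
ShiftLayer::create is now a shim over the Scale layer. A sketch of the equivalence it sets up, again assuming a hypothetical bias Mat:

    LayerParams p;
    p.blobs.push_back(bias);
    Ptr<Layer> shift = ShiftLayer::create(p);  // internally builds the same as:

    LayerParams q;
    q.blobs.push_back(bias);
    q.set("bias_term", true);
    q.set("axis", 0);
    Ptr<Layer> scale = ScaleLayer::create(q);
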
diff --git a/modules/dnn/src/layers/shift_layer.cpp b/modules/dnn/src/layers/shift_layer.cpp
deleted file mode 100644
index 7c3bb14..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-
-// Copyright (C) 2016, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-
-/*
-Implementation of shift layer, which adds up const values to blob.
-*/
-
-#include "../precomp.hpp"
-#include "../op_inf_engine.hpp"
-#include <opencv2/dnn/shape_utils.hpp>
-
-namespace cv
-{
-namespace dnn
-{
-
-class ShiftLayerImpl CV_FINAL : public ShiftLayer
-{
-public:
-    ShiftLayerImpl(const LayerParams &params)
-    {
-        setParamsFrom(params);
-        CV_Assert(blobs.size() == 1);
-    }
-
-    virtual bool supportBackend(int backendId) CV_OVERRIDE
-    {
-        return backendId == DNN_BACKEND_DEFAULT ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
-    }
-
-    bool getMemoryShapes(const std::vector<MatShape> &inputs,
-                         const int requiredOutputs,
-                         std::vector<MatShape> &outputs,
-                         std::vector<MatShape> &internals) const CV_OVERRIDE
-    {
-        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
-        internals.assign(1, shape(1, total(inputs[0], 2)));
-        return true;
-    }
-
-    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
-    {
-        CV_TRACE_FUNCTION();
-        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
-
-        Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
-    }
-
-    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
-    {
-        CV_TRACE_FUNCTION();
-        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
-
-        CV_Assert(inputs.size() > 0);
-        CV_Assert(blobs.size() > 0);
-
-        if(inputs[0]->dims == blobs[0].dims)
-        {
-            for (size_t ii = 0; ii < outputs.size(); ii++)
-            {
-                Mat &inpBlob = *inputs[ii];
-                Mat &outBlob = outputs[ii];
-
-                outBlob = inpBlob + blobs[0];
-            }
-        }
-        else
-        {
-            Mat biasOnesMat = internals[0];
-            biasOnesMat.setTo(1);
-            for (size_t ii = 0; ii < outputs.size(); ii++)
-            {
-                Mat &inpBlob = *inputs[ii];
-                Mat &outBlob = outputs[ii];
-
-                inpBlob.copyTo(outBlob);
-
-                for (int n = 0; n < inpBlob.size[0]; n++)
-                {
-                    Mat dstMat(inpBlob.size[1], inpBlob.size[2] * inpBlob.size[3],
-                               outBlob.type(), outBlob.ptr(n));
-                    gemm(blobs[0], biasOnesMat, 1, dstMat, 1, dstMat); //TODO: gemv
-                }
-            }
-        }
-    }
-
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-#ifdef HAVE_INF_ENGINE
-        // Inference Engine has no layer just for biases. Create a linear
-        // transformation layer with ones weights.
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "ScaleShift";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-
-        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                {blobs[0].total()});
-        weights->allocate();
-
-        std::vector<float> ones(blobs[0].total(), 1);
-        weights->set(ones);
-        ieLayer->_weights = weights;
-
-        ieLayer->_biases = wrapToInfEngineBlob(blobs[0]);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif  // HAVE_INF_ENGINE
-        return Ptr<BackendNode>();
-    }
-
-    void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
-    {
-        scale = Mat();
-        shift = blobs[0];
-    }
-
-    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
-                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
-    {
-        (void)outputs; // suppress unused variable warning
-        long flops = 0;
-
-        for(int i= 0; i < inputs.size(); i++)
-        {
-           flops += total(inputs[i]);
-        }
-
-        return flops;
-    }
-};
-
-Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
-{
-    return Ptr<ShiftLayer>(new ShiftLayerImpl(params));
-}
-
-}
-}
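
For reference, the deleted implementation broadcast its bias over spatial positions with a rank-1 gemm update (the bias column times a ones row) and carried its own ScaleShift workaround for Inference Engine; both duties now live in the Scale layer above.
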
diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp
index efedbce..195b516 100644
@@ -743,10 +743,20 @@ void TFImporter::populateNet(Net dstNet)
 
             if (haveConst)
             {
-                layerParams.blobs.resize(1);
-                blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
+                Mat values = getTensorContent(getConstBlob(layer, value_id));
+                CV_Assert(values.type() == CV_32FC1);
 
-                int id = dstNet.addLayer(name, "Shift", layerParams);
+                int id;
+                if (values.total() == 1)  // is a scalar.
+                {
+                    layerParams.set("shift", values.at<float>(0));
+                    id = dstNet.addLayer(name, "Power", layerParams);
+                }
+                else  // is a vector
+                {
+                    layerParams.blobs.resize(1, values);
+                    id = dstNet.addLayer(name, "Shift", layerParams);
+                }
                 layer_id[name] = id;
 
                 // one input only
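
The scalar case can be routed to a Power layer because Power computes (shift + scale*x)^power with scale and power defaulting to 1, so setting only "shift" reduces it to x + c; per-channel constants still go through the (now Scale-backed) Shift layer.
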
@@ -777,11 +787,21 @@ void TFImporter::populateNet(Net dstNet)
             }
             CV_Assert(haveConst);
 
-            layerParams.blobs.resize(1);
-            blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
-            layerParams.blobs[0] *= -1;
+            Mat values = getTensorContent(getConstBlob(layer, value_id));
+            CV_Assert(values.type() == CV_32FC1);
+            values *= -1.0f;
 
-            int id = dstNet.addLayer(name, "Shift", layerParams);
+            int id;
+            if (values.total() == 1)  // is a scalar.
+            {
+                layerParams.set("shift", values.at<float>(0));
+                id = dstNet.addLayer(name, "Power", layerParams);
+            }
+            else  // is a vector
+            {
+                layerParams.blobs.resize(1, values);
+                id = dstNet.addLayer(name, "Shift", layerParams);
+            }
             layer_id[name] = id;
 
             // one input only
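
The Sub case reuses the same scalar/vector dispatch by negating the constant up front, so x - c is handled as x + (-c).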