Concat layer from TensorFlow with constant inputs
author     Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
           Tue, 4 Dec 2018 12:02:22 +0000 (15:02 +0300)
committer  Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
           Tue, 4 Dec 2018 16:41:40 +0000 (19:41 +0300)
modules/dnn/include/opencv2/dnn/all_layers.hpp
modules/dnn/src/init.cpp
modules/dnn/src/layers/const_layer.cpp [new file with mode: 0644]
modules/dnn/src/tensorflow/tf_importer.cpp
modules/dnn/test/test_tf_importer.cpp

diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index 5642423..c6fe6d0 100644
@@ -77,6 +77,15 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         static Ptr<Layer> create(const LayerParams &params);
     };
 
+    /**
+     * Constant layer produces the same data blob at every forward pass.
+     */
+    class CV_EXPORTS ConstLayer : public Layer
+    {
+    public:
+        static Ptr<Layer> create(const LayerParams &params);
+    };
+
     //! LSTM recurrent layer
     class CV_EXPORTS LSTMLayer : public Layer
     {
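
Usage sketch (not part of the patch): the new layer is created like any other dnn layer, with the constant stored as the single entry of LayerParams::blobs (the implementation asserts blobs.size() == 1). Names below are illustrative; assumes <opencv2/dnn.hpp> and the cv/cv::dnn namespaces.

    LayerParams lp;
    lp.name = "my_const";                         // illustrative name
    lp.type = "Const";
    lp.blobs.push_back(Mat::ones(1, 3, CV_32F));  // blob emitted on every forward pass
    Ptr<Layer> constLayer = ConstLayer::create(lp);
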
diff --git a/modules/dnn/src/init.cpp b/modules/dnn/src/init.cpp
index d8b6334..7f6c831 100644
@@ -112,6 +112,7 @@ void initializeLayerFactory()
     CV_DNN_REGISTER_LAYER_CLASS(Dropout,        BlankLayer);
     CV_DNN_REGISTER_LAYER_CLASS(Identity,       BlankLayer);
     CV_DNN_REGISTER_LAYER_CLASS(Silence,        BlankLayer);
+    CV_DNN_REGISTER_LAYER_CLASS(Const,          ConstLayer);
 
     CV_DNN_REGISTER_LAYER_CLASS(Crop,           CropLayer);
     CV_DNN_REGISTER_LAYER_CLASS(Eltwise,        EltwiseLayer);
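
Once registered here, the layer is also reachable through the factory by its type string; a minimal sketch:

    LayerParams params;
    params.blobs.push_back(Mat::zeros(2, 2, CV_32F));
    Ptr<Layer> l = LayerFactory::createLayerInstance("Const", params);
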
diff --git a/modules/dnn/src/layers/const_layer.cpp b/modules/dnn/src/layers/const_layer.cpp
new file mode 100644
index 0000000..339f2ec
--- /dev/null
+++ b/modules/dnn/src/layers/const_layer.cpp
@@ -0,0 +1,68 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+// Copyright (C) 2018, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+
+#include "../precomp.hpp"
+#include "layers_common.hpp"
+
+#ifdef HAVE_OPENCL
+#include "opencl_kernels_dnn.hpp"
+#endif
+
+namespace cv { namespace dnn {
+
+class ConstLayerImpl CV_FINAL : public ConstLayer
+{
+public:
+    ConstLayerImpl(const LayerParams& params)
+    {
+        setParamsFrom(params);
+        CV_Assert(blobs.size() == 1);
+    }
+
+    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                                 const int requiredOutputs,
+                                 std::vector<MatShape> &outputs,
+                                 std::vector<MatShape> &internals) const CV_OVERRIDE
+    {
+        CV_Assert(inputs.empty());
+        outputs.assign(1, shape(blobs[0]));
+        return false;
+    }
+
+#ifdef HAVE_OPENCL
+    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
+    {
+        std::vector<UMat> outputs;
+        outs.getUMatVector(outputs);
+        if (outs.depth() == CV_16S)
+            convertFp16(blobs[0], outputs[0]);
+        else
+            blobs[0].copyTo(outputs[0]);
+        return true;
+    }
+#endif
+
+    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
+    {
+        CV_TRACE_FUNCTION();
+        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
+
+        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
+                   forward_ocl(inputs_arr, outputs_arr, internals_arr))
+
+        std::vector<Mat> outputs;
+        outputs_arr.getMatVector(outputs);
+        blobs[0].copyTo(outputs[0]);
+    }
+};
+
+Ptr<Layer> ConstLayer::create(const LayerParams& params)
+{
+    return Ptr<Layer>(new ConstLayerImpl(params));
+}
+
+}}  // namespace cv::dnn
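
Note the CV_16S branch in forward_ocl: OpenCV's dnn stores half-precision data in CV_16S Mats, so the constant is converted with convertFp16 when the target runs in fp16. A minimal sketch of exercising the CPU forward pass directly (the output Mat must be pre-shaped to match blobs[0], since the layer simply copies the blob into outputs[0]):

    LayerParams lp;
    lp.type = "Const";
    lp.blobs.push_back((Mat_<float>(1, 2) << 1.f, 2.f));
    Ptr<Layer> layer = ConstLayer::create(lp);

    std::vector<Mat> inputs;                  // ConstLayer takes no inputs
    std::vector<Mat> outputs(1), internals;
    outputs[0].create(1, 2, CV_32F);          // must match shape(blobs[0])
    layer->forward(inputs, outputs, internals);
    // outputs[0] now holds {1, 2}
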
diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp
index 4f38c02..a465b7d 100644
@@ -1266,14 +1266,31 @@ void TFImporter::populateNet(Net dstNet)
                 axis = toNCHW(axis);
             layerParams.set("axis", axis);
 
-            int id = dstNet.addLayer(name, "Concat", layerParams);
-            layer_id[name] = id;
-
-
+            // input(0) or input(n-1) is concat_dim
             int from = (type == "Concat" ? 1 : 0);
             int to = (type == "Concat" ? layer.input_size() : layer.input_size() - 1);
 
-            // input(0) or input(n-1) is concat_dim
+            for (int ii = from; ii < to; ii++)
+            {
+                Pin inp = parsePin(layer.input(ii));
+                if (layer_id.find(inp.name) == layer_id.end())
+                {
+                    // There are constant inputs.
+                    LayerParams lp;
+                    lp.name = inp.name;
+                    lp.type = "Const";
+                    lp.blobs.resize(1);
+                    blobFromTensor(getConstBlob(layer, value_id, ii), lp.blobs.back());
+                    CV_Assert_N(!lp.blobs[0].empty(), lp.blobs[0].type() == CV_32F);
+
+                    int constInpId = dstNet.addLayer(lp.name, lp.type, lp);
+                    layer_id[lp.name] = constInpId;
+                }
+            }
+
+            int id = dstNet.addLayer(name, "Concat", layerParams);
+            layer_id[name] = id;
+
             for (int ii = from; ii < to; ii++)
             {
                 Pin inp = parsePin(layer.input(ii));
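
In effect the importer now runs two passes over the Concat inputs: the first materializes every input that does not resolve to a known layer as a standalone Const layer (pulling its tensor via getConstBlob), and the second, whose beginning is shown in the context lines above, connects all inputs, constant or not, uniformly. A sketch of that connection step, mirroring the importer's existing connect helper (hedged, since the loop body is truncated in this diff):

    for (int ii = from; ii < to; ii++)
    {
        Pin inp = parsePin(layer.input(ii));
        // Constant inputs are registered in layer_id by the loop above,
        // so every pin now resolves to a layer.
        connect(layer_id, dstNet, inp, id, ii - from);
    }
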
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index adb45b8..5f94477 100644
@@ -136,6 +136,7 @@ TEST_P(Test_TensorFlow_layers, padding)
     runTensorFlowNet("padding_same");
     runTensorFlowNet("padding_valid");
     runTensorFlowNet("spatial_padding");
+    runTensorFlowNet("keras_pad_concat");
 }
 
 TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
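
For reference, runTensorFlowNet loads the named frozen graph from the opencv_extra test data and checks the network output against a stored reference blob. A hedged end-to-end sketch outside the test harness (file name illustrative, following the test's naming convention):

    Net net = readNetFromTensorflow("keras_pad_concat.pb");  // illustrative path
    net.setInput(blobFromImage(img));                        // img: any input image
    Mat out = net.forward();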