From 2c3c59d018a51d364abb41bc63dfa2c8ea28bc06 Mon Sep 17 00:00:00 2001
From: Dmitry Kurtaev
Date: Mon, 28 May 2018 18:13:32 +0300
Subject: [PATCH] Remove Shift deep learning layer

---
 modules/dnn/include/opencv2/dnn/all_layers.hpp |   2 +-
 modules/dnn/src/layers/scale_layer.cpp         |  87 +++++++++++----
 modules/dnn/src/layers/shift_layer.cpp         | 145 -------------------------
 modules/dnn/src/tensorflow/tf_importer.cpp     |  34 ++++--
 4 files changed, 95 insertions(+), 173 deletions(-)
 delete mode 100644 modules/dnn/src/layers/shift_layer.cpp

diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index f4e93b7..f2124dd 100644
--- a/modules/dnn/include/opencv2/dnn/all_layers.hpp
+++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp
@@ -503,7 +503,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
     class CV_EXPORTS ShiftLayer : public Layer
     {
     public:
-        static Ptr<ShiftLayer> create(const LayerParams& params);
+        static Ptr<Layer> create(const LayerParams& params);
     };
 
     class CV_EXPORTS PriorBoxLayer : public Layer
diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp
index f4b2bae..3c7d63a 100644
--- a/modules/dnn/src/layers/scale_layer.cpp
+++ b/modules/dnn/src/layers/scale_layer.cpp
@@ -28,6 +28,7 @@ public:
         setParamsFrom(params);
         hasBias = params.get<bool>("bias_term", false);
         axis = params.get<int>("axis", 1);
+        hasWeights = false;
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -35,11 +36,16 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
-        CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == 1 + hasBias);
         outputs.assign(1, inputs[0]);
         return true;
     }
 
+    virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
+    {
+        hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
+        CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
+    }
+
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
         return backendId == DNN_BACKEND_DEFAULT ||
@@ -63,10 +69,15 @@ public:
 
         Mat &inpBlob = *inputs[0];
         Mat &outBlob = outputs[0];
-        Mat &weights = blobs.empty() ? *inputs[1] : blobs[0];
-        Mat bias = hasBias ? blobs.back() : Mat();
+        // There is a mode when we multiply a first blob by a second one
+        // instead of trainable weights.
+        Mat weights = blobs.empty() ? *inputs[1] : (hasWeights ? blobs[0] : Mat());
+        Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
+        if (!weights.empty())
+            weights = weights.reshape(1, 1);
         MatShape inpShape = shape(inpBlob);
-        const int numWeights = weights.total();
+        const int numWeights = !weights.empty() ? weights.total() : bias.total();
+        CV_Assert(numWeights != 0, !hasWeights || !hasBias || weights.total() == bias.total());
 
         int endAxis;
         for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
@@ -84,15 +95,15 @@ public:
 
         if (endAxis != inpBlob.dims)
        {
-            float* weightsData = (float*)weights.data;
+            float* weightsData = !weights.empty() ? (float*)weights.data : 0;
             float* biasesData = hasBias ? (float*)bias.data : 0;
             int spatialSize = total(inpShape, endAxis);  // spatialSize != 1
             for (int i = 0; i < numSlices; ++i)
             {
                 for (int j = 0; j < numWeights; ++j)
                 {
-                    float w = weightsData[j];
-                    float b = hasBias ? biasesData[j] : 0;
+                    float w = weightsData ? weightsData[j] : 1;
+                    float b = biasesData ? biasesData[j] : 0;
                     Mat inpSlice(1, spatialSize, CV_32F, inpData);
                     Mat outSlice(1, spatialSize, CV_32F, outData);
                     inpSlice.convertTo(outSlice, CV_32F, w, b);
@@ -105,12 +116,16 @@
         {
             for (int i = 0; i < numSlices; ++i)
             {
-                Mat inpSlice(weights.dims, weights.size, CV_32F, inpData);
-                Mat outSlice(weights.dims, weights.size, CV_32F, outData);
-                multiply(inpSlice, weights, outSlice);
-                if (hasBias)
-                    add(outSlice, bias, outSlice);
-
+                Mat inpSlice(1, numWeights, CV_32F, inpData);
+                Mat outSlice(1, numWeights, CV_32F, outData);
+                if (!weights.empty())
+                {
+                    multiply(inpSlice, weights, outSlice);
+                    if (hasBias)
+                        add(outSlice, bias, outSlice);
+                }
+                else if (hasBias)
+                    add(inpSlice, bias, outSlice);
                 inpData += numWeights;
                 outData += numWeights;
             }
@@ -157,11 +172,15 @@ public:
 
         const int numChannels = blobs[0].total();
 
-        auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
-        Halide::Expr topExpr = input * weights(c);
+        Halide::Expr topExpr = input;
+        if (hasWeights)
+        {
+            auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
+            topExpr *= weights(c);
+        }
         if (hasBias)
         {
-            auto bias = wrapToHalideBuffer(blobs[1], {numChannels});
+            auto bias = wrapToHalideBuffer(blobs.back(), {numChannels});
             topExpr += bias(c);
         }
         top(x, y, c, n) = topExpr;
@@ -178,10 +197,24 @@ public:
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
 
+        CV_Assert(!blobs.empty());
         const size_t numChannels = blobs[0].total();
-        ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
+        if (hasWeights)
+        {
+            ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
+        }
+        else
+        {
+            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
+                                                                    {numChannels});
+            weights->allocate();
+
+            std::vector<float> ones(numChannels, 1);
+            weights->set(ones);
+            ieLayer->_weights = weights;
+        }
         if (hasBias)
-            ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {numChannels}, InferenceEngine::Layout::C);
+            ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE
@@ -190,8 +223,8 @@ public:
 
     void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
     {
-        scale = !blobs.empty() ? blobs[0] : Mat();
-        shift = hasBias ? blobs[1] : Mat();
+        scale = hasWeights ? blobs[0] : Mat();
+        shift = hasBias ? blobs.back() : Mat();
     }
 
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
@@ -205,6 +238,9 @@ public:
         }
         return flops;
     }
+
+private:
+    bool hasWeights;
 };
 
 
@@ -213,5 +249,16 @@ Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
 {
     return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
 }
 
+Ptr<Layer> ShiftLayer::create(const LayerParams& params)
+{
+    LayerParams scaleParams;
+    scaleParams.name = params.name;
+    scaleParams.type = "Scale";
+    scaleParams.blobs = params.blobs;
+    scaleParams.set("bias_term", true);
+    scaleParams.set("axis", 0);
+    return Ptr<ScaleLayerImpl>(new ScaleLayerImpl(scaleParams));
+}
+
 }  // namespace dnn
 }  // namespace cv
diff --git a/modules/dnn/src/layers/shift_layer.cpp b/modules/dnn/src/layers/shift_layer.cpp
deleted file mode 100644
index 7c3bb14..0000000
--- a/modules/dnn/src/layers/shift_layer.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-
-// Copyright (C) 2016, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-
-/*
-Implementation of shift layer, which adds up const values to blob.
-*/
-
-#include "../precomp.hpp"
-#include "../op_inf_engine.hpp"
-#include <opencv2/dnn/shape_utils.hpp>
-
-namespace cv
-{
-namespace dnn
-{
-
-class ShiftLayerImpl CV_FINAL : public ShiftLayer
-{
-public:
-    ShiftLayerImpl(const LayerParams &params)
-    {
-        setParamsFrom(params);
-        CV_Assert(blobs.size() == 1);
-    }
-
-    virtual bool supportBackend(int backendId) CV_OVERRIDE
-    {
-        return backendId == DNN_BACKEND_DEFAULT ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
-    }
-
-    bool getMemoryShapes(const std::vector<MatShape> &inputs,
-                         const int requiredOutputs,
-                         std::vector<MatShape> &outputs,
-                         std::vector<MatShape> &internals) const CV_OVERRIDE
-    {
-        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
-        internals.assign(1, shape(1, total(inputs[0], 2)));
-        return true;
-    }
-
-    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
-    {
-        CV_TRACE_FUNCTION();
-        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
-
-        Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
-    }
-
-    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
-    {
-        CV_TRACE_FUNCTION();
-        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
-
-        CV_Assert(inputs.size() > 0);
-        CV_Assert(blobs.size() > 0);
-
-        if(inputs[0]->dims == blobs[0].dims)
-        {
-            for (size_t ii = 0; ii < outputs.size(); ii++)
-            {
-                Mat &inpBlob = *inputs[ii];
-                Mat &outBlob = outputs[ii];
-
-                outBlob = inpBlob + blobs[0];
-            }
-        }
-        else
-        {
-            Mat biasOnesMat = internals[0];
-            biasOnesMat.setTo(1);
-            for (size_t ii = 0; ii < outputs.size(); ii++)
-            {
-                Mat &inpBlob = *inputs[ii];
-                Mat &outBlob = outputs[ii];
-
-                inpBlob.copyTo(outBlob);
-
-                for (int n = 0; n < inpBlob.size[0]; n++)
-                {
-                    Mat dstMat(inpBlob.size[1], inpBlob.size[2] * inpBlob.size[3],
-                               outBlob.type(), outBlob.ptr(n));
-                    gemm(blobs[0], biasOnesMat, 1, dstMat, 1, dstMat); //TODO: gemv
-                }
-            }
-        }
-    }
-
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-#ifdef HAVE_INF_ENGINE
-        // Inference Engine has no layer just for biases. Create a linear
-        // transformation layer with ones weights.
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "ScaleShift";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-
-        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                {blobs[0].total()});
-        weights->allocate();
-
-        std::vector<float> ones(blobs[0].total(), 1);
-        weights->set(ones);
-        ieLayer->_weights = weights;
-
-        ieLayer->_biases = wrapToInfEngineBlob(blobs[0]);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif  // HAVE_INF_ENGINE
-        return Ptr<BackendNode>();
-    }
-
-    void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
-    {
-        scale = Mat();
-        shift = blobs[0];
-    }
-
-    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
-                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
-    {
-        (void)outputs; // suppress unused variable warning
-        long flops = 0;
-
-        for(int i= 0; i < inputs.size(); i++)
-        {
-            flops += total(inputs[i]);
-        }
-
-        return flops;
-    }
-};
-
-Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
-{
-    return Ptr<ShiftLayer>(new ShiftLayerImpl(params));
-}
-
-}
-}
diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp
index efedbce..195b516 100644
--- a/modules/dnn/src/tensorflow/tf_importer.cpp
+++ b/modules/dnn/src/tensorflow/tf_importer.cpp
@@ -743,10 +743,20 @@ void TFImporter::populateNet(Net dstNet)
 
             if (haveConst)
             {
-                layerParams.blobs.resize(1);
-                blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
+                Mat values = getTensorContent(getConstBlob(layer, value_id));
+                CV_Assert(values.type() == CV_32FC1);
 
-                int id = dstNet.addLayer(name, "Shift", layerParams);
+                int id;
+                if (values.total() == 1)  // is a scalar.
+                {
+                    layerParams.set("shift", values.at<float>(0));
+                    id = dstNet.addLayer(name, "Power", layerParams);
+                }
+                else  // is a vector
+                {
+                    layerParams.blobs.resize(1, values);
+                    id = dstNet.addLayer(name, "Shift", layerParams);
+                }
                 layer_id[name] = id;
 
                 // one input only
@@ -777,11 +787,21 @@
             }
             CV_Assert(haveConst);
 
-            layerParams.blobs.resize(1);
-            blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
-            layerParams.blobs[0] *= -1;
+            Mat values = getTensorContent(getConstBlob(layer, value_id));
+            CV_Assert(values.type() == CV_32FC1);
+            values *= -1.0f;
 
-            int id = dstNet.addLayer(name, "Shift", layerParams);
+            int id;
+            if (values.total() == 1)  // is a scalar.
+            {
+                layerParams.set("shift", values.at<float>(0));
+                id = dstNet.addLayer(name, "Power", layerParams);
+            }
+            else  // is a vector
+            {
+                layerParams.blobs.resize(1, values);
+                id = dstNet.addLayer(name, "Shift", layerParams);
+            }
             layer_id[name] = id;
 
             // one input only
-- 
2.7.4