From 7d727ac2fb527814af477210ac05237168812ae2 Mon Sep 17 00:00:00 2001
From: Dmitry Kurtaev
Date: Sat, 9 Jun 2018 18:06:53 +0300
Subject: [PATCH] Fuse top layers to batch normalization

---
 modules/dnn/src/layers/batch_norm_layer.cpp | 40 +++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index d42face..3b47232 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -96,6 +96,46 @@ public:
         shift = bias_;
     }
 
+    virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
+    {
+        Mat w, b;
+        top->getScaleShift(w, b);
+        if (w.empty() && b.empty())
+            return false;
+
+        const int numChannels = weights_.total();
+        const int numFusedWeights = w.total();
+        const int numFusedBias = b.total();
+
+        if ((numFusedWeights != numChannels && numFusedWeights != 1 && !w.empty()) ||
+            (numFusedBias != numChannels && numFusedBias != 1 && !b.empty()))
+            return false;
+
+        if (!w.empty())
+        {
+            w = w.reshape(1, 1);
+            if (numFusedWeights == 1)
+            {
+                multiply(weights_, w.at<float>(0), weights_);
+                multiply(bias_, w.at<float>(0), bias_);
+            }
+            else
+            {
+                multiply(weights_, w, weights_);
+                multiply(bias_, w, bias_);
+            }
+        }
+        if (!b.empty())
+        {
+            b = b.reshape(1, 1);
+            if (numFusedBias == 1)
+                add(bias_, b.at<float>(0), bias_);
+            else
+                add(bias_, b.reshape(1, 1), bias_);
+        }
+        return true;
+    }
+
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
                          const int requiredOutputs,
                          std::vector<MatShape> &outputs,
-- 
2.7.4
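
The fusion in the hunk above relies on batch normalization already being a per-channel affine map y = weights_ * x + bias_, so a following scale/shift layer with parameters (w, b) can be absorbed by updating weights_ *= w and bias_ = bias_ * w + b, which is what the multiply/add calls do; the numFusedWeights == 1 and numFusedBias == 1 branches handle a single scalar broadcast over all channels. Below is a minimal standalone sketch of that algebra, not part of the patch: it uses plain std::vector<float> per-channel values and an illustrative applyAffine helper instead of cv::Mat.

// Sketch only: checks that fusing a following per-channel scale/shift (w, b)
// into batch-norm parameters (weights_ *= w, bias_ = bias_ * w + b) matches
// applying the two affine maps one after the other. Names are illustrative.
#include <cassert>
#include <cmath>
#include <vector>

// y[c] = weight[c] * x[c] + bias[c], one value per channel.
static std::vector<float> applyAffine(const std::vector<float>& x,
                                      const std::vector<float>& weight,
                                      const std::vector<float>& bias)
{
    std::vector<float> y(x.size());
    for (size_t c = 0; c < x.size(); ++c)
        y[c] = weight[c] * x[c] + bias[c];
    return y;
}

int main()
{
    std::vector<float> x       = {1.0f, -2.0f, 0.5f};   // one input value per channel
    std::vector<float> weights = {0.9f, 1.1f, 1.3f};    // folded BN scale (weights_)
    std::vector<float> bias    = {0.1f, -0.2f, 0.3f};   // folded BN shift (bias_)
    std::vector<float> w       = {2.0f, 0.5f, 1.5f};    // top layer scale
    std::vector<float> b       = {0.05f, 0.1f, -0.1f};  // top layer shift

    // Reference: batch norm followed by the separate scale/shift layer.
    std::vector<float> ref = applyAffine(applyAffine(x, weights, bias), w, b);

    // Fused parameters, mirroring the patch: weights_ *= w; bias_ = bias_ * w + b.
    std::vector<float> fusedW(x.size()), fusedB(x.size());
    for (size_t c = 0; c < x.size(); ++c)
    {
        fusedW[c] = weights[c] * w[c];
        fusedB[c] = bias[c] * w[c] + b[c];
    }
    std::vector<float> fused = applyAffine(x, fusedW, fusedB);

    // Both paths must agree channel by channel.
    for (size_t c = 0; c < x.size(); ++c)
        assert(std::fabs(ref[c] - fused[c]) < 1e-6f);
    return 0;
}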