Fuse multipliers but not convolution layers' weights
author Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
Fri, 4 May 2018 09:09:06 +0000 (12:09 +0300)
committer Dmitry Kurtaev <dmitry.kurtaev+github@gmail.com>
Thu, 10 May 2018 16:24:38 +0000 (19:24 +0300)
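
Instead of carrying a full CV_64F copy of the convolution weights (weightsMat_doubles) just to keep repeated fusions numerically stable, store one accumulated multiplier per output channel (weightsMultipliers) and recompute each fused row from the original blob. This drops the double-precision weight copy while staying exact under repeated fusion: row i of weightsMat is always the original row scaled by the product of all multipliers applied so far. Also add CV_Assert null-pointer checks to the Caffe proto upgrade helpers before their pointer arguments are dereferenced.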
modules/dnn/src/caffe/caffe_io.cpp
modules/dnn/src/layers/convolution_layer.cpp

diff --git a/modules/dnn/src/caffe/caffe_io.cpp b/modules/dnn/src/caffe/caffe_io.cpp
index 02d1aa1..730c752 100644
--- a/modules/dnn/src/caffe/caffe_io.cpp
+++ b/modules/dnn/src/caffe/caffe_io.cpp
@@ -270,6 +270,7 @@ void UpgradeV0PaddingLayers(const NetParameter& param,
 
 bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection_,
                              V1LayerParameter* layer_param) {
+  CV_Assert(v0_layer_connection_ != NULL);
   const V1LayerParameter& v0_layer_connection = *v0_layer_connection_;
   bool is_fully_compatible = true;
   layer_param->Clear();
@@ -791,6 +792,7 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
 
 bool UpgradeV1Net(NetParameter* net_param) {
   // V1LayerParameter layers -> LayerParameter layer
+  CV_Assert(net_param != NULL);
   bool is_fully_compatible = true;
   if (net_param->layer_size() > 0) {
     LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' "
@@ -834,6 +836,7 @@ void UpgradeNetBatchNorm(NetParameter* net_param) {
 
 bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param_,
                              LayerParameter* layer_param) {
+  CV_Assert(v1_layer_param_ != NULL);
   const V1LayerParameter& v1_layer_param = *v1_layer_param_;
   layer_param->Clear();
   bool is_fully_compatible = true;
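
The three asserts above follow one pattern: fail fast on a NULL argument before the pointer is bound to a reference, since dereferencing a null pointer is undefined behaviour and would otherwise surface as a crash deep inside the upgrade logic. A minimal self-contained sketch of that pattern, with illustrative names (Param, UpgradeParam) that are not part of the patch:

#include <opencv2/core.hpp>

struct Param { int value; };  // stand-in for a Caffe proto message

bool UpgradeParam(Param* in_, Param* out)
{
    CV_Assert(in_ != NULL && out != NULL);  // guard before any dereference
    const Param& in = *in_;                 // binding *in_ is safe only now
    out->value = in.value;
    return true;
}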
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 31b4975..2bb96f9 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -169,7 +169,8 @@ class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
 {
 public:
     enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
-    Mat weightsMat, weightsMat_doubles;
+    Mat weightsMat;
+    std::vector<double> weightsMultipliers;
     std::vector<float> biasvec;
     std::vector<float> reluslope;
     Ptr<ActivationLayer> activ;
@@ -259,7 +260,7 @@ public:
             wm = wm_aligned;
         }
         weightsMat = wm;
-        weightsMat.convertTo(weightsMat_doubles, CV_64F);
+        weightsMultipliers.assign(outCn, 1.0);
 
         Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
         biasvec.resize(outCn+2);
@@ -335,13 +336,14 @@ public:
 
         if (!w.empty())
         {
+            Mat originWeights = blobs[0].reshape(1, outCn);
             for (int i = 0; i < outCn; ++i)
             {
                 double wi = w.at<float>(i);
-                cv::multiply(slice(weightsMat_doubles, i), wi, slice(weightsMat_doubles, i));
+                weightsMultipliers[i] *= wi;
+                cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
                 biasvec[i] *= wi;
             }
-            weightsMat_doubles.convertTo(weightsMat, weightsMat.type());
         }
 
         if (!b.empty())
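
To see the whole scheme in one place, here is a minimal standalone sketch of the multiplier bookkeeping above, assuming the weight blob has already been reshaped to one row per output channel; FusedConv and fuseScale are illustrative names, not OpenCV API:

#include <opencv2/core.hpp>
#include <vector>

struct FusedConv
{
    cv::Mat origin;                   // original CV_32F weights, one row per output channel
    cv::Mat weightsMat;               // fused weights actually used by forward()
    std::vector<double> multipliers;  // accumulated scale per output channel

    explicit FusedConv(const cv::Mat& blob)
        : origin(blob.clone()), weightsMat(blob.clone()),
          multipliers(blob.rows, 1.0) {}

    // Fold one more per-channel scale (e.g. from a following BatchNorm or
    // Scale layer) into the weights. The running product is kept in double
    // precision and reapplied to the *original* rows, so repeated fusion
    // never compounds float32 rounding error.
    void fuseScale(const std::vector<float>& w)
    {
        CV_Assert((int)w.size() == weightsMat.rows);
        for (int i = 0; i < weightsMat.rows; ++i)
        {
            multipliers[i] *= w[i];
            cv::multiply(origin.row(i), multipliers[i], weightsMat.row(i));
        }
    }
};

Fusing scales a and then b leaves weightsMat equal to the original weights times a*b, exactly as a single fusion would; the removed weightsMat_doubles achieved the same by carrying a second, double-precision copy of every weight.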