Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author Alexander Alekhin <alexander.a.alekhin@gmail.com>
Sat, 19 Jun 2021 18:36:27 +0000 (18:36 +0000)
committer Alexander Alekhin <alexander.a.alekhin@gmail.com>
Sat, 19 Jun 2021 18:44:16 +0000 (18:44 +0000)
modules/core/src/norm.cpp
modules/core/test/test_arithm.cpp
modules/dnn/src/tensorflow/tf_importer.cpp
modules/features2d/include/opencv2/features2d.hpp
modules/features2d/src/draw.cpp
modules/highgui/src/window_gtk.cpp

@@@ -430,9 -430,6 +430,9 @@@ static bool ocl_norm( InputArray _src, 
      bool doubleSupport = d.doubleFPConfig() > 0,
              haveMask = _mask.kind() != _InputArray::NONE;
  
 +    if (depth >= CV_16F)
 +        return false;  // TODO: support FP16
 +
      if ( !(normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR) ||
           (!doubleSupport && depth == CV_64F))
          return false;
@@@ -694,7 -691,7 +694,7 @@@ double norm( InputArray _src, int normT
          return result;
      }
  
 -    NormFunc func = getNormFunc(normType >> 1, depth);
 +    NormFunc func = getNormFunc(normType >> 1, depth == CV_16F ? CV_32F : depth);
      CV_Assert( func != 0 );
  
      const Mat* arrays[] = {&src, &mask, 0};
              }
          }
      }
 +    else if (depth == CV_16F)
 +    {
 +        const size_t esz = src.elemSize();
 +        const int total = (int)it.size;
 +        const int blockSize = std::min(total, divUp(1024, cn));
 +        AutoBuffer<float, 1026/*divUp(1024,3)*3*/> fltbuf(blockSize * cn);
 +        float* data0 = fltbuf.data();
 +        for (size_t i = 0; i < it.nplanes; i++, ++it)
 +        {
 +            for (int j = 0; j < total; j += blockSize)
 +            {
 +                int bsz = std::min(total - j, blockSize);
 +                hal::cvt16f32f((const float16_t*)ptrs[0], data0, bsz * cn);
 +                func((uchar*)data0, ptrs[1], (uchar*)&result.d, bsz, cn);
 +                ptrs[0] += bsz*esz;
 +                if (ptrs[1])
 +                    ptrs[1] += bsz;
 +            }
 +        }
 +    }
      else
      {
          // generic implementation
  
      if( normType == NORM_INF )
      {
 -        if( depth == CV_64F )
 +        if(depth == CV_64F || depth == CV_16F)
              return result.d;
 -        else if( depth == CV_32F )
 +        else if (depth == CV_32F)
              return result.f;
          else
              return result.i;
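
For reference, and not part of the commit itself, a minimal sketch of how the new CV_16F path in norm() can be exercised from user code. It assumes a build that includes this change; names and values are illustrative only.

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // Build a small FP32 matrix, convert it to FP16, and compare the FP16 norm
    // (computed via the cvt16f32f + float NormFunc path added above) against
    // the FP32 reference.
    cv::Mat src32(4, 4, CV_32FC3);
    cv::randu(src32, -1.0f, 1.0f);

    cv::Mat src16;
    src32.convertTo(src16, CV_16F);

    double n32 = cv::norm(src32, cv::NORM_L2);
    double n16 = cv::norm(src16, cv::NORM_L2);  // goes through the CV_16F branch

    std::printf("L2(FP32)=%g  L2(FP16)=%g\n", n32, n16);  // should agree within FP16 precision
    return 0;
}
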
@@@ -1171,7 -1148,7 +1171,7 @@@ double norm( InputArray _src1, InputArr
          return result;
      }
  
 -    NormDiffFunc func = getNormDiffFunc(normType >> 1, depth);
 +    NormDiffFunc func = getNormDiffFunc(normType >> 1, depth == CV_16F ? CV_32F : depth);
      CV_Assert( func != 0 );
  
      const Mat* arrays[] = {&src1, &src2, &mask, 0};
          // special case to handle "integer" overflow in accumulator
          const size_t esz = src1.elemSize();
          const int total = (int)it.size;
-         const int intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
+         const int intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
          const int blockSize = std::min(total, intSumBlockSize);
          int isum = 0;
          int count = 0;
              }
          }
      }
 +    else if (depth == CV_16F)
 +    {
 +        const size_t esz = src1.elemSize();
 +        const int total = (int)it.size;
 +        const int blockSize = std::min(total, divUp(512, cn));
 +        AutoBuffer<float, 1026/*divUp(512,3)*3*2*/> fltbuf(blockSize * cn * 2);
 +        float* data0 = fltbuf.data();
 +        float* data1 = fltbuf.data() + blockSize * cn;
 +        for (size_t i = 0; i < it.nplanes; i++, ++it)
 +        {
 +            for (int j = 0; j < total; j += blockSize)
 +            {
 +                int bsz = std::min(total - j, blockSize);
 +                hal::cvt16f32f((const float16_t*)ptrs[0], data0, bsz * cn);
 +                hal::cvt16f32f((const float16_t*)ptrs[1], data1, bsz * cn);
 +                func((uchar*)data0, (uchar*)data1, ptrs[2], (uchar*)&result.d, bsz, cn);
 +                ptrs[0] += bsz*esz;
 +                ptrs[1] += bsz*esz;
 +                if (ptrs[2])
 +                    ptrs[2] += bsz;
 +            }
 +        }
 +    }
      else
      {
          // generic implementation
  
      if( normType == NORM_INF )
      {
 -        if( depth == CV_64F )
 +        if (depth == CV_64F || depth == CV_16F)
              return result.d;
 -        else if( depth == CV_32F )
 +        else if (depth == CV_32F)
              return result.f;
          else
              return result.u;
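
The division of intSumBlockSize by cn above addresses a potential 32-bit overflow: the block function accumulates bsz*cn terms, not bsz. A rough back-of-the-envelope check (illustrative only, not part of the commit):

#include <climits>
#include <cstdint>
#include <cstdio>

int main()
{
    // For NORM_L2SQR on 8U data every term is at most 255*255, and a block of
    // `blockSize` pixels feeds blockSize*cn terms into the int partial sum.
    const int64_t maxTerm = 255 * 255;    // 65025
    const int64_t blockPixels = 1 << 15;  // previous intSumBlockSize
    const int cn = 4;

    int64_t worstOld = maxTerm * blockPixels * cn;        // block size not scaled by cn
    int64_t worstNew = maxTerm * (blockPixels / cn) * cn; // block size divided by cn

    std::printf("old worst-case partial sum: %lld (INT_MAX = %d)\n", (long long)worstOld, INT_MAX);
    std::printf("new worst-case partial sum: %lld\n", (long long)worstNew);
    // worstOld (~8.5e9) overflows a 32-bit int; worstNew (~2.13e9) stays in range.
    return 0;
}
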
@@@ -1271,15 -1225,15 +1271,15 @@@ cv::Hamming::ResultType Hamming::operat
      return cv::hal::normHamming(a, b, size);
  }
  
 -double PSNR(InputArray _src1, InputArray _src2)
 +double PSNR(InputArray _src1, InputArray _src2, double R)
  {
      CV_INSTRUMENT_REGION();
  
      //Input arrays must have depth CV_8U
 -    CV_Assert( _src1.depth() == CV_8U && _src2.depth() == CV_8U );
 +    CV_Assert( _src1.type() == _src2.type() );
  
      double diff = std::sqrt(norm(_src1, _src2, NORM_L2SQR)/(_src1.total()*_src1.channels()));
 -    return 20*log10(255./(diff+DBL_EPSILON));
 +    return 20*log10(R/(diff+DBL_EPSILON));
  }
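
A minimal usage sketch of the extended PSNR signature (illustrative, not part of the commit): with the assertion relaxed to matching types, non-8-bit images can be compared by passing the maximum pixel value R explicitly.

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat a(64, 64, CV_16UC1, cv::Scalar(1000));
    cv::Mat b(64, 64, CV_16UC1, cv::Scalar(1010));

    double psnr = cv::PSNR(a, b, 65535.0);  // R = 65535 for CV_16U data
    std::printf("PSNR = %.2f dB\n", psnr);
    return 0;
}
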
  
  
@@@ -476,7 -476,7 +476,7 @@@ struct CopyOp : public BaseElemWiseO
      }
      int getRandomType(RNG& rng)
      {
 -        return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL, 1, ARITHM_MAX_CHANNELS);
 +        return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_16F, 1, ARITHM_MAX_CHANNELS);
      }
      double getMaxErr(int)
      {
@@@ -498,7 -498,7 +498,7 @@@ struct SetOp : public BaseElemWiseO
      }
      int getRandomType(RNG& rng)
      {
 -        return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL, 1, ARITHM_MAX_CHANNELS);
 +        return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_16F, 1, ARITHM_MAX_CHANNELS);
      }
      double getMaxErr(int)
      {
@@@ -1847,54 -1847,13 +1847,54 @@@ INSTANTIATE_TEST_CASE_P(Arithm, Subtrac
      testing::Values(-1, CV_16S, CV_32S, CV_32F),
      testing::Bool()));
  
 -TEST(Core_FindNonZero, singular)
 +TEST(Core_FindNonZero, regression)
  {
      Mat img(10, 10, CV_8U, Scalar::all(0));
 -    vector<Point> pts, pts2(10);
 +    vector<Point> pts, pts2(5);
      findNonZero(img, pts);
      findNonZero(img, pts2);
      ASSERT_TRUE(pts.empty() && pts2.empty());
 +
 +    RNG rng((uint64)-1);
 +    size_t nz = 0;
 +    for( int i = 0; i < 10; i++ )
 +    {
 +        int idx = rng.uniform(0, img.rows*img.cols);
 +        if( !img.data[idx] ) nz++;
 +        img.data[idx] = (uchar)rng.uniform(1, 256);
 +    }
 +    findNonZero(img, pts);
 +    ASSERT_TRUE(pts.size() == nz);
 +
 +    img.convertTo( img, CV_8S );
 +    pts.clear();
 +    findNonZero(img, pts);
 +    ASSERT_TRUE(pts.size() == nz);
 +
 +    img.convertTo( img, CV_16U );
 +    pts.resize(pts.size()*2);
 +    findNonZero(img, pts);
 +    ASSERT_TRUE(pts.size() == nz);
 +
 +    img.convertTo( img, CV_16S );
 +    pts.resize(pts.size()*3);
 +    findNonZero(img, pts);
 +    ASSERT_TRUE(pts.size() == nz);
 +
 +    img.convertTo( img, CV_32S );
 +    pts.resize(pts.size()*4);
 +    findNonZero(img, pts);
 +    ASSERT_TRUE(pts.size() == nz);
 +
 +    img.convertTo( img, CV_32F );
 +    pts.resize(pts.size()*5);
 +    findNonZero(img, pts);
 +    ASSERT_TRUE(pts.size() == nz);
 +
 +    img.convertTo( img, CV_64F );
 +    pts.clear();
 +    findNonZero(img, pts);
 +    ASSERT_TRUE(pts.size() == nz);
  }
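
For context, a small standalone sketch of the findNonZero behaviour the regression test above relies on: the output vector is resized to the actual number of non-zero elements, regardless of its size on entry (illustrative only).

#include <opencv2/core.hpp>
#include <vector>
#include <cstdio>

int main()
{
    cv::Mat img = cv::Mat::zeros(10, 10, CV_8U);
    img.at<uchar>(2, 3) = 7;
    img.at<uchar>(5, 5) = 1;

    std::vector<cv::Point> pts(42);  // deliberately pre-sized with the wrong length
    cv::findNonZero(img, pts);
    std::printf("non-zero count: %zu\n", pts.size());  // prints 2
    return 0;
}
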
  
  TEST(Core_BoolVector, support)
@@@ -2021,14 -1980,6 +2021,14 @@@ TEST(Compare, regression_8999
      EXPECT_THROW(cv::compare(A, B, C, CMP_LT), cv::Exception);
  }
  
 +TEST(Compare, regression_16F_do_not_crash)
 +{
 +    cv::Mat mat1(2, 2, CV_16F, cv::Scalar(1));
 +    cv::Mat mat2(2, 2, CV_16F, cv::Scalar(2));
 +    cv::Mat dst;
 +    EXPECT_THROW(cv::compare(mat1, mat2, dst, cv::CMP_EQ), cv::Exception);
 +}
 +
  
  TEST(Core_minMaxIdx, regression_9207_1)
  {
@@@ -2166,6 -2117,15 +2166,15 @@@ TEST(Core_Norm, IPP_regression_NORM_L1_
      EXPECT_EQ((double)20*cn, cv::norm(a, b, NORM_L1, mask));
  }
  
+ TEST(Core_Norm, NORM_L2_8UC4)
+ {
+     // Tests there is no integer overflow in norm computation for multiple channels.
+     const int kSide = 100;
+     cv::Mat4b a(kSide, kSide, cv::Scalar(255, 255, 255, 255));
+     cv::Mat4b b = cv::Mat4b::zeros(kSide, kSide);
+     const double kNorm = 2.*kSide*255.;
+     EXPECT_EQ(kNorm, cv::norm(a, b, NORM_L2));
+ }
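
The expected value in the test above follows from a short computation: all kSide*kSide*4 channel values differ by 255, so the L2 norm is sqrt(kSide^2 * 4 * 255^2) = 2*kSide*255 = 51000. An illustrative check (not part of the commit):

#include <cmath>
#include <cstdio>

int main()
{
    const int kSide = 100;
    double expected = std::sqrt((double)kSide * kSide * 4 * 255.0 * 255.0);
    std::printf("%.1f == %.1f\n", expected, 2.0 * kSide * 255.0);  // 51000.0 == 51000.0
    return 0;
}
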
  
  TEST(Core_ConvertTo, regression_12121)
  {
@@@ -2294,7 -2254,6 +2303,7 @@@ template <typename T> static inlin
  void testDivideChecks(const Mat& dst)
  {
      ASSERT_FALSE(dst.empty());
 +    CV_StaticAssert(std::numeric_limits<T>::is_integer, "");
      for (int y = 0; y < dst.rows; y++)
      {
          for (int x = 0; x < dst.cols; x++)
  }
  
  template <typename T> static inline
 +void testDivideChecksFP(const Mat& dst)
 +{
 +    ASSERT_FALSE(dst.empty());
 +    CV_StaticAssert(!std::numeric_limits<T>::is_integer, "");
 +    for (int y = 0; y < dst.rows; y++)
 +    {
 +        for (int x = 0; x < dst.cols; x++)
 +        {
 +            if ((y % 3) == 0 && (x % 4) == 2)
 +            {
 +                EXPECT_TRUE(cvIsNaN(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
 +            }
 +            else if ((x % 4) == 2)
 +            {
 +                EXPECT_TRUE(cvIsInf(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
 +            }
 +            else
 +            {
 +                EXPECT_FALSE(cvIsNaN(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
 +                EXPECT_FALSE(cvIsInf(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
 +            }
 +        }
 +    }
 +}
 +
 +template <> inline void testDivideChecks<float>(const Mat& dst) { testDivideChecksFP<float>(dst); }
 +template <> inline void testDivideChecks<double>(const Mat& dst) { testDivideChecksFP<double>(dst); }
 +
 +
 +template <typename T> static inline
  void testDivide(bool isUMat, double scale, bool largeSize, bool tailProcessing, bool roi)
  {
      Mat src1, src2;
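
The floating-point checks added above assume IEEE-754 behaviour of cv::divide for float/double inputs, i.e. x/0 -> +/-Inf and 0/0 -> NaN, which is exactly what testDivideChecksFP verifies. A minimal sketch under that assumption:

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat num = (cv::Mat_<float>(1, 3) << 1.f, 0.f, -2.f);
    cv::Mat den = (cv::Mat_<float>(1, 3) << 0.f, 0.f,  4.f);

    cv::Mat dst;
    cv::divide(num, den, dst);  // expected: Inf, NaN, -0.5

    for (int i = 0; i < dst.cols; i++)
        std::printf("%d: %f (isnan=%d, isinf=%d)\n", i, dst.at<float>(0, i),
                    cvIsNaN(dst.at<float>(0, i)), cvIsInf(dst.at<float>(0, i)));
    return 0;
}
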
@@@ -30,7 -30,7 +30,7 @@@ Implementation of Tensorflow models par
  
  namespace cv {
  namespace dnn {
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
 +CV__DNN_INLINE_NS_BEGIN
  
  #if HAVE_PROTOBUF
  
@@@ -510,2051 -510,2274 +510,2274 @@@ protected
  
  private:
      void addPermuteLayer(const int* order, const std::string& permName, Pin& inpId);
+     typedef void (TFImporter::*TFImporterNodeParser)(tensorflow::GraphDef&, const tensorflow::NodeDef&, LayerParams&);
+     typedef std::map<std::string, TFImporterNodeParser> DispatchMap;
+     const DispatchMap dispatch;
+     static const DispatchMap buildDispatchMap();
+     void parseConvolution        (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseBias               (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseMatMul             (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseReshape            (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseFlatten            (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseTranspose          (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseConstant           (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseLrn                (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseConcat             (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseMaxPool            (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseAvgPool            (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseMaxPoolGrad        (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parsePlaceholder        (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseSplit              (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseSlice              (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseStridedSlice       (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseMul                (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseFusedBatchNorm     (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseConv2DBackpropInput(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseBlockLSTM          (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseResize             (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseL2Normalize        (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parsePriorBox           (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseSoftmax            (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseCropAndResize      (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseMean               (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parsePack               (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseClipByValue        (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseLeakyRelu          (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseActivation         (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
+     void parseCustomLayer        (tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams);
  };
  
- TFImporter::TFImporter(Net& net, const char *model, const char *config)
-     : dstNet(net)
+ const TFImporter::DispatchMap TFImporter::buildDispatchMap()
  {
-     if (model && model[0])
-     {
-         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow model from file: " << model);
-         ReadTFNetParamsFromBinaryFileOrDie(model, &netBin);
-     }
-     if (config && config[0])
-     {
-         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow config from file: " << config);
-         ReadTFNetParamsFromTextFileOrDie(config, &netTxt);
-     }
-     populateNet();
+     static DispatchMap dispatch;
+     dispatch["Conv2D"] = dispatch["SpaceToBatchND"] = dispatch["DepthwiseConv2dNative"] =
+             dispatch["Pad"] = dispatch["MirrorPad"] = dispatch["Conv3D"] = &TFImporter::parseConvolution;
+     dispatch["BiasAdd"] = dispatch["Add"] = dispatch["AddV2"] = dispatch["Sub"] = dispatch["AddN"] = &TFImporter::parseBias;
+     dispatch["MatMul"] = &TFImporter::parseMatMul;
+     dispatch["Reshape"] = &TFImporter::parseReshape;
+     dispatch["Flatten"] = dispatch["Squeeze"] = &TFImporter::parseFlatten;
+     dispatch["Transpose"] = &TFImporter::parseTranspose;
+     dispatch["Const"] = &TFImporter::parseConstant;
+     dispatch["LRN"] = &TFImporter::parseLrn;
+     dispatch["Concat"] = dispatch["ConcatV2"] = &TFImporter::parseConcat;
+     dispatch["MaxPool"] = dispatch["MaxPool3D"] = &TFImporter::parseMaxPool;
+     dispatch["AvgPool"] = dispatch["AvgPool3D"] = &TFImporter::parseAvgPool;
+     dispatch["MaxPoolGrad"] = &TFImporter::parseMaxPoolGrad;
+     dispatch["Placeholder"] = &TFImporter::parsePlaceholder;
+     dispatch["Split"] = &TFImporter::parseSplit;
+     dispatch["Slice"] = &TFImporter::parseSlice;
+     dispatch["StridedSlice"] = &TFImporter::parseStridedSlice;
+     dispatch["Mul"] = dispatch["RealDiv"] = &TFImporter::parseMul;
+     dispatch["FusedBatchNorm"] = dispatch["FusedBatchNormV3"] = &TFImporter::parseFusedBatchNorm;
+     dispatch["Conv2DBackpropInput"] = &TFImporter::parseConv2DBackpropInput;
+     dispatch["BlockLSTM"] = &TFImporter::parseBlockLSTM;
+     dispatch["ResizeNearestNeighbor"] = dispatch["ResizeBilinear"] = dispatch["FusedResizeAndPadConv2D"] = &TFImporter::parseResize;
+     dispatch["L2Normalize"] = &TFImporter::parseL2Normalize;
+     dispatch["PriorBox"] = &TFImporter::parsePriorBox;
+     dispatch["Softmax"] = &TFImporter::parseSoftmax;
+     dispatch["CropAndResize"] = &TFImporter::parseCropAndResize;
+     dispatch["Mean"] = dispatch["Sum"] = &TFImporter::parseMean;
+     dispatch["Pack"] = &TFImporter::parsePack;
+     dispatch["ClipByValue"] = &TFImporter::parseClipByValue;
+     dispatch["LeakyRelu"] = &TFImporter::parseLeakyRelu;
+     dispatch["Abs"] = dispatch["Tanh"] = dispatch["Sigmoid"] = dispatch["Relu"] =
+             dispatch["Elu"] = dispatch["Exp"] = dispatch["Identity"] = dispatch["Relu6"] = &TFImporter::parseActivation;
+     return dispatch;
  }
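
The refactoring above replaces a long if/else chain in parseNode with a table of pointers to member functions keyed by the TensorFlow op name. A generic, self-contained sketch of that dispatch pattern (class and parser names here are illustrative, not the importer's real API):

#include <cstdio>
#include <map>
#include <string>

class Importer
{
public:
    Importer() : dispatch(buildDispatchMap()) {}

    void parseNode(const std::string& op)
    {
        DispatchMap::const_iterator it = dispatch.find(op);
        if (it != dispatch.end())
            (this->*(it->second))(op);  // invoke the registered member parser
        else
            std::printf("'%s': no dedicated parser, use fallback\n", op.c_str());
    }

private:
    typedef void (Importer::*NodeParser)(const std::string&);
    typedef std::map<std::string, NodeParser> DispatchMap;

    void parseConv(const std::string& op) { std::printf("convolution parser for %s\n", op.c_str()); }
    void parseRelu(const std::string& op) { std::printf("activation parser for %s\n", op.c_str()); }

    static DispatchMap buildDispatchMap()
    {
        DispatchMap m;
        m["Conv2D"] = m["DepthwiseConv2dNative"] = &Importer::parseConv;
        m["Relu"] = m["Relu6"] = &Importer::parseRelu;
        return m;
    }

    const DispatchMap dispatch;
};

int main()
{
    Importer imp;
    imp.parseNode("Conv2D");
    imp.parseNode("Relu6");
    imp.parseNode("Unknown");
    return 0;
}
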
  
- TFImporter::TFImporter(
-         Net& net,
-         const char *dataModel, size_t lenModel,
-         const char *dataConfig, size_t lenConfig
- )
-     : dstNet(net)
+ void TFImporter::parseConvolution(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer_, LayerParams& layerParams)
  {
-     if (dataModel != NULL && lenModel > 0)
-     {
-         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow model from memory (" << lenModel << " bytes)");
-         ReadTFNetParamsFromBinaryBufferOrDie(dataModel, lenModel, &netBin);
-     }
-     if (dataConfig != NULL && lenConfig > 0)
+     tensorflow::NodeDef layer = layer_;
+     std::string name = layer.name();
+     std::string type = layer.op();
+     int num_inputs = layer.input_size();
+     CV_CheckGT(num_inputs, 0, "");
+     // The first node of dilated convolution subgraph.
+     // Extract input node, dilation rate and paddings.
+     std::string input = layer.input(0);
+     StrIntVector next_layers;
+     if (type == "SpaceToBatchND" || type == "Pad")
      {
-         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow config from memory (" << lenConfig << " bytes)");
-         ReadTFNetParamsFromTextBufferOrDie(dataConfig, lenConfig, &netTxt);
+         next_layers = getNextLayers(net, name, "Conv2D");
+         if (next_layers.empty())
+             next_layers = getNextLayers(net, name, "DepthwiseConv2dNative");
      }
-     populateNet();
- }
- void TFImporter::kernelFromTensor(const tensorflow::TensorProto &tensor, Mat &dstBlob)
- {
-     MatShape shape;
-     blobShapeFromTensor(tensor, shape);
-     int dims = (int)shape.size();
-     // TODO: other blob types
-     CV_Assert(tensor.dtype() == tensorflow::DT_FLOAT ||
-               tensor.dtype() == tensorflow::DT_HALF);
-     CV_Assert(dims == 4 || dims == 5);
  
-     int out_c, input_c, depth, height, width;
-     if (dims == 4)
+     if (type == "SpaceToBatchND")
      {
-         // REORDER kernel HWIO to OIHW
-         swap(shape[0], shape[2]); // IWHO
-         swap(shape[1], shape[3]); // IOHW
-         swap(shape[0], shape[1]); // OIHW
-         depth = 1; height = shape[2]; width = shape[3];
+         // op: "SpaceToBatchND"
+         // input: "input"
+         // input: "SpaceToBatchND/block_shape"
+         // input: "SpaceToBatchND/paddings"
+         CV_CheckEQ(num_inputs, 3, "");
+         DictValue dilation = parseDims(getConstBlob(layer, value_id, 1));
+         CV_Assert(dilation.size() == 2);
+         layerParams.set("dilation_h", dilation.get<int>(0));
+         layerParams.set("dilation_w", dilation.get<int>(1));
+         Mat paddings;
+         parseTensor<int>(getConstBlob(layer, value_id, 2), paddings);
+         // paddings is a 2x2 matrix: [[top, bot], [left, right]]
+         layerParams.set("pad_h", paddings.at<float>(0));
+         layerParams.set("pad_w", paddings.at<float>(2));
+         CV_Assert(next_layers.size() == 1);
+         layers_to_ignore.insert(next_layers[0].first);
+         // FIXIT don't override, rewrite this code
+         layer = net.node(next_layers[0].second);
+         name = layer.name();
+         type = layer.op();
+         num_inputs = layer.input_size();
+         CV_LOG_DEBUG(NULL, "DNN/TF:     switched to layer " << name << " @ " << type << ") with " << num_inputs << " inputs");
      }
-     else
+     else if (type == "Pad" || type == "MirrorPad")
      {
-         // REORDER kernel DHWIO to OIDHW
-         swap(shape[0], shape[4]); // OHWID
-         swap(shape[1], shape[3]); // OIWHD
-         swap(shape[2], shape[4]); // OIDHW
-         depth = shape[2]; height = shape[3]; width = shape[4];
-     }
-     out_c = shape[0]; input_c = shape[1];
+         Mat paddings = getTensorContent(getConstBlob(layer, value_id, 1));
+         CV_Assert(paddings.type() == CV_32SC1);
+         if (paddings.total() == 8)
+         {
+             // Perhaps, we have NHWC padding dimensions order.
+             //  N    H    W    C
+             // 0 1  2 3  4 5  6 7
+             std::swap(paddings.at<int32_t>(2), paddings.at<int32_t>(6));
+             std::swap(paddings.at<int32_t>(3), paddings.at<int32_t>(7));
+             //  N    C    W    H
+             // 0 1  2 3  4 5  6 7
+             std::swap(paddings.at<int32_t>(4), paddings.at<int32_t>(6));
+             std::swap(paddings.at<int32_t>(5), paddings.at<int32_t>(7));
+             //  N    C    H    W
+             // 0 1  2 3  4 5  6 7
+         }
  
-     dstBlob.create(shape, CV_32F);
+         if (next_layers.empty() || paddings.total() != 8 ||
+             paddings.at<int32_t>(4) != paddings.at<int32_t>(5) ||
+             paddings.at<int32_t>(6) != paddings.at<int32_t>(7) || type == "MirrorPad")
+         {
+             // Just a single padding layer.
+             layerParams.set("paddings", DictValue::arrayInt<int*>((int*)paddings.data, paddings.total()));
+             if (type == "MirrorPad")
+                 layerParams.set("type", "reflect");
  
-     Mat tensorContent = getTensorContent(tensor, /*no copy*/false);
-     int size = tensorContent.total();
-     CV_Assert(size == (int)dstBlob.total());
+             int id = dstNet.addLayer(name, "Padding", layerParams);
+             layer_id[name] = id;
  
-     float *dstData = dstBlob.ptr<float>();
-     const float *data = reinterpret_cast<const float*>(tensorContent.data);
+             connect(layer_id, dstNet, parsePin(input), id, 0);
+             return;
+         }
+         else
+         {
+             // Merge with subsequent convolutional layer.
+             CV_Assert(next_layers.size() == 1);
  
-     int total = out_c * input_c * depth * height * width;
-     for (int i_oc = 0; i_oc < out_c; i_oc++) {
-         for (int i_ic = 0; i_ic < input_c; i_ic++) {
-             for (int i_d = 0; i_d < depth; i_d++) {
-                 for (int i_h = 0; i_h < height; i_h++) {
-                     for (int i_w = 0; i_w < width; i_w++) {
-                         int dst_i = input_c * depth * height * width * i_oc +
-                                     depth * height * width * i_ic + height * width * i_d + width * i_h + i_w;
-                         int src_i = out_c * input_c * width * height * i_d +
-                                     out_c * input_c * width * i_h + out_c * input_c * i_w + out_c * i_ic + i_oc;
-                         CV_Assert(dst_i < total);
-                         CV_Assert(src_i < total);
-                        dstData[dst_i] = data[src_i];
-                    }
-                 }
-             }
+             layerParams.set("pad_h", paddings.at<int32_t>(4));
+             layerParams.set("pad_w", paddings.at<int32_t>(6));
+             layers_to_ignore.insert(next_layers[0].first);
+             // FIXIT don't override, rewrite this code
+             layer = net.node(next_layers[0].second);
+             name = layer.name();
+             type = layer.op();
+             num_inputs = layer.input_size();
+             CV_LOG_DEBUG(NULL, "DNN/TF:     switched to layer " << name << " @ " << type << ") with " << num_inputs << " inputs");
          }
      }
- }
  
- void TFImporter::connect(const std::map<String, int>& layers_name_id_map, Net& network, const Pin& outPin,
-              const int input_layer_id, const int input_blob_id)
- {
-     std::map<String, int>::const_iterator it = layers_name_id_map.find(outPin.name);
-     if (it == layers_name_id_map.end())
-         CV_Error(Error::StsError, "Input layer not found: " + outPin.name);
+     // For the object detection networks, TensorFlow Object Detection API
+     // predicts deltas for bounding boxes in yxYX (ymin, xmin, ymax, xmax)
+     // order. We can manage it at DetectionOutput layer parsing predictions
+     // or shuffle last convolution's weights.
+     bool locPredTransposed = hasLayerAttr(layer, "loc_pred_transposed") &&
+                              getLayerAttr(layer, "loc_pred_transposed").b();
  
-     std::vector<String>::iterator inpNameIt = std::find(netInputsNames.begin(), netInputsNames.end(), outPin.name);
-     int blobIndex;
-     if (inpNameIt == netInputsNames.end())
-         blobIndex = outPin.blobIndex;
-     else
-         blobIndex = inpNameIt - netInputsNames.begin();
-     network.connect(it->second, blobIndex, input_layer_id, input_blob_id);
- }
+     layerParams.set("bias_term", false);
+     layerParams.blobs.resize(1);
  
- void TFImporter::connectToAllBlobs(const std::map<String, int>& layer_id, Net& network, const Pin& outPin,
-                      const int input_layer_id, const int input_blobs_count)
- {
-     for (int input_blob_id = 0; input_blob_id < input_blobs_count; input_blob_id++)
-         connect(layer_id, network, outPin, input_layer_id, input_blob_id);
- }
+     next_layers = getNextLayers(net, name, "BiasAdd");
+     if (next_layers.size() == 1) {
+         layerParams.set("bias_term", true);
+         layerParams.blobs.resize(2);
  
- const tensorflow::TensorProto& TFImporter::getConstBlob(const tensorflow::NodeDef &layer, std::map<String, int> const_layers,
-                                               int input_blob_index, int* actual_inp_blob_idx) {
-     if (input_blob_index == -1) {
-         for(int i = 0; i < layer.input_size(); i++) {
-             Pin input = parsePin(layer.input(i));
-             if (const_layers.find(input.name) != const_layers.end()) {
-                 if (input_blob_index != -1)
-                     CV_Error(Error::StsError, "More than one input is Const op");
+         int weights_layer_index = next_layers[0].second;
  
-                 input_blob_index = i;
+         blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
+         ExcludeLayer(net, weights_layer_index, 0, false);
+         layers_to_ignore.insert(next_layers[0].first);
+         // Shuffle bias from yxYX to xyXY.
+         if (locPredTransposed)
+         {
+             const int numWeights = layerParams.blobs[1].total();
+             float* biasData = reinterpret_cast<float*>(layerParams.blobs[1].data);
+             CV_Assert(numWeights % 4 == 0);
+             for (int i = 0; i < numWeights; i += 2)
+             {
+                 std::swap(biasData[i], biasData[i + 1]);
              }
          }
      }
  
-     if (input_blob_index == -1)
-         CV_Error(Error::StsError, "Const input blob for weights not found");
-     Pin kernel_inp = parsePin(layer.input(input_blob_index));
-     if (const_layers.find(kernel_inp.name) == const_layers.end())
-         CV_Error(Error::StsError, "Input [" + layer.input(input_blob_index) +
-                                   "] for node [" + layer.name() + "] not found");
-     if (kernel_inp.blobIndex != 0)
-         CV_Error(Error::StsError, "Unsupported kernel input");
-     if(actual_inp_blob_idx) {
-         *actual_inp_blob_idx = input_blob_index;
-     }
-     int nodeIdx = const_layers.at(kernel_inp.name);
-     if (nodeIdx < netBin.node_size() && netBin.node(nodeIdx).name() == kernel_inp.name)
+     int kernelTensorInpId = -1;
+     const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id, -1, &kernelTensorInpId);
+     const String kernelTensorName = layer.input(kernelTensorInpId);
+     std::map<String, Mat>::iterator sharedWeightsIt = sharedWeights.find(kernelTensorName);
+     if (sharedWeightsIt == sharedWeights.end())
      {
-         return netBin.node(nodeIdx).attr().at("value").tensor();
+         kernelFromTensor(kernelTensor, layerParams.blobs[0]);
+         releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
+         int* kshape = layerParams.blobs[0].size.p;
+         const int outCh = kshape[0];
+         const int inCh = kshape[1];
+         const int height = kshape[2];
+         const int width = kshape[3];
+         if (type == "DepthwiseConv2dNative")
+         {
+             CV_Assert(!locPredTransposed);
+             const int chMultiplier = kshape[0];
+             Mat copy = layerParams.blobs[0].clone();
+             float* src = (float*)copy.data;
+             float* dst = (float*)layerParams.blobs[0].data;
+             for (int i = 0; i < chMultiplier; ++i)
+                 for (int j = 0; j < inCh; ++j)
+                     for (int s = 0; s < height * width; ++s)
+                     {
+                         int src_i = (i * inCh + j) * height * width + s;
+                         int dst_i = (j * chMultiplier + i) * height* width + s;
+                         dst[dst_i] = src[src_i];
+                     }
+             // TODO Use reshape instead
+             kshape[0] = inCh * chMultiplier;
+             kshape[1] = 1;
+             size_t* kstep = layerParams.blobs[0].step.p;
+             kstep[0] = kstep[1]; // fix steps too
+         }
+         // Shuffle output channels from yxYX to xyXY.
+         if (locPredTransposed)
+         {
+             const int slice = height * width * inCh;
+             for (int i = 0; i < outCh; i += 2)
+             {
+                 cv::Mat src(1, slice, CV_32F, layerParams.blobs[0].ptr<float>(i));
+                 cv::Mat dst(1, slice, CV_32F, layerParams.blobs[0].ptr<float>(i + 1));
+                 std::swap_ranges(src.begin<float>(), src.end<float>(), dst.begin<float>());
+             }
+         }
+         sharedWeights[kernelTensorName] = layerParams.blobs[0];
      }
      else
      {
-         CV_Assert_N(nodeIdx < netTxt.node_size(),
-                     netTxt.node(nodeIdx).name() == kernel_inp.name);
-         return netTxt.node(nodeIdx).attr().at("value").tensor();
+         layerParams.blobs[0] = sharedWeightsIt->second;
      }
- }
+     Mat weights = layerParams.blobs[0];
+     layerParams.set("kernel_size",  DictValue::arrayInt(&weights.size[2], weights.dims - 2));
  
- static void addConstNodes(tensorflow::GraphDef& net, std::map<String, int>& const_layers,
-                           std::set<String>& layers_to_ignore)
- {
-     CV_LOG_DEBUG(NULL, "DNN/TF: addConstNodes(): handling " << net.node_size() << " nodes...");
-     for (int li = 0; li < net.node_size(); li++)
-     {
-         const tensorflow::NodeDef &layer = net.node(li);
-         String name = layer.name();
-         String type = layer.op();
+     layerParams.set("num_output", layerParams.blobs[0].size[0]);
  
-         //CV_LOG_DEBUG(NULL, "DNN/TF: layer_id=" << li << " - '" << name << "' @ " << type);
+     setStrides(layerParams, layer);
+     if (!layerParams.has("pad_w") && !layerParams.has("pad_h"))
+         setPadding(layerParams, layer);
  
-         try
-         {
-             if (type == "Dequantize")
-             {
-                 // Example of Dequantize node:
-                 //   name: "conv2d_1/bias"
-                 //   op: "Dequantize"
-                 //   input: "conv2d_1/bias_quantized_const" (tensor of dtype DT_QUINT8)
-                 //   input: "conv2d_1/bias_quantized_min"
-                 //   input: "conv2d_1/bias_quantized_max"
-                 //   attr { key: "T" value { type: DT_QUINT8 } }   (quantized type)
-                 //   attr { key: "mode" value { s: "MIN_FIRST" } } (quantization technique)
-                 CV_CheckEQ(layer.input_size(), 3, "Dequantize: 3 inputs is supported only");
-                 for (int i = 0; i < 3; ++i)
-                     CV_Assert(const_layers.find(layer.input(i)) != const_layers.end());
-                 CV_Assert(hasLayerAttr(layer, "mode") &&
-                           getLayerAttr(layer, "mode").s() == "MIN_FIRST");
+     // The final node of dilated convolution subgraph.
+     next_layers = getNextLayers(net, name, "BatchToSpaceND");
+     if (!next_layers.empty())
+     {
+         CV_Assert(next_layers.size() == 1);
+         ExcludeLayer(net, next_layers[0].second, 0, false);
+         layers_to_ignore.insert(next_layers[0].first);
+     }
  
-                 int tensorId = const_layers[layer.input(0)];
-                 int minId = const_layers[layer.input(1)];
-                 int maxId = const_layers[layer.input(2)];
+     int id = dstNet.addLayer(name, "Convolution", layerParams);
+     layer_id[name] = id;
  
-                 tensorflow::TensorProto* tensor = net.mutable_node(tensorId)
-                                                     ->mutable_attr()->at("value")
-                                                      .mutable_tensor();
-                 CV_CheckEQ((int)tensor->dtype(), (int)tensorflow::DT_QUINT8, "");
+     // one input only
+     connect(layer_id, dstNet, parsePin(input), id, 0);
  
-                 Mat qMin = getTensorContent(net.node(minId).attr().at("value").tensor());
-                 Mat qMax = getTensorContent(net.node(maxId).attr().at("value").tensor());
-                 CV_CheckEQ(qMin.total(), (size_t)1, "");
-                 CV_CheckTypeEQ(qMin.type(), CV_32FC1, "");
-                 CV_CheckEQ(qMax.total(), (size_t)1, "");
-                 CV_CheckTypeEQ(qMax.type(), CV_32FC1, "");
  
-                 Mat content = getTensorContent(*tensor);
+     if (getDataLayout(name, data_layouts) == DATA_LAYOUT_UNKNOWN)
+         data_layouts[name] = DATA_LAYOUT_NHWC;
+ }
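
The padding reorder inside parseConvolution above maps an 8-element TensorFlow paddings tensor from NHWC pair order to NCHW pair order with two pairs of swaps per step. A tiny illustrative check of that index shuffle (not part of the commit):

#include <algorithm>
#include <cstdio>

int main()
{
    int paddings[8] = {0, 0, 1, 2, 3, 4, 5, 6};  // N(0,0) H(1,2) W(3,4) C(5,6)

    std::swap(paddings[2], paddings[6]);
    std::swap(paddings[3], paddings[7]);  // now N C W H
    std::swap(paddings[4], paddings[6]);
    std::swap(paddings[5], paddings[7]);  // now N C H W

    for (int i = 0; i < 8; i++)
        std::printf("%d ", paddings[i]);  // prints: 0 0 5 6 1 2 3 4
    std::printf("\n");
    return 0;
}
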
  
-                 float minVal = qMin.at<float>(0);
-                 float rangeScale = (qMax.at<float>(0) - minVal) / 255;
-                 CV_Assert(rangeScale >= 0);
-                 content.convertTo(content, CV_32FC1, rangeScale,
-                                   rangeScale * cvRound(minVal / rangeScale));
+ void TFImporter::parseBias(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     const int num_inputs = layer.input_size();
  
-                 tensor->set_dtype(tensorflow::DT_FLOAT);
-                 tensor->set_tensor_content(content.data, content.total() * content.elemSize1());
+     CV_CheckGT(num_inputs, 0, "");
+     bool haveConst = false;
+     for(int ii = 0; !haveConst && ii < num_inputs; ++ii)
+     {
+         Pin input = parsePin(layer.input(ii));
+         haveConst = value_id.find(input.name) != value_id.end();
+     }
+     CV_Assert(!haveConst || num_inputs == 2);
  
-                 net.mutable_node(tensorId)->set_name(name);
-                 CV_Assert(const_layers.insert(std::make_pair(name, tensorId)).second);
-                 layers_to_ignore.insert(name);
-                 continue;
-             }
-             else if (type != "Const")
-                 continue;  // only Const parameters are supported
+     if (haveConst)
+     {
+         Mat values = getTensorContent(getConstBlob(layer, value_id));
+         CV_Assert(values.type() == CV_32FC1);
+         if (type == "Sub")
+             values *= -1.0f;
  
-             if (layer.attr().find("value") != layer.attr().end())
-             {
-                 CV_Assert(const_layers.insert(std::make_pair(name, li)).second);
-             }
-             layers_to_ignore.insert(name);
+         int id;
+         if (values.total() == 1)  // is a scalar.
+         {
+             layerParams.set("shift", values.at<float>(0));
+             id = dstNet.addLayer(name, "Power", layerParams);
          }
-         catch (const std::exception& e)
+         else  // is a vector
          {
-             CV_LOG_ERROR(NULL, "DNN/TF: Can't handle node='" << name << "'. Exception: " << e.what());
-             throw;
+             layerParams.blobs.resize(1, values);
+             id = dstNet.addLayer(name, "Shift", layerParams);
+         }
+         layer_id[name] = id;
+         // one input only
+         connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+     }
+     else
+     {
+         layerParams.set("operation", "sum");
+         if (type == "Sub")
+         {
+             static float subCoeffs[] = {1.f, -1.f};
+             layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
+         }
+         int id = dstNet.addLayer(name, "Eltwise", layerParams);
+         layer_id[name] = id;
+         for (int ii = 0; ii < num_inputs; ii++)
+         {
+             Pin inp = parsePin(layer.input(ii));
+             if (layer_id.find(inp.name) == layer_id.end())
+                 CV_Error(Error::StsError, "Input layer not found: " + inp.name);
+             connect(layer_id, dstNet, inp, id, ii);
          }
      }
-     CV_LOG_DEBUG(NULL, "DNN/TF: layers_to_ignore.size() = " << layers_to_ignore.size());
  }
  
- // If all inputs of specific layer have the same data layout we can say that
- // this layer's output has this data layout too. Returns DATA_LAYOUT_UNKNOWN otherwise.
- DataLayout TFImporter::predictOutputDataLayout(const tensorflow::NodeDef& layer)
+ void TFImporter::parseMatMul(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
  {
-     DataLayout layout = getDataLayout(layer);
-     if (layout != DATA_LAYOUT_UNKNOWN)
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckEQ(num_inputs, 2, "");
+     // For the object detection networks, TensorFlow Object Detection API
+     // predicts deltas for bounding boxes in yxYX (ymin, xmin, ymax, xmax)
+     // order. We can manage it at DetectionOutput layer parsing predictions
+     // or shuffle last Faster-RCNN's matmul weights.
+     bool locPredTransposed = hasLayerAttr(layer, "loc_pred_transposed") &&
+                              getLayerAttr(layer, "loc_pred_transposed").b();
+     layerParams.set("bias_term", false);
+     layerParams.blobs.resize(1);
+     StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");  // FIXIT Use layers fusion instead
+     if (next_layers.empty())
      {
-         CV_LOG_DEBUG(NULL, "DNN/TF: predictOutputDataLayout(" << layer.name() << " @ " << layer.op() << ") => " << (int)layout << " (from attrs)");
-         return layout;
+         next_layers = getNextLayers(net, name, "Add");
      }
+     if (next_layers.size() == 1) {
+         layerParams.set("bias_term", true);
+         layerParams.blobs.resize(2);
  
-     // Determine layout by layer's inputs
-     for (int i = 0, n = layer.input_size(); i < n; ++i)
-     {
-         std::map<String, DataLayout>::const_iterator it = data_layouts.find(getNodeName(layer.input(i)));
-         if (it != data_layouts.end())
+         int weights_layer_index = next_layers[0].second;
+         blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
+         ExcludeLayer(net, weights_layer_index, 0, false);
+         layers_to_ignore.insert(next_layers[0].first);
+         if (locPredTransposed)
          {
-             if (layout != DATA_LAYOUT_UNKNOWN)
+             const int numWeights = layerParams.blobs[1].total();
+             float* biasData = reinterpret_cast<float*>(layerParams.blobs[1].data);
+             CV_Assert(numWeights % 4 == 0);
+             for (int i = 0; i < numWeights; i += 2)
              {
-                 if (it->second != layout && it->second != DATA_LAYOUT_UNKNOWN)
-                     return DATA_LAYOUT_UNKNOWN;
+                 std::swap(biasData[i], biasData[i + 1]);
              }
-             else
-                 layout = it->second;
          }
      }
  
-     if (layout != DATA_LAYOUT_UNKNOWN)
-     {
-         CV_LOG_DEBUG(NULL, "DNN/TF: predictOutputDataLayout(" << layer.name() << " @ " << layer.op() << ") => " << (int)layout << " (from inputs)");
-         return layout;
-     }
-     // Determine layout by layer's consumers recursively.
-     std::map<String, DataLayout>::const_iterator it = data_layouts.find(layer.name());
-     CV_Assert(it != data_layouts.end());
-     return it->second;
- }
- void TFImporter::populateNet()
- {
-     CV_Assert(netBin.ByteSize() || netTxt.ByteSize());
-     CV_LOG_INFO(NULL, "DNN/TF: parsing model"
-         << (netBin.has_versions() ? cv::format(" produced by TF v%d (min_consumer=%d)", (int)netBin.versions().producer(), (int)netBin.versions().min_consumer()) : cv::String(" (N/A version info)"))
-         << ". Number of nodes = " << netBin.node_size()
-     );
-     if (netTxt.ByteSize())
+     int kernel_blob_index = -1;
+     const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id, -1, &kernel_blob_index);
+     const String kernelTensorName = layer.input(kernel_blob_index);
+     std::map<String, Mat>::iterator sharedWeightsIt = sharedWeights.find(kernelTensorName);
+     if (sharedWeightsIt == sharedWeights.end())
      {
-         CV_LOG_INFO(NULL, "DNN/TF: parsing config"
-             << (netTxt.has_versions() ? cv::format(" produced by TF v%d (min_consumer=%d)", (int)netTxt.versions().producer(), (int)netTxt.versions().min_consumer()) : cv::String(" (N/A version info)"))
-             << ". Number of nodes = " << netTxt.node_size()
-         );
-         RemoveIdentityOps(netBin);
-         CV_LOG_DEBUG(NULL, "DNN/TF: RemoveIdentityOps(model) => " << netBin.node_size() << " nodes");
-         RemoveIdentityOps(netTxt);
-         CV_LOG_DEBUG(NULL, "DNN/TF: RemoveIdentityOps(config) => " << netTxt.node_size() << " nodes");
-         sortByExecutionOrder(netTxt);
-         CV_LOG_DEBUG(NULL, "DNN/TF: sortByExecutionOrder(config) => " << netTxt.node_size() << " nodes");
+         blobFromTensor(kernelTensor, layerParams.blobs[0]);
+         releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
+         sharedWeights[kernelTensorName] = layerParams.blobs[0];
      }
      else
      {
-         removePhaseSwitches(netBin);
-         CV_LOG_DEBUG(NULL, "DNN/TF: removePhaseSwitches(model) => " << netBin.node_size() << " nodes");
+         layerParams.blobs[0] = sharedWeightsIt->second;
+     }
  
-         RemoveIdentityOps(netBin);
-         CV_LOG_DEBUG(NULL, "DNN/TF: RemoveIdentityOps(model) => " << netBin.node_size() << " nodes");
+     if (kernel_blob_index == 1) { // In this case output is computed by x*W formula - W should be transposed
+         Mat data = layerParams.blobs[0].t();
+         layerParams.blobs[0] = data.clone();
+     }
  
-         simplifySubgraphs(netBin);
-         CV_LOG_DEBUG(NULL, "DNN/TF: simplifySubgraphs(model) => " << netBin.node_size() << " nodes");
-         sortByExecutionOrder(netBin);
-         CV_LOG_DEBUG(NULL, "DNN/TF: sortByExecutionOrder(model) => " << netBin.node_size() << " nodes");
+     layerParams.set("num_output", layerParams.blobs[0].size[0]);
+     if (locPredTransposed)
+     {
+         CV_Assert(layerParams.blobs[0].dims == 2);
+         for (int i = 0; i < layerParams.blobs[0].size[0]; i += 2)
+         {
+             cv::Mat src = layerParams.blobs[0].row(i);
+             cv::Mat dst = layerParams.blobs[0].row(i + 1);
+             std::swap_ranges(src.begin<float>(), src.end<float>(), dst.begin<float>());
+         }
      }
  
-     tensorflow::GraphDef& net = netTxt.ByteSize() != 0 ? netTxt : netBin;
+     int id = dstNet.addLayer(name, "InnerProduct", layerParams);
+     layer_id[name] = id;
  
-     int layersSize = net.node_size();
+     // one input only
+     int input_blob_index = kernel_blob_index == 0 ? 1 : 0;
+     connect(layer_id, dstNet, parsePin(layer.input(input_blob_index)), id, 0);
+     data_layouts[name] = DATA_LAYOUT_PLANAR;
+ }
  
-     // Pre-fill data layouts where they are set explicitly.
-     // Assuming that nodes are in topological order
-     for (int i = layersSize - 1; i >= 0; --i)
+ void TFImporter::parseReshape(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckGT(num_inputs, 0, "");
+     Pin inpId = parsePin(layer.input(0));
+     DataLayout inpLayout = getDataLayout(layer.input(0), data_layouts);
+     // There are two possible implementations: reshape an input using
+     // predefined sizes or use a second input blob as a source of new shape.
+     if (value_id.find(layer.input(1)) != value_id.end())
      {
-         const tensorflow::NodeDef& layer = net.node(i);
-         std::string name = layer.name();
-         CV_LOG_DEBUG(NULL, "DNN/TF: node(" << i << " - '" << name << "') propagating layout...");
-         try
+         Mat newShape = getTensorContent(getConstBlob(layer, value_id, 1));
+         int newShapeSize = newShape.total();
+         bool hasSwap = false;
+         if (newShapeSize == 4 && hasAllOnes(newShape, 0, 2))
          {
-             DataLayout layout = getDataLayout(layer);
-             std::map<String, DataLayout>::iterator it = data_layouts.find(name);
-             if (it != data_layouts.end())
+             // NHWC->NCHW
+             std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3));
+             std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2));
+             hasSwap = true;
+         }
+         if (inpLayout == DATA_LAYOUT_NHWC)
+         {
+             if (newShapeSize >= 2 || newShape.at<int>(1) == 1)
              {
-                 if (layout != DATA_LAYOUT_UNKNOWN)
+                 int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
+                 addPermuteLayer(order, name + "/nhwc", inpId);
+                 if (newShapeSize < 4)
                  {
-                     if (it->second == DATA_LAYOUT_UNKNOWN)
-                         it->second = layout;
-                     else if (it->second != layout)
-                     {
-                         it->second = DATA_LAYOUT_UNKNOWN;
-                         layout = DATA_LAYOUT_UNKNOWN;
-                     }
+                     inpLayout = DATA_LAYOUT_NCHW;
                  }
                  else
-                     layout = it->second;
-             }
-             else
-                 data_layouts[name] = layout;
-             // Specify input layers to have the same data layout.
-             for (int j = 0; j < layer.input_size(); ++j)
-             {
-                 name = getNodeName(layer.input(j));
-                 it = data_layouts.find(name);
-                 if (it != data_layouts.end())
                  {
-                     if (layout != DATA_LAYOUT_UNKNOWN)
-                     {
-                         if (it->second == DATA_LAYOUT_UNKNOWN)
-                             it->second = layout;
-                         else if (it->second != layout)
-                             it->second = DATA_LAYOUT_UNKNOWN;
-                     }
+                     inpLayout = DATA_LAYOUT_NHWC;
                  }
-                 else
-                     data_layouts[name] = layout;
              }
          }
-         catch (const std::exception& e)
-         {
-             CV_LOG_ERROR(NULL, "DNN/TF: Can't propagate layout for node='" << name << "'. Exception: " << e.what());
-             throw;
-         }
-     }
-     addConstNodes(netBin, value_id, layers_to_ignore);
-     addConstNodes(netTxt, value_id, layers_to_ignore);
+         layerParams.set("dim", DictValue::arrayInt<int*>(newShape.ptr<int>(), newShapeSize));
  
+         int id = dstNet.addLayer(name, "Reshape", layerParams);
+         layer_id[name] = id;
  
-     for (int li = 0; li < layersSize; li++)
-     {
-         const tensorflow::NodeDef& layer = net.node(li);
+         // one input only
+         connect(layer_id, dstNet, inpId, id, 0);
+         inpId = Pin(name);
  
-         const std::string name = layer.name();
-         const std::string type = layer.op();
-         const int ninputs = layer.input_size();
-         CV_LOG_DEBUG(NULL, "DNN/TF: (" << li << "/" << layersSize << ") Parse layer " << name << " @ " << type << " with " << ninputs << " inputs");
+         if ((inpLayout == DATA_LAYOUT_NHWC || inpLayout == DATA_LAYOUT_UNKNOWN || inpLayout == DATA_LAYOUT_PLANAR) &&
+             newShapeSize == 4 && !hasSwap)
+         {
+             int order[] = {0, 3, 1, 2};  // Transform back to OpenCV's NCHW.
+             addPermuteLayer(order, name + "/nchw", inpId);
+             inpLayout = DATA_LAYOUT_NCHW;
+         }
  
-         parseNode(layer);
+         data_layouts[name] = newShapeSize == 2 ? DATA_LAYOUT_PLANAR : inpLayout;
      }
-     for (size_t i = 0; i < netInputsNames.size(); i++)
+     else
      {
-         CV_LOG_DEBUG(NULL, "DNN/TF: Model input: " << i << " - '" << netInputsNames[i] << "'");
-         CV_Assert(!netInputsNames[i].empty());
+         int id = dstNet.addLayer(name, "Reshape", layerParams);
+         layer_id[name] = id;
+         connect(layer_id, dstNet, inpId, id, 0);
+         connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
+         data_layouts[name] = inpLayout;
      }
-     dstNet.setInputsNames(netInputsNames);
-     CV_LOG_DEBUG(NULL, "DNN/TF: ===================== Import completed =====================");
  }
  
- void TFImporter::addPermuteLayer(const int* order, const std::string& permName, Pin& inpId)
+ void TFImporter::parseFlatten(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
  {
-     LayerParams permLP;
-     permLP.set("order", DictValue::arrayInt<const int*>(order, 4));
-     CV_Assert(layer_id.find(permName) == layer_id.end());
-     int permId = dstNet.addLayer(permName, "Permute", permLP);
-     layer_id[permName] = permId;
-     connect(layer_id, dstNet, inpId, permId, 0);
-     inpId = Pin(permName);
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     const int num_inputs = layer.input_size();
+     CV_CheckGT(num_inputs, 0, "");
+     Pin inpId = parsePin(layer.input(0));
+     int inpLayout = getDataLayout(layer.input(0), data_layouts);
+     if (type == "Squeeze")
+     {
+         CV_Assert(hasLayerAttr(layer, "squeeze_dims"));
+         const tensorflow::AttrValue& dims = getLayerAttr(layer, "squeeze_dims");
+         std::vector<int> dimsVector(dims.list().i_size());
+         for (int i = 0; i < dimsVector.size(); ++i)
+             dimsVector[i] = dims.list().i(i);
+         // Flatten layer can squeeze dimensions range into one.
+         std::sort(dimsVector.begin(), dimsVector.end());
+         for (int i = 1; i < dimsVector.size(); ++i)
+         {
+             if (dimsVector[i] != dimsVector[i - 1] + 1)
+                 CV_Error(Error::StsNotImplemented, "Unsupported squeeze configuration");
+         }
+         int start = dimsVector.front() - 1, end = dimsVector.back();
+         if (start == -1 && end == 0)  // squeeze 0th dimension
+         {
+             start = 0;
+             end = 1;
+         }
+         layerParams.set("axis", start);
+         layerParams.set("end_axis", end);
+     }
+     if (inpLayout == DATA_LAYOUT_NHWC)
+     {
+         LayerParams permLP;
+         int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
+         permLP.set("order", DictValue::arrayInt<int*>(order, 4));
+         std::string permName = name + "/nchw";
+         CV_Assert(layer_id.find(permName) == layer_id.end());
+         int permId = dstNet.addLayer(permName, "Permute", permLP);
+         layer_id[permName] = permId;
+         connect(layer_id, dstNet, inpId, permId, 0);
+         inpId = Pin(permName);
+     }
+     int id = dstNet.addLayer(name, "Flatten", layerParams);
+     layer_id[name] = id;
+     connect(layer_id, dstNet, inpId, id, 0);
+     data_layouts[name] = DATA_LAYOUT_PLANAR;
  }
  
- void TFImporter::parseNode(const tensorflow::NodeDef& layer_)
+ void TFImporter::parseTranspose(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
  {
-     tensorflow::NodeDef layer = layer_;
-     tensorflow::GraphDef& net = netTxt.ByteSize() != 0 ? netTxt : netBin;
-     /*const*/ std::string name = layer.name();
-     /*const*/ std::string type = layer.op();
-     /*const*/ int num_inputs = layer.input_size();
-     try
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckGT(num_inputs, 0, "");
+     Mat perm = getTensorContent(getConstBlob(layer, value_id, 1));
+     CV_Assert(perm.type() == CV_32SC1);
+     int* permData = (int*)perm.data;
+     if (perm.total() == 4)
      {
-         LayerParams layerParams;
-         if (layers_to_ignore.find(name) != layers_to_ignore.end())
+         // Only NHWC <-> NCHW permutations are allowed. OpenCV always
+         // keeps the NCHW layout this way.
+         int inpLayout = getDataLayout(layer.input(0), data_layouts);
+         std::string type = "Identity";
+         if (inpLayout == DATA_LAYOUT_NHWC)
          {
-             CV_LOG_DEBUG(NULL, "DNN/TF:     ignored");
-             return;
+             if (permData[0] == 0 && permData[1] == 3 && permData[2] == 1 && permData[3] == 2)
+             {
+                 // in TensorFlow: NHWC->NCHW
+                 // in OpenCV: NCHW->NCHW
+                 data_layouts[name] = DATA_LAYOUT_NCHW;
+             }
+             else if (permData[0] == 0 && permData[1] == 1 && permData[2] == 2 && permData[3] == 3)
+             {
+                 // in TensorFlow: NHWC->NHWC
+                 // in OpenCV: NCHW->NCHW
+                 data_layouts[name] = DATA_LAYOUT_NHWC;
+             }
+             else if (permData[0] == 0 && permData[1] == 3 && permData[2] == 2 && permData[3] == 1)
+             {
+                 // in TensorFlow: NHWC->NCWH
+                 // in OpenCV: NCHW->NCWH
+                 int permData[] = {0, 1, 3, 2};
+                 layerParams.set("order", DictValue::arrayInt<int*>(permData, perm.total()));
+                 data_layouts[name] = DATA_LAYOUT_NCHW;  // we keep track NCHW because channels position only matters
+                 type = "Permute";
+             }
+             else
+                 CV_Error(Error::StsParseError, "Only NHWC <-> NCHW permutations are allowed.");
          }
-         DataLayout predictedLayout = predictOutputDataLayout(layer);
-         data_layouts[name] = predictedLayout;
-         if (type == "Conv2D" || type == "SpaceToBatchND" || type == "DepthwiseConv2dNative" || type == "Pad" || type == "MirrorPad" || type == "Conv3D")
+         else if (inpLayout == DATA_LAYOUT_NCHW)
          {
-             CV_CheckGT(num_inputs, 0, "");
-             // The first node of dilated convolution subgraph.
-             // Extract input node, dilation rate and paddings.
-             std::string input = layer.input(0);
-             StrIntVector next_layers;
-             if (type == "SpaceToBatchND" || type == "Pad")
+             if (permData[0] == 0 && permData[1] == 2 && permData[2] == 3 && permData[3] == 1)
              {
-                 next_layers = getNextLayers(net, name, "Conv2D");
-                 if (next_layers.empty())
-                     next_layers = getNextLayers(net, name, "DepthwiseConv2dNative");
+                 // in TensorFlow: NCHW->NHWC
+                 // in OpenCV: NCHW->NCHW
+                 data_layouts[name] = DATA_LAYOUT_NHWC;
              }
-             if (type == "SpaceToBatchND")
+             else if (permData[0] == 0 && permData[1] == 1 && permData[2] == 2 && permData[3] == 3)
              {
-                 // op: "SpaceToBatchND"
-                 // input: "input"
-                 // input: "SpaceToBatchND/block_shape"
-                 // input: "SpaceToBatchND/paddings"
-                 CV_CheckEQ(num_inputs, 3, "");
+                 // in TensorFlow: NCHW->NCHW
+                 // in OpenCV: NCHW->NCHW
+                 data_layouts[name] = DATA_LAYOUT_NCHW;
+             }
+             else
+                 CV_Error(Error::StsParseError, "Only NHWC <-> NCHW permutations are allowed.");
+         }
+         int id = dstNet.addLayer(name, type, layerParams);
+         layer_id[name] = id;
+         connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+     }
+     else
+     {
+         layerParams.set("order", DictValue::arrayInt<int*>(permData, perm.total()));
  
-                 DictValue dilation = parseDims(getConstBlob(layer, value_id, 1));
-                 CV_Assert(dilation.size() == 2);
-                 layerParams.set("dilation_h", dilation.get<int>(0));
-                 layerParams.set("dilation_w", dilation.get<int>(1));
+         int id = dstNet.addLayer(name, "Permute", layerParams);
+         layer_id[name] = id;
  
-                 Mat paddings;
-                 parseTensor<int>(getConstBlob(layer, value_id, 2), paddings);
+         // one input only
+         connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+         data_layouts[name] = DATA_LAYOUT_UNKNOWN;
+     }
+ }
  
-                 // paddings is a 2x2 matrix: [[top, bot], [left, right]]
-                 layerParams.set("pad_h", paddings.at<float>(0));
-                 layerParams.set("pad_w", paddings.at<float>(2));
+ void TFImporter::parseConstant(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
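+     // Const nodes are collected into value_id beforehand and fetched on
+     // demand via getConstBlob(), so there is nothing to do here.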
+ }
  
-                 CV_Assert(next_layers.size() == 1);
-                 layers_to_ignore.insert(next_layers[0].first);
+ void TFImporter::parseLrn(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
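+     // op: "LRN"
+     // TensorFlow's depth_radius is a one-sided radius, while OpenCV's
+     // local_size is the full window width, hence local_size = 2*radius + 1.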
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-                 // FIXIT don't override, rewrite this code
-                 layer = net.node(next_layers[0].second);
-                 name = layer.name();
-                 type = layer.op();
-                 num_inputs = layer.input_size();
-                 CV_LOG_DEBUG(NULL, "DNN/TF:     switched to layer " << name << " @ " << type << ") with " << num_inputs << " inputs");
-             }
-             else if (type == "Pad" || type == "MirrorPad")
-             {
-                 Mat paddings = getTensorContent(getConstBlob(layer, value_id, 1));
-                 CV_Assert(paddings.type() == CV_32SC1);
-                 if (paddings.total() == 8)
-                 {
-                     // Perhaps, we have NHWC padding dimensions order.
-                     //  N    H    W    C
-                     // 0 1  2 3  4 5  6 7
-                     std::swap(paddings.at<int32_t>(2), paddings.at<int32_t>(6));
-                     std::swap(paddings.at<int32_t>(3), paddings.at<int32_t>(7));
-                     //  N    C    W    H
-                     // 0 1  2 3  4 5  6 7
-                     std::swap(paddings.at<int32_t>(4), paddings.at<int32_t>(6));
-                     std::swap(paddings.at<int32_t>(5), paddings.at<int32_t>(7));
-                     //  N    C    H    W
-                     // 0 1  2 3  4 5  6 7
-                 }
+     CV_CheckGT(num_inputs, 0, "");
+     if(hasLayerAttr(layer, "alpha")) {
+         layerParams.set("alpha", getLayerAttr(layer, "alpha").f());
+     }
+     if(hasLayerAttr(layer, "beta")) {
+         layerParams.set("beta", getLayerAttr(layer, "beta").f());
+     }
+     if(hasLayerAttr(layer, "depth_radius")) {
+         int radius = (int)getLayerAttr(layer, "depth_radius").i();
+         layerParams.set("local_size", 2*radius + 1);
+     }
+     if(hasLayerAttr(layer, "bias")) {
+         layerParams.set("bias", getLayerAttr(layer, "bias").f());
+     }
+     layerParams.set("norm_by_size", false);
  
-                 if (next_layers.empty() || paddings.total() != 8 ||
-                     paddings.at<int32_t>(4) != paddings.at<int32_t>(5) ||
-                     paddings.at<int32_t>(6) != paddings.at<int32_t>(7) || type == "MirrorPad")
-                 {
-                     // Just a single padding layer.
-                     layerParams.set("paddings", DictValue::arrayInt<int*>((int*)paddings.data, paddings.total()));
-                     if (type == "MirrorPad")
-                         layerParams.set("type", "reflect");
+     int id = dstNet.addLayer(name, "LRN", layerParams);
+     layer_id[name] = id;
  
-                     int id = dstNet.addLayer(name, "Padding", layerParams);
-                     layer_id[name] = id;
+     connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+ }
  
-                     connect(layer_id, dstNet, parsePin(input), id, 0);
-                     return;
-                 }
-                 else
-                 {
-                     // Merge with subsequent convolutional layer.
-                     CV_Assert(next_layers.size() == 1);
+ void TFImporter::parseConcat(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
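+     // op: "Concat"   (axis is the first input)
+     // op: "ConcatV2" (axis is the last input)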
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     const int num_inputs = layer.input_size();
  
-                     layerParams.set("pad_h", paddings.at<int32_t>(4));
-                     layerParams.set("pad_w", paddings.at<int32_t>(6));
+     CV_CheckGT(num_inputs, 0, "");
+     int axisId = (type == "Concat" ? 0 : num_inputs - 1);
+     int axis = getConstBlob(layer, value_id, axisId).int_val().Get(0);
  
-                     layers_to_ignore.insert(next_layers[0].first);
+     if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
+         axis = toNCHW(axis);
+     else if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NDHWC)
+         axis = toNCDHW(axis);
+     layerParams.set("axis", axis);
  
-                     // FIXIT don't override, rewrite this code
-                     layer = net.node(next_layers[0].second);
-                     name = layer.name();
-                     type = layer.op();
-                     num_inputs = layer.input_size();
-                     CV_LOG_DEBUG(NULL, "DNN/TF:     switched to layer " << name << " @ " << type << ") with " << num_inputs << " inputs");
-                 }
-             }
+     // input(0) or input(n-1) is concat_dim
+     int from = (type == "Concat" ? 1 : 0);
+     int to = (type == "Concat" ? num_inputs : num_inputs - 1);
  
-             // For the object detection networks, TensorFlow Object Detection API
-             // predicts deltas for bounding boxes in yxYX (ymin, xmin, ymax, xmax)
-             // order. We can manage it at DetectionOutput layer parsing predictions
-             // or shuffle last convolution's weights.
-             bool locPredTransposed = hasLayerAttr(layer, "loc_pred_transposed") &&
-                                      getLayerAttr(layer, "loc_pred_transposed").b();
+     for (int ii = from; ii < to; ii++)
+     {
+         Pin inp = parsePin(layer.input(ii));
+         if (layer_id.find(inp.name) == layer_id.end())
+         {
+             // There are constant inputs.
+             LayerParams lp;
+             lp.name = inp.name;
+             lp.type = "Const";
+             lp.blobs.resize(1);
+             blobFromTensor(getConstBlob(layer, value_id, ii), lp.blobs.back());
+             CV_Assert_N(!lp.blobs[0].empty(), lp.blobs[0].type() == CV_32F);
+             int constInpId = dstNet.addLayer(lp.name, lp.type, lp);
+             layer_id[lp.name] = constInpId;
+         }
+     }
  
-             layerParams.set("bias_term", false);
-             layerParams.blobs.resize(1);
+     int id = dstNet.addLayer(name, "Concat", layerParams);
+     layer_id[name] = id;
  
-             next_layers = getNextLayers(net, name, "BiasAdd");
-             if (next_layers.size() == 1) {
-                 layerParams.set("bias_term", true);
-                 layerParams.blobs.resize(2);
+     for (int ii = from; ii < to; ii++)
+     {
+         Pin inp = parsePin(layer.input(ii));
+         if (layer_id.find(inp.name) == layer_id.end())
+             CV_Error(Error::StsError, "Input layer not found: " + inp.name);
+         connect(layer_id, dstNet, inp, id, ii - from);
+     }
+ }
  
-                 int weights_layer_index = next_layers[0].second;
+ void TFImporter::parseMaxPool(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
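+     // op: "MaxPool" / "MaxPool3D" -> OpenCV "Pooling" layer with pool=max.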
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-                 blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
-                 ExcludeLayer(net, weights_layer_index, 0, false);
-                 layers_to_ignore.insert(next_layers[0].first);
+     CV_CheckGT(num_inputs, 0, "");
+     layerParams.set("pool", "max");
  
-                 // Shuffle bias from yxYX to xyXY.
-                 if (locPredTransposed)
-                 {
-                     const int numWeights = layerParams.blobs[1].total();
-                     float* biasData = reinterpret_cast<float*>(layerParams.blobs[1].data);
-                     CV_Assert(numWeights % 4 == 0);
-                     for (int i = 0; i < numWeights; i += 2)
-                     {
-                         std::swap(biasData[i], biasData[i + 1]);
-                     }
-                 }
-             }
+     setKSize(layerParams, layer);
+     setStrides(layerParams, layer);
+     setPadding(layerParams, layer);
+     // Test_TensorFlow_nets.EAST_text_detection/1, NGRAPH/CPU
+     layerParams.set("ceil_mode", false);
  
-             int kernelTensorInpId = -1;
-             const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id, -1, &kernelTensorInpId);
-             const String kernelTensorName = layer.input(kernelTensorInpId);
-             std::map<String, Mat>::iterator sharedWeightsIt = sharedWeights.find(kernelTensorName);
-             if (sharedWeightsIt == sharedWeights.end())
-             {
-                 kernelFromTensor(kernelTensor, layerParams.blobs[0]);
-                 releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
-                 int* kshape = layerParams.blobs[0].size.p;
-                 const int outCh = kshape[0];
-                 const int inCh = kshape[1];
-                 const int height = kshape[2];
-                 const int width = kshape[3];
-                 if (type == "DepthwiseConv2dNative")
-                 {
-                     CV_Assert(!locPredTransposed);
-                     const int chMultiplier = kshape[0];
-                     Mat copy = layerParams.blobs[0].clone();
-                     float* src = (float*)copy.data;
-                     float* dst = (float*)layerParams.blobs[0].data;
-                     for (int i = 0; i < chMultiplier; ++i)
-                         for (int j = 0; j < inCh; ++j)
-                             for (int s = 0; s < height * width; ++s)
-                                 {
-                                     int src_i = (i * inCh + j) * height * width + s;
-                                     int dst_i = (j * chMultiplier + i) * height* width + s;
-                                     dst[dst_i] = src[src_i];
-                                 }
-                     // TODO Use reshape instead
-                     kshape[0] = inCh * chMultiplier;
-                     kshape[1] = 1;
-                     size_t* kstep = layerParams.blobs[0].step.p;
-                     kstep[0] = kstep[1]; // fix steps too
-                 }
+     int id = dstNet.addLayer(name, "Pooling", layerParams);
+     layer_id[name] = id;
  
-                 // Shuffle output channels from yxYX to xyXY.
-                 if (locPredTransposed)
-                 {
-                     const int slice = height * width * inCh;
-                     for (int i = 0; i < outCh; i += 2)
-                     {
-                         cv::Mat src(1, slice, CV_32F, layerParams.blobs[0].ptr<float>(i));
-                         cv::Mat dst(1, slice, CV_32F, layerParams.blobs[0].ptr<float>(i + 1));
-                         std::swap_ranges(src.begin<float>(), src.end<float>(), dst.begin<float>());
-                     }
-                 }
-                 sharedWeights[kernelTensorName] = layerParams.blobs[0];
-             }
-             else
-             {
-                 layerParams.blobs[0] = sharedWeightsIt->second;
-             }
-             Mat weights = layerParams.blobs[0];
-             layerParams.set("kernel_size",  DictValue::arrayInt(&weights.size[2], weights.dims - 2));
+     connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+ }
  
-             layerParams.set("num_output", layerParams.blobs[0].size[0]);
+ void TFImporter::parseAvgPool(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
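+     // op: "AvgPool" / "AvgPool3D" -> OpenCV "Pooling" layer with pool=ave,
+     // excluding the padded area from the averaging (matching TensorFlow).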
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-             setStrides(layerParams, layer);
-             if (!layerParams.has("pad_w") && !layerParams.has("pad_h"))
-                 setPadding(layerParams, layer);
+     CV_CheckGT(num_inputs, 0, "");
+     layerParams.set("pool", "ave");
+     layerParams.set("ave_pool_padded_area", false);
+     setKSize(layerParams, layer);
+     setStrides(layerParams, layer);
+     setPadding(layerParams, layer);
  
-             // The final node of dilated convolution subgraph.
-             next_layers = getNextLayers(net, name, "BatchToSpaceND");
-             if (!next_layers.empty())
-             {
-                 CV_Assert(next_layers.size() == 1);
-                 ExcludeLayer(net, next_layers[0].second, 0, false);
-                 layers_to_ignore.insert(next_layers[0].first);
-             }
+     int id = dstNet.addLayer(name, "Pooling", layerParams);
+     layer_id[name] = id;
  
-             int id = dstNet.addLayer(name, "Convolution", layerParams);
-             layer_id[name] = id;
+     connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+ }
  
-             // one input only
-             connect(layer_id, dstNet, parsePin(input), id, 0);
+ void TFImporter::parseMaxPoolGrad(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
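+     // op: "MaxPoolGrad" (orig_input, orig_output, grad) is rewired to
+     // OpenCV's "MaxUnpool" inputs as (gradient, pooling indices, original input).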
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
+     CV_CheckEQ(num_inputs, 3, "");
  
-             if (getDataLayout(name, data_layouts) == DATA_LAYOUT_UNKNOWN)
-                 data_layouts[name] = DATA_LAYOUT_NHWC;
-         }
-         else if (type == "BiasAdd" || type == "Add" || type == "AddV2" || type == "Sub" || type=="AddN")
-         {
-             CV_CheckGT(num_inputs, 0, "");
-             bool haveConst = false;
-             for(int ii = 0; !haveConst && ii < num_inputs; ++ii)
-             {
-                 Pin input = parsePin(layer.input(ii));
-                 haveConst = value_id.find(input.name) != value_id.end();
-             }
-             CV_Assert(!haveConst || num_inputs == 2);
+     layerParams.set("pool_k_h", 0);
+     layerParams.set("pool_k_w", 0);
+     layerParams.set("pool_stride_h", 0);
+     layerParams.set("pool_stride_w", 0);
+     layerParams.set("pool_pad_h", 0);
+     layerParams.set("pool_pad_w", 0);
  
-             if (haveConst)
-             {
-                 Mat values = getTensorContent(getConstBlob(layer, value_id));
-                 CV_Assert(values.type() == CV_32FC1);
-                 if (type == "Sub")
-                     values *= -1.0f;
+     int id = dstNet.addLayer(name, "MaxUnpool", layerParams);
+     layer_id[name] = id;
  
-                 int id;
-                 if (values.total() == 1)  // is a scalar.
-                 {
-                     layerParams.set("shift", values.at<float>(0));
-                     id = dstNet.addLayer(name, "Power", layerParams);
-                 }
-                 else  // is a vector
-                 {
-                     layerParams.blobs.resize(1, values);
-                     id = dstNet.addLayer(name, "Shift", layerParams);
-                 }
-                 layer_id[name] = id;
+     connect(layer_id, dstNet, parsePin(layer.input(2)), id, 0);
+     connect(layer_id, dstNet, parsePin(layer.input(1) + ":1"), id, 1);
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 2);
+ }
  
-                 // one input only
-                 connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-             }
-             else
-             {
-                 layerParams.set("operation", "sum");
-                 if (type == "Sub")
-                 {
-                     static float subCoeffs[] = {1.f, -1.f};
-                     layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
-                 }
+ void TFImporter::parsePlaceholder(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
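+     // op: "Placeholder" becomes a network input; boolean placeholders
+     // (train/test flags) are not registered as inputs.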
+     const std::string& name = layer.name();
  
-                 int id = dstNet.addLayer(name, "Eltwise", layerParams);
-                 layer_id[name] = id;
+     DataLayout predictedLayout = data_layouts[name];
  
-                 for (int ii = 0; ii < num_inputs; ii++)
-                 {
-                     Pin inp = parsePin(layer.input(ii));
-                     if (layer_id.find(inp.name) == layer_id.end())
-                         CV_Error(Error::StsError, "Input layer not found: " + inp.name);
-                     connect(layer_id, dstNet, inp, id, ii);
-                 }
-             }
+     if (!hasLayerAttr(layer, "dtype") ||
+         getLayerAttr(layer, "dtype").type() != tensorflow::DT_BOOL)  // If input is not a train/test flag.
+     {
+         netInputsNames.push_back(name);
+         layer_id[name] = 0;
+     }
+     tensorflow::TensorShapeProto shape;
+     if (hasLayerAttr(layer, "shape"))
+         shape = getLayerAttr(layer, "shape").shape();
+     else if (hasLayerAttr(layer, "_output_shapes"))
+     {
+         tensorflow::AttrValue_ListValue list = getLayerAttr(layer, "_output_shapes").list();
+         if (list.shape_size())
+             shape = list.shape()[0];
+     }
+     if (shape.dim_size())
+     {
+         MatShape dims(shape.dim_size());
+         for (int i = 0; i < dims.size(); ++i)
+             dims[i] = shape.dim(i).size();
+         if (dims.size() == 4 && predictedLayout == DATA_LAYOUT_NHWC)
+         {
+             std::swap(dims[1], dims[3]);  // NHWC->NCWH
+             std::swap(dims[2], dims[3]);  // NCWH->NCHW
+             if (dims[0] == -1)  // It's OK to have undetermined batch size
+                 dims[0] = 1;
          }
-         else if (type == "MatMul")
+         bool hasNeg = false;
+         for (int i = 0; i < dims.size() && !hasNeg; ++i)
          {
-             CV_CheckEQ(num_inputs, 2, "");
+             hasNeg = dims[i] < 0;
+         }
+         if (!hasNeg)
+             netInputShapes.push_back(dims);
+     }
+ }
  
-             // For the object detection networks, TensorFlow Object Detection API
-             // predicts deltas for bounding boxes in yxYX (ymin, xmin, ymax, xmax)
-             // order. We can manage it at DetectionOutput layer parsing predictions
-             // or shuffle last Faster-RCNN's matmul weights.
-             bool locPredTransposed = hasLayerAttr(layer, "loc_pred_transposed") &&
-                                      getLayerAttr(layer, "loc_pred_transposed").b();
+ void TFImporter::parseSplit(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // TODO: determining axis index remapping by input dimensions order of input blob
+     // TODO: slicing input may be Const op
+     // TODO: slicing kernels for convolutions - in current implementation it is impossible
+     // TODO: add parsing num of slices parameter
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckEQ(num_inputs, 2, "");
+     // The first input is the constant split axis (dims tensor);
+     // the optional "num_split" attribute gives the number of output slices.
+     int axis = getConstBlob(layer, value_id, 0).int_val().Get(0);
+     if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
+         axis = toNCHW(axis);
+     layerParams.set("axis", axis);
+     if (hasLayerAttr(layer, "num_split"))
+         layerParams.set("num_split", getLayerAttr(layer, "num_split").i());
+     int id = dstNet.addLayer(name, "Slice", layerParams);
+     layer_id[name] = id;
+     // one input only
+     connect(layer_id, dstNet, parsePin(layer.input(1)), id, 0);
+ }
  
-             layerParams.set("bias_term", false);
-             layerParams.blobs.resize(1);
+ void TFImporter::parseSlice(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: "Slice"
+     // input: "input_node"
+     // input: "Slice/begin"
+     // input: "Slice/size"
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckEQ(num_inputs, 3, "");
+     Mat begins = getTensorContent(getConstBlob(layer, value_id, 1));
+     Mat sizes = getTensorContent(getConstBlob(layer, value_id, 2));
+     CV_Assert_N(!begins.empty(), !sizes.empty());
+     CV_CheckTypeEQ(begins.type(), CV_32SC1, "");
+     CV_CheckTypeEQ(sizes.type(), CV_32SC1, "");
+     if (begins.total() == 4 && getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
+     {
+         // Swap NHWC parameters' order to NCHW.
+         std::swap(*begins.ptr<int32_t>(0, 2), *begins.ptr<int32_t>(0, 3));
+         std::swap(*begins.ptr<int32_t>(0, 1), *begins.ptr<int32_t>(0, 2));
+         std::swap(*sizes.ptr<int32_t>(0, 2), *sizes.ptr<int32_t>(0, 3));
+         std::swap(*sizes.ptr<int32_t>(0, 1), *sizes.ptr<int32_t>(0, 2));
+     }
+     layerParams.set("begin", DictValue::arrayInt((int*)begins.data, begins.total()));
+     layerParams.set("size", DictValue::arrayInt((int*)sizes.data, sizes.total()));
  
-             StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");  // FIXIT Use layers fusion instead
-             if (next_layers.empty())
-             {
-                 next_layers = getNextLayers(net, name, "Add");
-             }
-             if (next_layers.size() == 1) {
-                 layerParams.set("bias_term", true);
-                 layerParams.blobs.resize(2);
+     int id = dstNet.addLayer(name, "Slice", layerParams);
+     layer_id[name] = id;
  
-                 int weights_layer_index = next_layers[0].second;
-                 blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
-                 ExcludeLayer(net, weights_layer_index, 0, false);
-                 layers_to_ignore.insert(next_layers[0].first);
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+ }
  
-                 if (locPredTransposed)
-                 {
-                     const int numWeights = layerParams.blobs[1].total();
-                     float* biasData = reinterpret_cast<float*>(layerParams.blobs[1].data);
-                     CV_Assert(numWeights % 4 == 0);
-                     for (int i = 0; i < numWeights; i += 2)
-                     {
-                         std::swap(biasData[i], biasData[i + 1]);
-                     }
-                 }
-             }
+ void TFImporter::parseStridedSlice(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
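+     // op: "StridedSlice"
+     // Inputs: data plus constant begin/end/strides tensors; only unit strides are supported.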
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckEQ(num_inputs, 4, "");
+     Mat begins = getTensorContent(getConstBlob(layer, value_id, 1));
+     Mat ends = getTensorContent(getConstBlob(layer, value_id, 2));
+     Mat strides = getTensorContent(getConstBlob(layer, value_id, 3));
+     CV_CheckTypeEQ(begins.type(), CV_32SC1, "");
+     CV_CheckTypeEQ(ends.type(), CV_32SC1, "");
+     CV_CheckTypeEQ(strides.type(), CV_32SC1, "");
+     const int num = begins.total();
+     CV_Assert_N(num == ends.total(), num == strides.total());
+     int end_mask = getLayerAttr(layer, "end_mask").i();
+     for (int i = 0; i < num; ++i)
+     {
+         if (ends.at<int>(i) < 0)
+             ends.at<int>(i) -= 1;
+         if (end_mask & (1 << i))
+             ends.at<int>(i) = -1;
+         if (strides.at<int>(i) != 1)
+             CV_Error(Error::StsNotImplemented,
+                      format("StridedSlice with stride %d", strides.at<int>(i)));
+     }
+     if (begins.total() == 4 && getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
+     {
+         // Swap NHWC parameters' order to NCHW.
+         std::swap(begins.at<int>(2), begins.at<int>(3));
+         std::swap(begins.at<int>(1), begins.at<int>(2));
+         std::swap(ends.at<int>(2), ends.at<int>(3));
+         std::swap(ends.at<int>(1), ends.at<int>(2));
+     }
+     layerParams.set("begin", DictValue::arrayInt((int*)begins.data, begins.total()));
+     layerParams.set("end", DictValue::arrayInt((int*)ends.data, ends.total()));
  
-             int kernel_blob_index = -1;
-             const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id, -1, &kernel_blob_index);
-             const String kernelTensorName = layer.input(kernel_blob_index);
-             std::map<String, Mat>::iterator sharedWeightsIt = sharedWeights.find(kernelTensorName);
-             if (sharedWeightsIt == sharedWeights.end())
-             {
-                 blobFromTensor(kernelTensor, layerParams.blobs[0]);
-                 releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
-                 sharedWeights[kernelTensorName] = layerParams.blobs[0];
-             }
-             else
-             {
-                 layerParams.blobs[0] = sharedWeightsIt->second;
-             }
+     int id = dstNet.addLayer(name, "Slice", layerParams);
+     layer_id[name] = id;
  
-             if (kernel_blob_index == 1) { // In this case output is computed by x*W formula - W should be transposed
-                 Mat data = layerParams.blobs[0].t();
-                 layerParams.blobs[0] = data.clone();
-             }
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+ }
  
-             layerParams.set("num_output", layerParams.blobs[0].size[0]);
-             if (locPredTransposed)
-             {
-                 CV_Assert(layerParams.blobs[0].dims == 2);
-                 for (int i = 0; i < layerParams.blobs[0].size[0]; i += 2)
-                 {
-                     cv::Mat src = layerParams.blobs[0].row(i);
-                     cv::Mat dst = layerParams.blobs[0].row(i + 1);
-                     std::swap_ranges(src.begin<float>(), src.end<float>(), dst.begin<float>());
-                 }
-             }
+ void TFImporter::parseMul(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
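+     // Handles element-wise "Mul" and "RealDiv" with either a constant
+     // operand or two tensor operands.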
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     const int num_inputs = layer.input_size();
  
-             int id = dstNet.addLayer(name, "InnerProduct", layerParams);
-             layer_id[name] = id;
+     CV_CheckGT(num_inputs, 0, "");
+     int constId = -1;
+     for(int ii = 0; ii < num_inputs; ++ii)
+     {
+         Pin input = parsePin(layer.input(ii));
+         if (value_id.find(input.name) != value_id.end())
+         {
+             constId = ii;
+             break;
+         }
+     }
+     CV_Assert((constId != -1) || (num_inputs == 2));
  
-             // one input only
-             int input_blob_index = kernel_blob_index == 0 ? 1 : 0;
-             connect(layer_id, dstNet, parsePin(layer.input(input_blob_index)), id, 0);
-             data_layouts[name] = DATA_LAYOUT_PLANAR;
+     if (constId != -1)
+     {
+         // Multiplication by constant.
+         CV_CheckEQ(num_inputs, 2, "");
+         Mat scaleMat = getTensorContent(getConstBlob(layer, value_id));
+         CV_Assert(scaleMat.type() == CV_32FC1);
+         if (type == "RealDiv")
+         {
+             if (constId == 0)
+                 CV_Error(Error::StsNotImplemented, "Division of constant over variable");
+             scaleMat = 1.0f / scaleMat;
          }
-         else if (type == "Reshape")
+         int id;
+         if (scaleMat.total() == 1)  // is a scalar.
          {
-             CV_CheckGT(num_inputs, 0, "");
-             Pin inpId = parsePin(layer.input(0));
-             DataLayout inpLayout = getDataLayout(layer.input(0), data_layouts);
-             // There are two possible implementations: reshape an input using
-             // predefined sizes or use a second input blob as a source of new shape.
-             if (value_id.find(layer.input(1)) != value_id.end())
+             // Try to match with a LeakyRelu:
+             // node {
+             //   name: "LeakyRelu/mul"
+             //   op: "Mul"
+             //   input: "LeakyRelu/alpha"
+             //   input: "input"
+             // }
+             // node {
+             //   name: "LeakyRelu/Maximum"
+             //   op: "Maximum"
+             //   input: "LeakyRelu/mul"
+             //   input: "input"
+             // }
+             StrIntVector next_layers = getNextLayers(net, name, "Maximum");
+             if (!next_layers.empty())
              {
-                 Mat newShape = getTensorContent(getConstBlob(layer, value_id, 1));
-                 int newShapeSize = newShape.total();
-                 bool hasSwap = false;
-                 if (newShapeSize == 4 && hasAllOnes(newShape, 0, 2))
-                 {
-                     // NHWC->NCHW
-                     std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3));
-                     std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2));
-                     hasSwap = true;
-                 }
-                 if (inpLayout == DATA_LAYOUT_NHWC)
-                 {
-                     if (newShapeSize >= 2 || newShape.at<int>(1) == 1)
-                     {
-                         int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
-                         addPermuteLayer(order, name + "/nhwc", inpId);
-                         if (newShapeSize < 4)
-                         {
-                             inpLayout = DATA_LAYOUT_NCHW;
-                         }
-                         else
-                         {
-                             inpLayout = DATA_LAYOUT_NHWC;
-                         }
-                     }
-                 }
-                 layerParams.set("dim", DictValue::arrayInt<int*>(newShape.ptr<int>(), newShapeSize));
+                 int maximumLayerIdx = next_layers[0].second;
  
-                 int id = dstNet.addLayer(name, "Reshape", layerParams);
-                 layer_id[name] = id;
+                 CV_Assert(net.node(maximumLayerIdx).input_size() == 2);
  
-                 // one input only
-                 connect(layer_id, dstNet, inpId, id, 0);
-                 inpId = Pin(name);
+                 // The input from the Mul layer can also be at index 1.
+                 int mulInputIdx = (net.node(maximumLayerIdx).input(0) == name) ? 0 : 1;
  
-                 if ((inpLayout == DATA_LAYOUT_NHWC || inpLayout == DATA_LAYOUT_UNKNOWN || inpLayout == DATA_LAYOUT_PLANAR) &&
-                     newShapeSize == 4 && !hasSwap)
-                 {
-                     int order[] = {0, 3, 1, 2};  // Transform back to OpenCV's NCHW.
-                     addPermuteLayer(order, name + "/nchw", inpId);
-                     inpLayout = DATA_LAYOUT_NCHW;
-                 }
+                 ExcludeLayer(net, maximumLayerIdx, mulInputIdx, false);
+                 layers_to_ignore.insert(next_layers[0].first);
  
-                 data_layouts[name] = newShapeSize == 2 ? DATA_LAYOUT_PLANAR : inpLayout;
+                 layerParams.set("negative_slope", scaleMat.at<float>(0));
+                 id = dstNet.addLayer(name, "ReLU", layerParams);
              }
              else
              {
-                 int id = dstNet.addLayer(name, "Reshape", layerParams);
-                 layer_id[name] = id;
-                 connect(layer_id, dstNet, inpId, id, 0);
-                 connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
-                 data_layouts[name] = inpLayout;
+                 // Just a multiplication.
+                 layerParams.set("scale", scaleMat.at<float>(0));
+                 id = dstNet.addLayer(name, "Power", layerParams);
              }
          }
-         else if (type == "Flatten" || type == "Squeeze")
+         else  // is a vector
          {
-             CV_CheckGT(num_inputs, 0, "");
-             Pin inpId = parsePin(layer.input(0));
-             int inpLayout = getDataLayout(layer.input(0), data_layouts);
-             if (type == "Squeeze")
-             {
-                 CV_Assert(hasLayerAttr(layer, "squeeze_dims"));
-                 const tensorflow::AttrValue& dims = getLayerAttr(layer, "squeeze_dims");
-                 std::vector<int> dimsVector(dims.list().i_size());
-                 for (int i = 0; i < dimsVector.size(); ++i)
-                     dimsVector[i] = dims.list().i(i);
-                 // Flatten layer can squeeze dimensions range into one.
-                 std::sort(dimsVector.begin(), dimsVector.end());
-                 for (int i = 1; i < dimsVector.size(); ++i)
-                 {
-                     if (dimsVector[i] != dimsVector[i - 1] + 1)
-                         CV_Error(Error::StsNotImplemented, "Unsupported squeeze configuration");
-                 }
-                 int start = dimsVector.front() - 1, end = dimsVector.back();
-                 if (start == -1 && end == 0)  // squeeze 0th dimension
-                 {
-                     start = 0;
-                     end = 1;
-                 }
-                 layerParams.set("axis", start);
-                 layerParams.set("end_axis", end);
-             }
-             if (inpLayout == DATA_LAYOUT_NHWC)
+             layerParams.blobs.resize(1, scaleMat);
+             StrIntVector next_layers = getNextLayers(net, name, "Add");
+             if (!next_layers.empty())
              {
-                 LayerParams permLP;
-                 int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
-                 permLP.set("order", DictValue::arrayInt<int*>(order, 4));
+                 layerParams.set("bias_term", true);
+                 layerParams.blobs.resize(2);
  
-                 std::string permName = name + "/nchw";
-                 CV_Assert(layer_id.find(permName) == layer_id.end());
-                 int permId = dstNet.addLayer(permName, "Permute", permLP);
-                 layer_id[permName] = permId;
-                 connect(layer_id, dstNet, inpId, permId, 0);
-                 inpId = Pin(permName);
+                 int weights_layer_index = next_layers[0].second;
+                 blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs.back());
+                 ExcludeLayer(net, weights_layer_index, 0, false);
+                 layers_to_ignore.insert(next_layers[0].first);
              }
-             int id = dstNet.addLayer(name, "Flatten", layerParams);
-             layer_id[name] = id;
-             connect(layer_id, dstNet, inpId, id, 0);
-             data_layouts[name] = DATA_LAYOUT_PLANAR;
+             if (hasLayerAttr(layer, "axis"))
+                 layerParams.set("axis", getLayerAttr(layer, "axis").i());
+             id = dstNet.addLayer(name, "Scale", layerParams);
          }
-         else if (type == "Transpose")
+         layer_id[name] = id;
+         Pin inp0 = parsePin(layer.input(0));
+         if (layer_id.find(inp0.name) != layer_id.end())
+             // The first input is a real layer, so the constant is the second one.
+             connect(layer_id, dstNet, inp0, id, 0);
+         else
+             // The first input is the constant; take the data from the second one.
+             connect(layer_id, dstNet, parsePin(layer.input(1)), id, 0);
+     }
+     else
+     {
+         // Check if all the inputs have the same shape.
+         bool equalInpShapes = true;
+         bool isShapeOnes = false;
+         MatShape outShape0;
+         for (int ii = 0; ii < num_inputs && !netInputShapes.empty(); ii++)
          {
-             CV_CheckGT(num_inputs, 0, "");
-             Mat perm = getTensorContent(getConstBlob(layer, value_id, 1));
-             CV_Assert(perm.type() == CV_32SC1);
-             int* permData = (int*)perm.data;
-             if (perm.total() == 4)
+             Pin pin = parsePin(layer.input(ii));
+             int inpId = layer_id.find(pin.name)->second;
+             // Get input shape
+             MatShape outShape;
+             std::vector<MatShape> inpShapes, outShapes;
+             dstNet.getLayerShapes(netInputShapes, inpId, inpShapes, outShapes);
+             CV_CheckGT(static_cast<int>(outShapes.size()), pin.blobIndex, "");
+             outShape = outShapes[pin.blobIndex];
+             if (ii == 0)
              {
-                 // Only NHWC <-> NCHW permutations are allowed. OpenCV is always
-                 // keep NCHW layout this way.
-                 int inpLayout = getDataLayout(layer.input(0), data_layouts);
-                 std::string type = "Identity";
-                 if (inpLayout == DATA_LAYOUT_NHWC)
-                 {
-                     if (permData[0] == 0 && permData[1] == 3 && permData[2] == 1 && permData[3] == 2)
-                     {
-                         // in TensorFlow: NHWC->NCHW
-                         // in OpenCV: NCHW->NCHW
-                         data_layouts[name] = DATA_LAYOUT_NCHW;
-                     }
-                     else if (permData[0] == 0 && permData[1] == 1 && permData[2] == 2 && permData[3] == 3)
-                     {
-                         // in TensorFlow: NHWC->NHWC
-                         // in OpenCV: NCHW->NCHW
-                         data_layouts[name] = DATA_LAYOUT_NHWC;
-                     }
-                     else if (permData[0] == 0 && permData[1] == 3 && permData[2] == 2 && permData[3] == 1)
-                     {
-                         // in TensorFlow: NHWC->NCWH
-                         // in OpenCV: NCHW->NCWH
-                         int permData[] = {0, 1, 3, 2};
-                         layerParams.set("order", DictValue::arrayInt<int*>(permData, perm.total()));
-                         data_layouts[name] = DATA_LAYOUT_NCHW;  // we keep track NCHW because channels position only matters
-                         type = "Permute";
-                     }
-                     else
-                         CV_Error(Error::StsParseError, "Only NHWC <-> NCHW permutations are allowed.");
-                 }
-                 else if (inpLayout == DATA_LAYOUT_NCHW)
-                 {
-                     if (permData[0] == 0 && permData[1] == 2 && permData[2] == 3 && permData[3] == 1)
-                     {
-                         // in TensorFlow: NCHW->NHWC
-                         // in OpenCV: NCHW->NCHW
-                         data_layouts[name] = DATA_LAYOUT_NHWC;
-                     }
-                     else if (permData[0] == 0 && permData[1] == 1 && permData[2] == 2 && permData[3] == 3)
-                     {
-                         // in TensorFlow: NCHW->NCHW
-                         // in OpenCV: NCHW->NCHW
-                         data_layouts[name] = DATA_LAYOUT_NCHW;
-                     }
-                     else
-                         CV_Error(Error::StsParseError, "Only NHWC <-> NCHW permutations are allowed.");
-                 }
-                 int id = dstNet.addLayer(name, type, layerParams);
-                 layer_id[name] = id;
-                 connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+                 outShape0 = outShape;
              }
-             else
+             else if (outShape != outShape0)
              {
-                 layerParams.set("order", DictValue::arrayInt<int*>(permData, perm.total()));
-                 int id = dstNet.addLayer(name, "Permute", layerParams);
-                 layer_id[name] = id;
-                 // one input only
-                 connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-                 data_layouts[name] = DATA_LAYOUT_UNKNOWN;
+                 equalInpShapes = false;
+                 isShapeOnes = isAllOnes(outShape, 2, outShape.size()) ||
+                               isAllOnes(outShape0, 2, outShape0.size());
+                 break;
              }
          }
-         else if (type == "Const")
+         int id;
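+         // Shapes match (or are unknown, or one operand is spatially 1x1):
+         // use an element-wise Eltwise layer; otherwise fall back to Scale.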
+         if (equalInpShapes || netInputShapes.empty() || (!equalInpShapes && isShapeOnes))
          {
+             layerParams.set("operation", type == "RealDiv" ? "div" : "prod");
+             id = dstNet.addLayer(name, "Eltwise", layerParams);
          }
-         else if (type == "LRN")
+         else
          {
-             CV_CheckGT(num_inputs, 0, "");
-             if(hasLayerAttr(layer, "alpha")) {
-                 layerParams.set("alpha", getLayerAttr(layer, "alpha").f());
-             }
-             if(hasLayerAttr(layer, "beta")) {
-                 layerParams.set("beta", getLayerAttr(layer, "beta").f());
-             }
-             if(hasLayerAttr(layer, "depth_radius")) {
-                 int radius = (int)getLayerAttr(layer, "depth_radius").i();
-                 layerParams.set("local_size", 2*radius + 1);
-             }
-             if(hasLayerAttr(layer, "bias")) {
-                 layerParams.set("bias", getLayerAttr(layer, "bias").f());
-             }
-             layerParams.set("norm_by_size", false);
+             if (type == "RealDiv")
+                 CV_Error(Error::StsNotImplemented, "Division of non equal tensors");
+             id = dstNet.addLayer(name, "Scale", layerParams);
+         }
  
-             int id = dstNet.addLayer(name, "LRN", layerParams);
-             layer_id[name] = id;
+         layer_id[name] = id;
  
-             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
-         }
-         else if (type == "Concat" || type == "ConcatV2")
+         for (int ii = 0; ii < num_inputs; ii++)
          {
-             CV_CheckGT(num_inputs, 0, "");
-             int axisId = (type == "Concat" ? 0 : num_inputs - 1);
-             int axis = getConstBlob(layer, value_id, axisId).int_val().Get(0);
+             Pin inp = parsePin(layer.input(ii));
+             if (layer_id.find(inp.name) == layer_id.end())
+                 CV_Error(Error::StsError, "Input layer not found: " + inp.name);
+             connect(layer_id, dstNet, inp, id, ii);
+         }
+     }
+ }
  
-             if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
-                 axis = toNCHW(axis);
-             else if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NDHWC)
-                 axis = toNCDHW(axis);
-             layerParams.set("axis", axis);
+ void TFImporter::parseFusedBatchNorm(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: "FusedBatchNorm"
+     // input: "input"
+     // input: "BatchNorm/gamma"
+     // input: "BatchNorm/beta"
+     // input: "BatchNorm/moving_mean"
+     // input: "BatchNorm/moving_variance"
  
-             // input(0) or input(n-1) is concat_dim
-             int from = (type == "Concat" ? 1 : 0);
-             int to = (type == "Concat" ? num_inputs : num_inputs - 1);
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-             for (int ii = from; ii < to; ii++)
-             {
-                 Pin inp = parsePin(layer.input(ii));
-                 if (layer_id.find(inp.name) == layer_id.end())
-                 {
-                     // There are constant inputs.
-                     LayerParams lp;
-                     lp.name = inp.name;
-                     lp.type = "Const";
-                     lp.blobs.resize(1);
-                     blobFromTensor(getConstBlob(layer, value_id, ii), lp.blobs.back());
-                     CV_Assert_N(!lp.blobs[0].empty(), lp.blobs[0].type() == CV_32F);
-                     int constInpId = dstNet.addLayer(lp.name, lp.type, lp);
-                     layer_id[lp.name] = constInpId;
-                 }
-             }
+     CV_CheckEQ(num_inputs, 5, "Expected gamma, beta, mean and std");
+     Pin inpId = parsePin(layer.input(0));
  
-             int id = dstNet.addLayer(name, "Concat", layerParams);
-             layer_id[name] = id;
+     bool isTraining = hasLayerAttr(layer, "is_training") && getLayerAttr(layer, "is_training").b();
  
-             for (int ii = from; ii < to; ii++)
-             {
-                 Pin inp = parsePin(layer.input(ii));
-                 if (layer_id.find(inp.name) == layer_id.end())
-                     CV_Error(Error::StsError, "Input layer not found: " + inp.name);
-                 connect(layer_id, dstNet, inp, id, ii - from);
-             }
-         }
-         else if (type == "MaxPool" || type == "MaxPool3D")
-         {
-             CV_CheckGT(num_inputs, 0, "");
-             layerParams.set("pool", "max");
+     layerParams.blobs.resize(2);
  
-             setKSize(layerParams, layer);
-             setStrides(layerParams, layer);
-             setPadding(layerParams, layer);
-             // Test_TensorFlow_nets.EAST_text_detection/1, NGRAPH/CPU
-             layerParams.set("ceil_mode", false);
+     const tensorflow::TensorProto& gammaTensor = getConstBlob(layer, value_id, 1);
+     if (!gammaTensor.tensor_content().empty())
+     {
+         layerParams.blobs.resize(layerParams.blobs.size() + 1);
+         layerParams.set("has_weight", true);
+         blobFromTensor(gammaTensor, layerParams.blobs.back());
+     }
+     else
+         layerParams.set("has_weight", false);
  
-             int id = dstNet.addLayer(name, "Pooling", layerParams);
-             layer_id[name] = id;
+     const tensorflow::TensorProto& betaTensor = getConstBlob(layer, value_id, 2);
+     if (!betaTensor.tensor_content().empty())
+     {
+         layerParams.blobs.resize(layerParams.blobs.size() + 1);
+         layerParams.set("has_bias", true);
+         blobFromTensor(betaTensor, layerParams.blobs.back());
+     }
+     else
+         layerParams.set("has_bias", false);
  
-             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
-         }
-         else if (type == "AvgPool" || type == "AvgPool3D")
-         {
-             CV_CheckGT(num_inputs, 0, "");
-             layerParams.set("pool", "ave");
-             layerParams.set("ave_pool_padded_area", false);
-             setKSize(layerParams, layer);
-             setStrides(layerParams, layer);
-             setPadding(layerParams, layer);
+     Mat mean, std;
+     if (isTraining)
+     {
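+         // When is_training is set, the stored moving statistics are not used:
+         // normalization is done on the fly by an extra MVN layer and the
+         // BatchNorm layer gets identity statistics (mean 0, std 1).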
+         if (layerParams.blobs.size() == 2)
+             CV_Error(Error::StsNotImplemented, "Cannot determine number "
+                                                "of parameters for batch normalization layer.");
+         mean = Mat::zeros(1, layerParams.blobs[2].total(), CV_32F);
+         std = Mat::ones(1, layerParams.blobs[2].total(), CV_32F);
+         // Add an extra layer: Mean-Variance normalization
+         LayerParams mvnParams;
+         std::string mvnName = name + "/MVN";
+         CV_Assert(layer_id.find(mvnName) == layer_id.end());
+         int mvnId = dstNet.addLayer(mvnName, "MVN", mvnParams);
+         layer_id[mvnName] = mvnId;
+         connect(layer_id, dstNet, inpId, mvnId, 0);
+         inpId = Pin(mvnName);
+     }
+     else
+     {
+         blobFromTensor(getConstBlob(layer, value_id, 3), mean);
+         blobFromTensor(getConstBlob(layer, value_id, 4), std);
+     }
+     layerParams.blobs[0] = mean;
+     layerParams.blobs[1] = std;
  
-             int id = dstNet.addLayer(name, "Pooling", layerParams);
-             layer_id[name] = id;
+     if (hasLayerAttr(layer, "epsilon"))
+         layerParams.set("eps", getLayerAttr(layer, "epsilon").f());
  
-             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
-         }
-         else if (type == "MaxPoolGrad")
-         {
-             CV_CheckEQ(num_inputs, 3, "");
+     int id = dstNet.addLayer(name, "BatchNorm", layerParams);
+     layer_id[name] = id;
  
-             layerParams.set("pool_k_h", 0);
-             layerParams.set("pool_k_w", 0);
-             layerParams.set("pool_stride_h", 0);
-             layerParams.set("pool_stride_w", 0);
-             layerParams.set("pool_pad_h", 0);
-             layerParams.set("pool_pad_w", 0);
+     // one input only
+     connect(layer_id, dstNet, inpId, id, 0);
+ }
  
-             int id = dstNet.addLayer(name, "MaxUnpool", layerParams);
-             layer_id[name] = id;
+ void TFImporter::parseConv2DBackpropInput(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: "Conv2DBackpropInput"
+     // input: "conv2d_transpose/output_shape"
+     // input: "weights"
+     // input: "input"
  
-             connect(layer_id, dstNet, parsePin(layer.input(2)), id, 0);
-             connect(layer_id, dstNet, parsePin(layer.input(1) + ":1"), id, 1);
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 2);
-         }
-         else if (type == "Placeholder")
-         {
-             if (!hasLayerAttr(layer, "dtype") ||
-                 getLayerAttr(layer, "dtype").type() != tensorflow::DT_BOOL)  // If input is not a train/test flag.
-             {
-                 netInputsNames.push_back(name);
-                 layer_id[name] = 0;
-             }
-             tensorflow::TensorShapeProto shape;
-             if (hasLayerAttr(layer, "shape"))
-                 shape = getLayerAttr(layer, "shape").shape();
-             else if (hasLayerAttr(layer, "_output_shapes"))
-             {
-                 tensorflow::AttrValue_ListValue list = getLayerAttr(layer, "_output_shapes").list();
-                 if (list.shape_size())
-                     shape = list.shape()[0];
-             }
-             if (shape.dim_size())
-             {
-                 MatShape dims(shape.dim_size());
-                 for (int i = 0; i < dims.size(); ++i)
-                     dims[i] = shape.dim(i).size();
-                 if (dims.size() == 4 && predictedLayout == DATA_LAYOUT_NHWC)
-                 {
-                     std::swap(dims[1], dims[3]);  // NHWC->NCWH
-                     std::swap(dims[2], dims[3]);  // NCWH->NCHW
-                     if (dims[0] == -1)  // It's OK to have undetermined batch size
-                         dims[0] = 1;
-                 }
-                 bool hasNeg = false;
-                 for (int i = 0; i < dims.size() && !hasNeg; ++i)
-                 {
-                     hasNeg = dims[i] < 0;
-                 }
-                 if (!hasNeg)
-                     netInputShapes.push_back(dims);
-             }
-         }
-         else if (type == "Split") {
-             // TODO: determining axis index remapping by input dimensions order of input blob
-             // TODO: slicing input may be Const op
-             // TODO: slicing kernels for convolutions - in current implementation it is impossible
-             // TODO: add parsing num of slices parameter
-             CV_CheckEQ(num_inputs, 2, "");
-             // num_split
-             // 1st blob is dims tensor
-             int axis = getConstBlob(layer, value_id, 0).int_val().Get(0);
-             if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
-                 axis = toNCHW(axis);
-             layerParams.set("axis", axis);
-             if (hasLayerAttr(layer, "num_split"))
-                 layerParams.set("num_split", getLayerAttr(layer, "num_split").i());
-             int id = dstNet.addLayer(name, "Slice", layerParams);
-             layer_id[name] = id;
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-             // one input only
-             connect(layer_id, dstNet, parsePin(layer.input(1)), id, 0);
-         }
-         else if (type == "Slice")
-         {
-             // op: "Slice"
-             // input: "input_node"
-             // input: "Slice/begin"
-             // input: "Slice/size"
-             CV_CheckEQ(num_inputs, 3, "");
-             Mat begins = getTensorContent(getConstBlob(layer, value_id, 1));
-             Mat sizes = getTensorContent(getConstBlob(layer, value_id, 2));
-             CV_Assert_N(!begins.empty(), !sizes.empty());
-             CV_CheckTypeEQ(begins.type(), CV_32SC1, "");
-             CV_CheckTypeEQ(sizes.type(), CV_32SC1, "");
-             if (begins.total() == 4 && getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
-             {
-                 // Swap NHWC parameters' order to NCHW.
-                 std::swap(*begins.ptr<int32_t>(0, 2), *begins.ptr<int32_t>(0, 3));
-                 std::swap(*begins.ptr<int32_t>(0, 1), *begins.ptr<int32_t>(0, 2));
-                 std::swap(*sizes.ptr<int32_t>(0, 2), *sizes.ptr<int32_t>(0, 3));
-                 std::swap(*sizes.ptr<int32_t>(0, 1), *sizes.ptr<int32_t>(0, 2));
-             }
-             layerParams.set("begin", DictValue::arrayInt((int*)begins.data, begins.total()));
-             layerParams.set("size", DictValue::arrayInt((int*)sizes.data, sizes.total()));
+     CV_CheckEQ(num_inputs, 3, "Expected output shape, weights and input nodes");
  
-             int id = dstNet.addLayer(name, "Slice", layerParams);
-             layer_id[name] = id;
+     layerParams.set("bias_term", false);
+     layerParams.blobs.resize(1);
  
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-         }
-         else if (type == "StridedSlice")
+     StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
+     if (next_layers.size() == 1)
+     {
+         layerParams.set("bias_term", true);
+         layerParams.blobs.resize(2);
+         int weights_layer_index = next_layers[0].second;
+         blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
+         ExcludeLayer(net, weights_layer_index, 0, false);
+         layers_to_ignore.insert(next_layers[0].first);
+     }
+     kernelFromTensor(getConstBlob(layer, value_id, 1), layerParams.blobs[0]);
+     const int* kshape = layerParams.blobs[0].size.p;
+     const int kernelH = kshape[2];
+     const int kernelW = kshape[3];
+     layerParams.set("kernel_h", kernelH);
+     layerParams.set("kernel_w", kernelW);
+     layerParams.set("num_output", kshape[1]);
+     setStrides(layerParams, layer);
+     setPadding(layerParams, layer);
+     // For a convolution layer the output shape is computed as
+     // o = 1 + (i - k + 2*p) / s
+     // i - input size, o - output size, k - kernel size, p - pad, s - stride
+     // In TensorFlow, p == 0 for padMode == 'VALID' and p == (k - 1) / 2 for 'SAME'
+     // (assuming an odd k), so
+     // SAME:  o = 1 + (i - 1) / s
+     // VALID: o = 1 + (i - k) / s
+     // A deconvolution layer's output shape is computed as
+     // SAME:  o = 1 + (i - 1)*s
+     // VALID: o = (i - 1)*s + k
+     // If output_shape differs from the formulas above, adjust padding is applied.
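+     // Example: i = 5, s = 2 with SAME padding gives o = 1 + (5 - 1)*2 = 9;
+     // a requested output_shape of 10 then yields adj = (10 - 1) % 2 = 1.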
+     const int strideY = layerParams.get<int>("stride_h");
+     const int strideX = layerParams.get<int>("stride_w");
+     Mat outShape = getTensorContent(getConstBlob(layer, value_id, 0));
+     const int outH = outShape.at<int>(1);
+     const int outW = outShape.at<int>(2);
+     if (layerParams.get<String>("pad_mode") == "SAME")
+     {
+         layerParams.set("adj_w", (outW - 1) % strideX);
+         layerParams.set("adj_h", (outH - 1) % strideY);
+     }
+     else if (layerParams.get<String>("pad_mode") == "VALID")
+     {
+         layerParams.set("adj_w", (outW - kernelW) % strideX);
+         layerParams.set("adj_h", (outH - kernelH) % strideY);
+     }
+     int id = dstNet.addLayer(name, "Deconvolution", layerParams);
+     layer_id[name] = id;
+     // one input only
+     connect(layer_id, dstNet, parsePin(layer.input(2)), id, 0);
+ }
+
+ void TFImporter::parseBlockLSTM(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: "BlockLSTM"
+     // input: "lstm_block_wrapper/ToInt64/x"  (ignore, number of time stamps)
+     // input: "input"
+     // input: "lstm_block_wrapper/zeros"      (ignore)
+     // input: "lstm_block_wrapper/zeros"      (ignore)
+     // input: "lstm_block_wrapper/kernel"
+     // input: "lstm_block_wrapper/w_i_diag"
+     // input: "lstm_block_wrapper/w_f_diag"
+     // input: "lstm_block_wrapper/w_o_diag"
+     // input: "lstm_block_wrapper/bias"
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckEQ(num_inputs, 9, "Unexpected number of input nodes");
+     if (hasLayerAttr(layer, "forget_bias"))
+         layerParams.set("forget_bias", getLayerAttr(layer, "forget_bias").f());
+     if (hasLayerAttr(layer, "forget_bias"))
+     {
+         float cellClip = getLayerAttr(layer, "cell_clip").f();
+         // Cell clip disabled if it's negative.
+         if (cellClip >= 0)
          {
-             CV_CheckEQ(num_inputs, 4, "");
-             Mat begins = getTensorContent(getConstBlob(layer, value_id, 1));
-             Mat ends = getTensorContent(getConstBlob(layer, value_id, 2));
-             Mat strides = getTensorContent(getConstBlob(layer, value_id, 3));
-             CV_CheckTypeEQ(begins.type(), CV_32SC1, "");
-             CV_CheckTypeEQ(ends.type(), CV_32SC1, "");
-             CV_CheckTypeEQ(strides.type(), CV_32SC1, "");
-             const int num = begins.total();
-             CV_Assert_N(num == ends.total(), num == strides.total());
-             int end_mask = getLayerAttr(layer, "end_mask").i();
-             for (int i = 0; i < num; ++i)
-             {
-                 if (ends.at<int>(i) < 0)
-                     ends.at<int>(i) -= 1;
-                 if (end_mask & (1 << i))
-                     ends.at<int>(i) = -1;
-                 if (strides.at<int>(i) != 1)
-                     CV_Error(Error::StsNotImplemented,
-                              format("StridedSlice with stride %d", strides.at<int>(i)));
-             }
-             if (begins.total() == 4 && getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
-             {
-                 // Swap NHWC parameters' order to NCHW.
-                 std::swap(begins.at<int>(2), begins.at<int>(3));
-                 std::swap(begins.at<int>(1), begins.at<int>(2));
-                 std::swap(ends.at<int>(2), ends.at<int>(3));
-                 std::swap(ends.at<int>(1), ends.at<int>(2));
-             }
-             layerParams.set("begin", DictValue::arrayInt((int*)begins.data, begins.total()));
-             layerParams.set("end", DictValue::arrayInt((int*)ends.data, ends.total()));
+             layerParams.set("use_cell_clip", true);
+             layerParams.set("cell_clip", cellClip);
+         }
+     }
  
-             int id = dstNet.addLayer(name, "Slice", layerParams);
-             layer_id[name] = id;
+     Mat W, Wh, Wx, b;
+     blobFromTensor(getConstBlob(layer, value_id, 4), W);
+     blobFromTensor(getConstBlob(layer, value_id, 8), b);
+     const int outSize = W.cols / 4;
  
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+     // IGFO->IFOG
+     float* weightData = (float*)W.data;
+     for (int i = 0; i < W.rows; ++i)
+         for (int j = 0; j < outSize; ++j)
+         {
+             std::swap(weightData[i * W.cols + 1 * outSize + j],
+                       weightData[i * W.cols + 2 * outSize + j]);
+             std::swap(weightData[i * W.cols + 2 * outSize + j],
+                       weightData[i * W.cols + 3 * outSize + j]);
          }
-         else if (type == "Mul" || type == "RealDiv")
+     Wx = W.rowRange(0, W.rows - outSize).t();
+     Wh = W.rowRange(W.rows - outSize, W.rows).t();
+     layerParams.blobs.resize(3);
+     layerParams.blobs[0] = Wh;
+     layerParams.blobs[1] = Wx;
+     layerParams.blobs[2] = b;
+     if (hasLayerAttr(layer, "use_peephole"))
+     {
+         bool usePeephole = getLayerAttr(layer, "use_peephole").b();
+         if (usePeephole)
          {
-             CV_CheckGT(num_inputs, 0, "");
-             int constId = -1;
-             for(int ii = 0; ii < num_inputs; ++ii)
+             layerParams.set("use_peephole", true);
+             layerParams.blobs.resize(6);
+             for (int i = 0; i < 3; ++i)
              {
-                 Pin input = parsePin(layer.input(ii));
-                 if (value_id.find(input.name) != value_id.end())
-                 {
-                     constId = ii;
-                     break;
-                 }
+                 Mat w;
+                 blobFromTensor(getConstBlob(layer, value_id, 5 + i), w);
+                 w = w.reshape(1, w.total());  // Single column.
+                 w = Mat::diag(w);  // Make a diagonal matrix.
+                 layerParams.blobs[3 + i] = w;
              }
-             CV_Assert((constId != -1) || (num_inputs == 2));
+         }
+     }
  
-             if (constId != -1)
-             {
-                 // Multiplication by constant.
-                 CV_CheckEQ(num_inputs, 2, "");
-                 Mat scaleMat = getTensorContent(getConstBlob(layer, value_id));
-                 CV_Assert(scaleMat.type() == CV_32FC1);
-                 if (type == "RealDiv")
-                 {
-                     if (constId == 0)
-                         CV_Error(Error::StsNotImplemented, "Division of constant over variable");
-                     scaleMat = 1.0f / scaleMat;
-                 }
+     int id = dstNet.addLayer(name, "LSTM", layerParams);
+     layer_id[name] = id;
  
-                 int id;
-                 if (scaleMat.total() == 1)  // is a scalar.
-                 {
-                     // Try to match with a LeakyRelu:
-                     // node {
-                     //   name: "LeakyRelu/mul"
-                     //   op: "Mul"
-                     //   input: "LeakyRelu/alpha"
-                     //   input: "input"
-                     // }
-                     // node {
-                     //   name: "LeakyRelu/Maximum"
-                     //   op: "Maximum"
-                     //   input: "LeakyRelu/mul"
-                     //   input: "input"
-                     // }
-                     StrIntVector next_layers = getNextLayers(net, name, "Maximum");
-                     if (!next_layers.empty())
-                     {
-                         int maximumLayerIdx = next_layers[0].second;
+     // one input only
+     connect(layer_id, dstNet, parsePin(layer.input(1)), id, 0);
+     data_layouts[name] = DATA_LAYOUT_UNKNOWN;
+ }
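The IGFO->IFOG reordering above is easy to misread from the raw pointer arithmetic; here is a minimal standalone sketch of the same two per-column block swaps on a toy one-row weight matrix (gate block width outSize = 2, letters stand in for weights):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
        const int outSize = 2;
        std::vector<char> row = {'I','I', 'G','G', 'F','F', 'O','O'};  // gate blocks I, G, F, O

        // Same swap pattern as parseBlockLSTM: overall the block order becomes I, F, O, G.
        for (int j = 0; j < outSize; ++j)
        {
            std::swap(row[1 * outSize + j], row[2 * outSize + j]);
            std::swap(row[2 * outSize + j], row[3 * outSize + j]);
        }

        for (char c : row)
            std::printf("%c", c);
        std::printf("\n");  // prints IIFFOOGG
        return 0;
    }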
  
-                         CV_Assert(net.node(maximumLayerIdx).input_size() == 2);
+ void TFImporter::parseResize(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer_, LayerParams& layerParams)
+ {
+     tensorflow::NodeDef layer = layer_;
+     std::string name = layer.name();
+     const std::string& type = layer.op();
+     int num_inputs = layer.input_size();
  
-                         // The input from the Mul layer can also be at index 1.
-                         int mulInputIdx = (net.node(maximumLayerIdx).input(0) == name) ? 0 : 1;
+     CV_CheckGT(num_inputs, 0, "");
+     std::string convWeights = "";
+     if (type == "FusedResizeAndPadConv2D")
+     {
+         // input: "mul_1"
+         // input: "decoder/ResizeBilinear/size"
+         // input: "decoder/decoder_conv0/Conv2D_dummy_paddings"
+         // input: "decoder/decoder_conv0/weights"
+         CV_CheckEQ(num_inputs, 4, "Number of input for FusedResizeAndPadConv2D");
  
-                         ExcludeLayer(net, maximumLayerIdx, mulInputIdx, false);
-                         layers_to_ignore.insert(next_layers[0].first);
+         Mat paddings = getTensorContent(getConstBlob(layer, value_id, 2));
+         CV_CheckEQ(countNonZero(paddings), 0, "Unsupported mode");
  
-                         layerParams.set("negative_slope", scaleMat.at<float>(0));
-                         id = dstNet.addLayer(name, "ReLU", layerParams);
-                     }
-                     else
-                     {
-                         // Just a multiplication.
-                         layerParams.set("scale", scaleMat.at<float>(0));
-                         id = dstNet.addLayer(name, "Power", layerParams);
-                     }
-                 }
-                 else  // is a vector
-                 {
-                     layerParams.blobs.resize(1, scaleMat);
-                    StrIntVector next_layers = getNextLayers(net, name, "Add");
-                    if (!next_layers.empty())
-                    {
-                        layerParams.set("bias_term", true);
-                        layerParams.blobs.resize(2);
-                        int weights_layer_index = next_layers[0].second;
-                        blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs.back());
-                        ExcludeLayer(net, weights_layer_index, 0, false);
-                        layers_to_ignore.insert(next_layers[0].first);
-                    }
+         convWeights = layer.input(3);
+         layer.mutable_input()->DeleteSubrange(2, 2);  // FIXIT do NOT modify input model
+         num_inputs = layer.input_size();
+         name = name + "/resize";
  
-                     if (hasLayerAttr(layer, "axis"))
-                         layerParams.set("axis", getLayerAttr(layer, "axis").i());
+         if (hasLayerAttr(layer, "resize_align_corners"))
+         {
+             // FIXIT do NOT modify input model
+             layer.mutable_attr()->insert(
+                     ::google::protobuf::MapPair<std::string, tensorflow::AttrValue>("align_corners",
+                                                                                     getLayerAttr(layer, "resize_align_corners")));
+         }
+     }
+     if (num_inputs == 2)
+     {
+         Mat outSize = getTensorContent(getConstBlob(layer, value_id, 1));
+         CV_CheckTypeEQ(outSize.type(), CV_32SC1, ""); CV_CheckEQ(outSize.total(), (size_t)2, "");
+         layerParams.set("height", outSize.at<int>(0, 0));
+         layerParams.set("width", outSize.at<int>(0, 1));
+     }
+     else if (num_inputs == 3)
+     {
+         Mat factorHeight = getTensorContent(getConstBlob(layer, value_id, 1));
+         Mat factorWidth = getTensorContent(getConstBlob(layer, value_id, 2));
+         factorHeight.convertTo(factorHeight, CV_32F);
+         factorWidth.convertTo(factorWidth, CV_32F);
+         layerParams.set("zoom_factor_x", factorWidth.at<float>(0));
+         layerParams.set("zoom_factor_y", factorHeight.at<float>(0));
+     }
+     else
+         CV_Check(num_inputs, num_inputs == 2 || num_inputs == 3, "");
  
-                     id = dstNet.addLayer(name, "Scale", layerParams);
-                 }
-                 layer_id[name] = id;
+     if (type == "ResizeNearestNeighbor")
+         layerParams.set("interpolation", "nearest");
+     else
+         layerParams.set("interpolation", "bilinear");
  
-                 Pin inp0 = parsePin(layer.input(0));
-                 if (layer_id.find(inp0.name) != layer_id.end())
-                     // First operand is a constant.
-                     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-                 else
-                     connect(layer_id, dstNet, parsePin(layer.input(1)), id, 0);
-             }
-             else
-             {
-                 // Check if all the inputs have the same shape.
-                 bool equalInpShapes = true;
-                 bool isShapeOnes = false;
-                 MatShape outShape0;
-                 for (int ii = 0; ii < num_inputs && !netInputShapes.empty(); ii++)
-                 {
-                     Pin pin = parsePin(layer.input(ii));
-                     int inpId = layer_id.find(pin.name)->second;
+     if (hasLayerAttr(layer, "align_corners"))
+         layerParams.set("align_corners", getLayerAttr(layer, "align_corners").b());
  
-                     // Get input shape
-                     MatShape outShape;
-                     std::vector<MatShape> inpShapes, outShapes;
-                     dstNet.getLayerShapes(netInputShapes, inpId, inpShapes, outShapes);
-                     CV_CheckGT(static_cast<int>(outShapes.size()), pin.blobIndex, "");
-                     outShape = outShapes[pin.blobIndex];
+     if (hasLayerAttr(layer, "half_pixel_centers"))
+         layerParams.set("half_pixel_centers", getLayerAttr(layer, "half_pixel_centers").b());
  
-                     if (ii == 0)
-                     {
-                         outShape0 = outShape;
-                     }
-                     else if (outShape != outShape0)
-                     {
-                         equalInpShapes = false;
-                         isShapeOnes = isAllOnes(outShape, 2, outShape.size()) ||
-                                       isAllOnes(outShape0, 2, outShape0.size());
-                         break;
-                     }
-                 }
+     int id = dstNet.addLayer(name, "Resize", layerParams);
+     layer_id[name] = id;
  
-                 int id;
-                 if (equalInpShapes || netInputShapes.empty() || (!equalInpShapes && isShapeOnes))
-                 {
-                     layerParams.set("operation", type == "RealDiv" ? "div" : "prod");
-                     id = dstNet.addLayer(name, "Eltwise", layerParams);
-                 }
-                 else
-                 {
-                     if (type == "RealDiv")
-                         CV_Error(Error::StsNotImplemented, "Division of non equal tensors");
-                     id = dstNet.addLayer(name, "Scale", layerParams);
-                 }
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
  
-                 layer_id[name] = id;
+     // Step back to add convolution
+     if (type == "FusedResizeAndPadConv2D")
+     {
+         tensorflow::NodeDef conv = layer_;
+         conv.clear_input();
+         conv.add_input(name);
+         conv.add_input(convWeights);
+         conv.set_op("Conv2D");
+         parseNode(conv);
+     }
+ }
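For reference, the two resize parameterizations handled above differ only in what the extra inputs carry: an explicit int32 [outH, outW] tensor, or one scale factor per axis. A minimal sketch with hypothetical values (not importer code):

    #include <cstdio>

    int main()
    {
        // num_inputs == 2: second input holds the target size directly.
        const int outSize[2] = {256, 320};                    // -> height=256, width=320
        std::printf("height=%d width=%d\n", outSize[0], outSize[1]);

        // num_inputs == 3: inputs 1 and 2 hold per-axis zoom factors instead.
        const float factorHeight = 2.0f, factorWidth = 2.0f;  // -> zoom_factor_y / zoom_factor_x
        const int inH = 128, inW = 160;
        std::printf("resized to %dx%d\n", (int)(inH * factorHeight), (int)(inW * factorWidth));
        return 0;
    }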
  
-                 for (int ii = 0; ii < num_inputs; ii++)
-                 {
-                     Pin inp = parsePin(layer.input(ii));
-                     if (layer_id.find(inp.name) == layer_id.end())
-                         CV_Error(Error::StsError, "Input layer not found: " + inp.name);
-                     connect(layer_id, dstNet, inp, id, ii);
-                 }
-             }
-         }
-         else if (type == "FusedBatchNorm" || type == "FusedBatchNormV3")
-         {
-             // op: "FusedBatchNorm"
-             // input: "input"
-             // input: "BatchNorm/gamma"
-             // input: "BatchNorm/beta"
-             // input: "BatchNorm/moving_mean"
-             // input: "BatchNorm/moving_variance"
-             CV_CheckEQ(num_inputs, 5, "Expected gamma, beta, mean and std");
-             Pin inpId = parsePin(layer.input(0));
+ void TFImporter::parseL2Normalize(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: "L2Normalize"
+     // input: "input"
+     // input: "reduction_indices" (axis)
  
-             bool isTraining = hasLayerAttr(layer, "is_training") && getLayerAttr(layer, "is_training").b();
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-             layerParams.blobs.resize(2);
+     CV_CheckEQ(num_inputs, 2, "");
+     Mat reductionIndices = getTensorContent(getConstBlob(layer, value_id, 1));
+     CV_Assert(reductionIndices.type() == CV_32SC1);
  
-             const tensorflow::TensorProto& gammaTensor = getConstBlob(layer, value_id, 1);
-             if (!gammaTensor.tensor_content().empty())
-             {
-                 layerParams.blobs.resize(layerParams.blobs.size() + 1);
-                 layerParams.set("has_weight", true);
-                 blobFromTensor(gammaTensor, layerParams.blobs.back());
-             }
-             else
-                 layerParams.set("has_weight", false);
-             const tensorflow::TensorProto& betaTensor = getConstBlob(layer, value_id, 2);
-             if (!betaTensor.tensor_content().empty())
-             {
-                 layerParams.blobs.resize(layerParams.blobs.size() + 1);
-                 layerParams.set("has_bias", true);
-                 blobFromTensor(betaTensor, layerParams.blobs.back());
-             }
-             else
-                 layerParams.set("has_bias", false);
-             Mat mean, std;
-             if (isTraining)
-             {
-                 if (layerParams.blobs.size() == 2)
-                     CV_Error(Error::StsNotImplemented, "Cannot determine number "
-                              "of parameters for batch normalization layer.");
-                 mean = Mat::zeros(1, layerParams.blobs[2].total(), CV_32F);
-                 std = Mat::ones(1, layerParams.blobs[2].total(), CV_32F);
-                 // Add an extra layer: Mean-Variance normalization
-                 LayerParams mvnParams;
-                 std::string mvnName = name + "/MVN";
-                 CV_Assert(layer_id.find(mvnName) == layer_id.end());
-                 int mvnId = dstNet.addLayer(mvnName, "MVN", mvnParams);
-                 layer_id[mvnName] = mvnId;
-                 connect(layer_id, dstNet, inpId, mvnId, 0);
-                 inpId = Pin(mvnName);
-             }
-             else
-             {
-                 blobFromTensor(getConstBlob(layer, value_id, 3), mean);
-                 blobFromTensor(getConstBlob(layer, value_id, 4), std);
-             }
-             layerParams.blobs[0] = mean;
-             layerParams.blobs[1] = std;
+     const int numAxes = reductionIndices.total();
+     if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
+         for (int i = 0; i < numAxes; ++i)
+             reductionIndices.at<int>(i) = toNCHW(reductionIndices.at<int>(i));
  
-             if (hasLayerAttr(layer, "epsilon"))
-                 layerParams.set("eps", getLayerAttr(layer, "epsilon").f());
+     cv::sort(reductionIndices, reductionIndices, SORT_ASCENDING);
+     for (int i = 1; i < numAxes; ++i)
+     {
+         CV_Assert(reductionIndices.at<int>(i) == reductionIndices.at<int>(i - 1) + 1);
+         // Axes have the same sign.
+         CV_Assert(reductionIndices.at<int>(i) * reductionIndices.at<int>(i - 1) >= 0);
+     }
+     layerParams.set("start_axis", reductionIndices.at<int>(0));
+     layerParams.set("end_axis", reductionIndices.at<int>(numAxes - 1));
  
-             int id = dstNet.addLayer(name, "BatchNorm", layerParams);
-             layer_id[name] = id;
+     int id = dstNet.addLayer(name, "Normalize", layerParams);
+     layer_id[name] = id;
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+ }
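The reduction axes arrive in TF's NHWC numbering and are remapped to OpenCV's NCHW order before the contiguity check; the stand-in below is only an assumed, simplified version of the importer's toNCHW() helper (it ignores negative axes, which the real helper also accepts), used to show the mapping:

    #include <cassert>
    #include <cstdio>

    // Assumed mapping only: NHWC axis (N,H,W,C) -> NCHW axis (N,C,H,W).
    static int toNCHWAxis(int idx)
    {
        static const int remap[4] = {0, 2, 3, 1};  // N->0, H->2, W->3, C->1
        assert(0 <= idx && idx < 4);
        return remap[idx];
    }

    int main()
    {
        // reduction_indices = [1, 2] (H, W in NHWC) become axes 2 and 3 in NCHW,
        // i.e. the contiguous range start_axis=2, end_axis=3 expected by Normalize.
        const int reductionIndices[2] = {1, 2};
        for (int i = 0; i < 2; ++i)
            std::printf("%d -> %d\n", reductionIndices[i], toNCHWAxis(reductionIndices[i]));
        return 0;
    }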
  
-             // one input only
-             connect(layer_id, dstNet, inpId, id, 0);
-         }
-         else if (type == "Conv2DBackpropInput")
+ void TFImporter::parsePriorBox(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckEQ(num_inputs, 2, "");
+     if (hasLayerAttr(layer, "min_size"))
+         layerParams.set("min_size", getLayerAttr(layer, "min_size").i());
+     if (hasLayerAttr(layer, "max_size"))
+         layerParams.set("max_size", getLayerAttr(layer, "max_size").i());
+     if (hasLayerAttr(layer, "flip"))
+         layerParams.set("flip", getLayerAttr(layer, "flip").b());
+     if (hasLayerAttr(layer, "clip"))
+         layerParams.set("clip", getLayerAttr(layer, "clip").b());
+     if (hasLayerAttr(layer, "offset"))
+         layerParams.set("offset", getLayerAttr(layer, "offset").f());
+     if (hasLayerAttr(layer, "step"))
+         layerParams.set("step", getLayerAttr(layer, "step").f());
+     const std::string paramNames[] = {"variance", "aspect_ratio", "scales",
+                                       "width", "height"};
+     for (int i = 0; i < 5; ++i)
+     {
+         if (hasLayerAttr(layer, paramNames[i]))
          {
-             // op: "Conv2DBackpropInput"
-             // input: "conv2d_transpose/output_shape"
-             // input: "weights"
-             // input: "input"
-             CV_CheckEQ(num_inputs, 3, "Expected output shape, weights and input nodes");
+             Mat values = getTensorContent(getLayerAttr(layer, paramNames[i]).tensor());
+             layerParams.set(paramNames[i],
+                             DictValue::arrayReal<float*>((float*)values.data, values.total()));
+         }
+     }
+     int id = dstNet.addLayer(name, "PriorBox", layerParams);
+     layer_id[name] = id;
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+     connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
+     data_layouts[name] = DATA_LAYOUT_UNKNOWN;
+ }
  
-             layerParams.set("bias_term", false);
-             layerParams.blobs.resize(1);
+ void TFImporter::parseSoftmax(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-             StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
-             if (next_layers.size() == 1)
-             {
-                 layerParams.set("bias_term", true);
-                 layerParams.blobs.resize(2);
+     CV_CheckGT(num_inputs, 0, "");
+     if (hasLayerAttr(layer, "axis"))
+         layerParams.set("axis", getLayerAttr(layer, "axis").i());
  
-                 int weights_layer_index = next_layers[0].second;
+     int id = dstNet.addLayer(name, "Softmax", layerParams);
+     layer_id[name] = id;
+     connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+ }
  
-                 blobFromTensor(getConstBlob(net.node(weights_layer_index), value_id), layerParams.blobs[1]);
-                 ExcludeLayer(net, weights_layer_index, 0, false);
-                 layers_to_ignore.insert(next_layers[0].first);
-             }
+ void TFImporter::parseCropAndResize(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: "CropAndResize"
+     // input: "input"
+     // input: "boxes"
+     // input: "sizes"
  
-             kernelFromTensor(getConstBlob(layer, value_id, 1), layerParams.blobs[0]);
-             const int* kshape = layerParams.blobs[0].size.p;
-             const int kernelH = kshape[2];
-             const int kernelW = kshape[3];
-             layerParams.set("kernel_h", kernelH);
-             layerParams.set("kernel_w", kernelW);
-             layerParams.set("num_output", kshape[1]);
-             setStrides(layerParams, layer);
-             setPadding(layerParams, layer);
-             // For convolution layer, output shape computes as
-             // o = 1 + (i - k + 2*p) / s
-             // i - input size, o - output size, k - kernel size, p - pad, s - stride
-             // In TensorFlow, p == 0 is padMode == 'VALID' or p == (k - 1) / 2
-             // considering that k is odd.
-             // SAME:  o = 1 + (i - 1) / s
-             // VALID: o = 1 + i / s
-             // Deconvolution's layer output shape computes as
-             // SAME:  o = 1 + (i - 1)*s
-             // VALID: o = (i - 1)*s
-             // If output_shape differs from formulas above then adjust padding is applied.
-             const int strideY = layerParams.get<int>("stride_h");
-             const int strideX = layerParams.get<int>("stride_w");
-             Mat outShape = getTensorContent(getConstBlob(layer, value_id, 0));
-             const int outH = outShape.at<int>(1);
-             const int outW = outShape.at<int>(2);
-             if (layerParams.get<String>("pad_mode") == "SAME")
-             {
-                 layerParams.set("adj_w", (outW - 1) % strideX);
-                 layerParams.set("adj_h", (outH - 1) % strideY);
-             }
-             else if (layerParams.get<String>("pad_mode") == "VALID")
-             {
-                 layerParams.set("adj_w", (outW - kernelW) % strideX);
-                 layerParams.set("adj_h", (outH - kernelH) % strideY);
-             }
-             int id = dstNet.addLayer(name, "Deconvolution", layerParams);
-             layer_id[name] = id;
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckEQ(num_inputs, 3, "");
  
-             // one input only
-             connect(layer_id, dstNet, parsePin(layer.input(2)), id, 0);
-         }
-         else if (type == "BlockLSTM")
-         {
-             // op: "BlockLSTM"
-             // input: "lstm_block_wrapper/ToInt64/x"  (ignore, number of time stamps)
-             // input: "input"
-             // input: "lstm_block_wrapper/zeros"      (ignore)
-             // input: "lstm_block_wrapper/zeros"      (ignore)
-             // input: "lstm_block_wrapper/kernel"
-             // input: "lstm_block_wrapper/w_i_diag"
-             // input: "lstm_block_wrapper/w_f_diag"
-             // input: "lstm_block_wrapper/w_o_diag"
-             // input: "lstm_block_wrapper/bias"
-             CV_CheckEQ(num_inputs, 9, "Unexpected number of input nodes");
-             if (hasLayerAttr(layer, "forget_bias"))
-                 layerParams.set("forget_bias", getLayerAttr(layer, "forget_bias").f());
-             if (hasLayerAttr(layer, "forget_bias"))
-             {
-                 float cellClip = getLayerAttr(layer, "cell_clip").f();
-                 // Cell clip disabled if it's negative.
-                 if (cellClip >= 0)
-                 {
-                     layerParams.set("use_cell_clip", true);
-                     layerParams.set("cell_clip", cellClip);
-                 }
-             }
+     Mat cropSize = getTensorContent(getConstBlob(layer, value_id, 2));
+     CV_CheckTypeEQ(cropSize.type(), CV_32SC1, ""); CV_CheckEQ(cropSize.total(), (size_t)2, "");
  
-             Mat W, Wh, Wx, b;
-             blobFromTensor(getConstBlob(layer, value_id, 4), W);
-             blobFromTensor(getConstBlob(layer, value_id, 8), b);
-             const int outSize = W.cols / 4;
+     layerParams.set("height", cropSize.at<int>(0));
+     layerParams.set("width", cropSize.at<int>(1));
  
-             // IGFO->IFOG
-             float* weightData = (float*)W.data;
-             for (int i = 0; i < W.rows; ++i)
-                 for (int j = 0; j < outSize; ++j)
-                 {
-                     std::swap(weightData[i * W.cols + 1 * outSize + j],
-                               weightData[i * W.cols + 2 * outSize + j]);
-                     std::swap(weightData[i * W.cols + 2 * outSize + j],
-                               weightData[i * W.cols + 3 * outSize + j]);
-                 }
-             Wx = W.rowRange(0, W.rows - outSize).t();
-             Wh = W.rowRange(W.rows - outSize, W.rows).t();
+     int id = dstNet.addLayer(name, "CropAndResize", layerParams);
+     layer_id[name] = id;
  
-             layerParams.blobs.resize(3);
-             layerParams.blobs[0] = Wh;
-             layerParams.blobs[1] = Wx;
-             layerParams.blobs[2] = b;
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+     connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
+ }
  
-             if (hasLayerAttr(layer, "use_peephole"))
-             {
-                 bool usePeephole = getLayerAttr(layer, "use_peephole").b();
-                 if (usePeephole)
-                 {
-                     layerParams.set("use_peephole", true);
-                     layerParams.blobs.resize(6);
-                     for (int i = 0; i < 3; ++i)
-                     {
-                         Mat w;
-                         blobFromTensor(getConstBlob(layer, value_id, 5 + i), w);
-                         w = w.reshape(1, w.total());  // Single column.
-                         w = Mat::diag(w);  // Make a diagonal matrix.
-                         layerParams.blobs[3 + i] = w;
-                     }
-                 }
-             }
+ void TFImporter::parseMean(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // Computes the mean of elements across dimensions of a tensor.
+     // If keepdims is false (default) reduces input_tensor along the dimensions given in axis,
+     // else the reduced dimensions are retained with length 1.
+     // if indices = [1, 2] in NHWC layout we use global pooling: NxCxHxW --Pooling--> NxCx1x1
+     // if keepdims is false we use Flatten after Pooling: out_shape = NxC
+     // if indices = [0] we reduce along the batch axis with a global pooling.
+     // To return correct shape, we use Reshape after Pooling. To determine input shape use Slice for input,
+     // if keepdims is false we use Flatten after Slice.
+     // Example: input_shape = NxCxHxW
+     // determine out shape: NxCxHxW --Slice--> 1xCxHxW
+     //                      out_shape = 1xCxHxW if keepDims else (1xCxHxW --Flatten--> CxHxW)
+     // global pool: NxCxHxW --Flatten--> Nx(C*H*W) --Reshape--> 1x1xNx(C*H*W) --Pooling--> 1x1x1x(C*H*W) --Reshape--> out_shape
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     const int num_inputs = layer.input_size();
+     CV_CheckGT(num_inputs, 0, "");
+     Mat indices = getTensorContent(getConstBlob(layer, value_id, 1));
+     CV_Assert(indices.type() == CV_32SC1);
+     // There are two attributes, "keepdims" and a deprecated "keep_dims".
+     bool keepDims = false;
+     if (hasLayerAttr(layer, "keepdims"))
+         keepDims = getLayerAttr(layer, "keepdims").b();
+     else if (hasLayerAttr(layer, "keep_dims"))
+         keepDims = getLayerAttr(layer, "keep_dims").b();
+     if (indices.total() == 1 && indices.at<int>(0) == 0)
+     {
+         LayerParams flattenLp;
+         std::string flattenName = name + "/flatten";
+         CV_Assert(layer_id.find(flattenName) == layer_id.end());
+         int flattenId = dstNet.addLayer(flattenName, "Flatten", flattenLp);
+         layer_id[flattenName] = flattenId;
+         connect(layer_id, dstNet, parsePin(layer.input(0)), flattenId, 0);
+         LayerParams reshapeLp;
+         std::string reshapeName = name + "/reshape";
+         CV_Assert(layer_id.find(reshapeName) == layer_id.end());
+         reshapeLp.set("axis", 0);
+         reshapeLp.set("num_axes", 1);
+         int newShape[] = {1, 1, -1};
+         reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], 3));
+         int reshapeId = dstNet.addLayer(reshapeName, "Reshape", reshapeLp);
+         layer_id[reshapeName] = reshapeId;
+         connect(layer_id, dstNet, Pin(flattenName), reshapeId, 0);
+         LayerParams avgLp;
+         std::string avgName = name + "/avg";
+         CV_Assert(layer_id.find(avgName) == layer_id.end());
+         avgLp.set("pool", type == "Mean" ? "ave" : "sum");
+         // pooling kernel H x 1
+         avgLp.set("global_pooling_h", true);
+         avgLp.set("kernel_w", 1);
+         int avgId = dstNet.addLayer(avgName, "Pooling", avgLp);
+         layer_id[avgName] = avgId;
+         connect(layer_id, dstNet, Pin(reshapeName), avgId, 0);
+         LayerParams sliceLp;
+         std::string layerShapeName = name + "/slice";
+         CV_Assert(layer_id.find(layerShapeName) == layer_id.end());
+         sliceLp.set("axis", 0);
+         int begin[] = {0};
+         int size[] = {1};
+         sliceLp.set("begin", DictValue::arrayInt(&begin[0], 1));
+         sliceLp.set("size", DictValue::arrayInt(&size[0], 1));
+         int sliceId = dstNet.addLayer(layerShapeName, "Slice", sliceLp);
+         layer_id[layerShapeName] = sliceId;
+         connect(layer_id, dstNet, Pin(layer.input(0)), sliceId, 0);
+         if (!keepDims)
+         {
+             LayerParams squeezeLp;
+             std::string squeezeName = name + "/squeeze";
+             CV_Assert(layer_id.find(squeezeName) == layer_id.end());
+             squeezeLp.set("axis", 0);
+             squeezeLp.set("end_axis", 1);
+             int squeezeId = dstNet.addLayer(squeezeName, "Flatten", squeezeLp);
+             layer_id[squeezeName] = squeezeId;
+             connect(layer_id, dstNet, Pin(layerShapeName), squeezeId, 0);
+             layerShapeName = squeezeName;
+         }
  
-             int id = dstNet.addLayer(name, "LSTM", layerParams);
+         int id = dstNet.addLayer(name, "Reshape", layerParams);
+         layer_id[name] = id;
+         connect(layer_id, dstNet, Pin(avgName), id, 0);
+         connect(layer_id, dstNet, Pin(layerShapeName), id, 1);
+     } else if (indices.total() == 1) {
+         int axis = toNCHW(indices.at<int>(0));
+         if (axis == 2 || axis == 3)
+         {
+             layerParams.set("pool", type == "Mean" ? "ave" : "sum");
+             layerParams.set(axis == 2 ? "kernel_w" : "kernel_h", 1);
+             layerParams.set(axis == 2 ? "global_pooling_h" : "global_pooling_w", true);
+             int id = dstNet.addLayer(name, "Pooling", layerParams);
              layer_id[name] = id;
+             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
  
-             // one input only
-             connect(layer_id, dstNet, parsePin(layer.input(1)), id, 0);
-             data_layouts[name] = DATA_LAYOUT_UNKNOWN;
+             if (!keepDims)
+             {
+                 // To keep correct order after squeeze dims we first need to change layout from NCHW to NHWC
+                 LayerParams permLP;
+                 int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
+                 std::string permName = name + "/nchw";
+                 Pin inpId = Pin(name);
+                 addPermuteLayer(order, permName, inpId);
+                 LayerParams squeezeLp;
+                 std::string squeezeName = name + "/squeeze";
+                 CV_Assert(layer_id.find(squeezeName) == layer_id.end());
+                 squeezeLp.set("axis", indices.at<int>(0));
+                 squeezeLp.set("end_axis", indices.at<int>(0) + 1);
+                 int squeezeId = dstNet.addLayer(squeezeName, "Flatten", squeezeLp);
+                 layer_id[squeezeName] = squeezeId;
+                 connect(layer_id, dstNet, Pin(permName), squeezeId, 0);
+             }
          }
-         else if (type == "ResizeNearestNeighbor" || type == "ResizeBilinear" || type == "FusedResizeAndPadConv2D")
+         else if (axis == 1)
          {
-             CV_CheckGT(num_inputs, 0, "");
-             std::string convWeights = "";
-             if (type == "FusedResizeAndPadConv2D")
-             {
-                 // input: "mul_1"
-                 // input: "decoder/ResizeBilinear/size"
-                 // input: "decoder/decoder_conv0/Conv2D_dummy_paddings"
-                 // input: "decoder/decoder_conv0/weights"
-                 CV_CheckEQ(num_inputs, 4, "Number of input for FusedResizeAndPadConv2D");
-                 Mat paddings = getTensorContent(getConstBlob(layer, value_id, 2));
-                 CV_CheckEQ(countNonZero(paddings), 0, "Unsupported mode");
+             int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
+             Pin inpId = parsePin(layer.input(0));
+             addPermuteLayer(order, name + "/nhwc", inpId);
  
-                 convWeights = layer.input(3);
-                 layer.mutable_input()->DeleteSubrange(2, 2);  // FIXIT do NOT modify input model
-                 num_inputs = layer.input_size();
-                 name = name + "/resize";
+             layerParams.set("pool", type == "Mean" ? "ave" : "sum");
+             layerParams.set("kernel_h", 1);
+             layerParams.set("global_pooling_w", true);
+             int id = dstNet.addLayer(name, "Pooling", layerParams);
+             layer_id[name] = id;
+             connect(layer_id, dstNet, inpId, id, 0);
  
-                 if (hasLayerAttr(layer, "resize_align_corners"))
-                 {
-                     // FIXIT do NOT modify input model
-                     layer.mutable_attr()->insert(
-                         ::google::protobuf::MapPair<std::string, tensorflow::AttrValue>("align_corners",
-                                                                                         getLayerAttr(layer, "resize_align_corners")));
-                 }
-             }
-             if (num_inputs == 2)
-             {
-                 Mat outSize = getTensorContent(getConstBlob(layer, value_id, 1));
-                 CV_CheckTypeEQ(outSize.type(), CV_32SC1, ""); CV_CheckEQ(outSize.total(), (size_t)2, "");
-                 layerParams.set("height", outSize.at<int>(0, 0));
-                 layerParams.set("width", outSize.at<int>(0, 1));
-             }
-             else if (num_inputs == 3)
+             if (!keepDims)
              {
-                 Mat factorHeight = getTensorContent(getConstBlob(layer, value_id, 1));
-                 Mat factorWidth = getTensorContent(getConstBlob(layer, value_id, 2));
-                 factorHeight.convertTo(factorHeight, CV_32F);
-                 factorWidth.convertTo(factorWidth, CV_32F);
-                 layerParams.set("zoom_factor_x", factorWidth.at<float>(0));
-                 layerParams.set("zoom_factor_y", factorHeight.at<float>(0));
+                 LayerParams squeezeLp;
+                 std::string squeezeName = name + "/squeeze";
+                 CV_Assert(layer_id.find(squeezeName) == layer_id.end());
+                 int channel_id = 3; // TF NHWC layout
+                 squeezeLp.set("axis", channel_id - 1);
+                 squeezeLp.set("end_axis", channel_id);
+                 int squeezeId = dstNet.addLayer(squeezeName, "Flatten", squeezeLp);
+                 layer_id[squeezeName] = squeezeId;
+                 connect(layer_id, dstNet, Pin(name), squeezeId, 0);
              }
              else
-                 CV_Check(num_inputs, num_inputs == 2 || num_inputs == 3, "");
-             if (type == "ResizeNearestNeighbor")
-                 layerParams.set("interpolation", "nearest");
-             else
-                 layerParams.set("interpolation", "bilinear");
-             if (hasLayerAttr(layer, "align_corners"))
-                 layerParams.set("align_corners", getLayerAttr(layer, "align_corners").b());
-             if (hasLayerAttr(layer, "half_pixel_centers"))
-                 layerParams.set("half_pixel_centers", getLayerAttr(layer, "half_pixel_centers").b());
-             int id = dstNet.addLayer(name, "Resize", layerParams);
-             layer_id[name] = id;
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-             // Step back to add convolution
-             if (type == "FusedResizeAndPadConv2D")
              {
-                 tensorflow::NodeDef conv = layer_;
-                 conv.clear_input();
-                 conv.add_input(name);
-                 conv.add_input(convWeights);
-                 conv.set_op("Conv2D");
-                 parseNode(conv);
+                 int order[] = {0, 3, 1, 2};  // From NHWC to OpenCV's NCHW.
+                 Pin inpId = parsePin(name);
+                 addPermuteLayer(order, name + "/nchw", inpId);
              }
          }
-         else if (type == "L2Normalize")
-         {
-             // op: "L2Normalize"
-             // input: "input"
-             // input: "reduction_indices" (axis)
-             CV_CheckEQ(num_inputs, 2, "");
-             Mat reductionIndices = getTensorContent(getConstBlob(layer, value_id, 1));
-             CV_Assert(reductionIndices.type() == CV_32SC1);
-             const int numAxes = reductionIndices.total();
-             if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
-                 for (int i = 0; i < numAxes; ++i)
-                     reductionIndices.at<int>(i) = toNCHW(reductionIndices.at<int>(i));
-             cv::sort(reductionIndices, reductionIndices, SORT_ASCENDING);
-             for (int i = 1; i < numAxes; ++i)
-             {
-                 CV_Assert(reductionIndices.at<int>(i) == reductionIndices.at<int>(i - 1) + 1);
-                 // Axes have the same sign.
-                 CV_Assert(reductionIndices.at<int>(i) * reductionIndices.at<int>(i - 1) >= 0);
-             }
-             layerParams.set("start_axis", reductionIndices.at<int>(0));
-             layerParams.set("end_axis", reductionIndices.at<int>(numAxes - 1));
+     } else {
+         if (indices.total() != 2 || indices.at<int>(0) != 1 || indices.at<int>(1) != 2)
+             CV_Error(Error::StsNotImplemented, "Unsupported mode of reduce_mean or reduce_sum operation.");
  
-             int id = dstNet.addLayer(name, "Normalize", layerParams);
-             layer_id[name] = id;
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-         }
-         else if (type == "PriorBox")
+         layerParams.set("pool", type == "Mean" ? "ave" : "sum");
+         layerParams.set("global_pooling", true);
+         int id = dstNet.addLayer(name, "Pooling", layerParams);
+         layer_id[name] = id;
+         connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+         if (!keepDims)
          {
-             CV_CheckEQ(num_inputs, 2, "");
-             if (hasLayerAttr(layer, "min_size"))
-                 layerParams.set("min_size", getLayerAttr(layer, "min_size").i());
-             if (hasLayerAttr(layer, "max_size"))
-                 layerParams.set("max_size", getLayerAttr(layer, "max_size").i());
-             if (hasLayerAttr(layer, "flip"))
-                 layerParams.set("flip", getLayerAttr(layer, "flip").b());
-             if (hasLayerAttr(layer, "clip"))
-                 layerParams.set("clip", getLayerAttr(layer, "clip").b());
-             if (hasLayerAttr(layer, "offset"))
-                 layerParams.set("offset", getLayerAttr(layer, "offset").f());
-             if (hasLayerAttr(layer, "step"))
-                 layerParams.set("step", getLayerAttr(layer, "step").f());
-             const std::string paramNames[] = {"variance", "aspect_ratio", "scales",
-                                               "width", "height"};
-             for (int i = 0; i < 5; ++i)
-             {
-                 if (hasLayerAttr(layer, paramNames[i]))
-                 {
-                     Mat values = getTensorContent(getLayerAttr(layer, paramNames[i]).tensor());
-                     layerParams.set(paramNames[i],
-                                     DictValue::arrayReal<float*>((float*)values.data, values.total()));
-                 }
-             }
-             int id = dstNet.addLayer(name, "PriorBox", layerParams);
-             layer_id[name] = id;
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-             connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
-             data_layouts[name] = DATA_LAYOUT_UNKNOWN;
+             LayerParams flattenLp;
+             std::string flattenName = name + "/flatten";
+             CV_Assert(layer_id.find(flattenName) == layer_id.end());
+             int flattenId = dstNet.addLayer(flattenName, "Flatten", flattenLp);
+             layer_id[flattenName] = flattenId;
+             connect(layer_id, dstNet, Pin(name), flattenId, 0);
          }
-         else if (type == "Softmax")
-         {
-             CV_CheckGT(num_inputs, 0, "");
-             if (hasLayerAttr(layer, "axis"))
-                 layerParams.set("axis", getLayerAttr(layer, "axis").i());
+     }
+ }
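The common indices = [1, 2] case above is just a per-channel global average (or sum) pool; a tiny numeric example of what reduce_mean over H and W produces (toy values, independent of the importer):

    #include <cstdio>

    int main()
    {
        // 1x2x2x2 NHWC tensor: reduce_mean over axes [1, 2] gives one value per channel.
        const float nhwc[2][2][2] = {  // [H][W][C]
            {{1.f, 10.f}, {2.f, 20.f}},
            {{3.f, 30.f}, {4.f, 40.f}},
        };
        for (int c = 0; c < 2; ++c)
        {
            float sum = 0.f;
            for (int h = 0; h < 2; ++h)
                for (int w = 0; w < 2; ++w)
                    sum += nhwc[h][w][c];
            std::printf("channel %d mean = %g\n", c, sum / 4.f);  // 2.5 and 25
        }
        return 0;
    }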
  
-             int id = dstNet.addLayer(name, "Softmax", layerParams);
-             layer_id[name] = id;
-             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
-         }
-         else if (type == "CropAndResize")
-         {
-             // op: "CropAndResize"
-             // input: "input"
-             // input: "boxes"
-             // input: "sizes"
-             CV_CheckEQ(num_inputs, 3, "");
+ void TFImporter::parsePack(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: tf.stack(list of tensors, axis=0)
+     // Join a list of inputs along a new axis.
+     // The "axis" specifies the index of the new axis in the dimensions of the output.
+     // Example: given a list with "N" tensors of shape (C, H, W):
+     // if axis == 0 then the output tensor will have the shape (N, C, H, W),
+     // if axis == 1 then the output tensor will have the shape (C, N, H, W).
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
+     CV_CheckGT(num_inputs, 0, "");
+     CV_Assert(hasLayerAttr(layer, "axis"));
+     int dim = (int)getLayerAttr(layer, "axis").i();
+     if (dim != 0)
+         CV_Error(Error::StsNotImplemented, "Unsupported mode of pack operation.");
+     CV_Assert(hasLayerAttr(layer, "N"));
+     int num = (int)getLayerAttr(layer, "N").i();
+     CV_CheckEQ(num_inputs, num, "");
+     std::string base_name = name + "/reshape_";
+     std::vector<int> reshape_ids;
+     for (int i = 0; i < num; i++) {
+         std::ostringstream ss;
+         ss << i;
+         std::string reshape_name = base_name + ss.str();
+         LayerParams reshapeLP;
+         reshapeLP.set("axis", dim);
+         reshapeLP.set("num_axes", 1);
+         int outShape[] = {1, -1};
+         reshapeLP.set("dim", DictValue::arrayInt(&outShape[0], 2));
+         int id = dstNet.addLayer(reshape_name, "Reshape", reshapeLP);
+         layer_id[reshape_name] = id;
+         reshape_ids.push_back(id);
+         connect(layer_id, dstNet, parsePin(layer.input(i)), id, 0);
+     }
  
-             Mat cropSize = getTensorContent(getConstBlob(layer, value_id, 2));
-             CV_CheckTypeEQ(cropSize.type(), CV_32SC1, ""); CV_CheckEQ(cropSize.total(), (size_t)2, "");
+     layerParams.set("axis", dim);
+     int id = dstNet.addLayer(name, "Concat", layerParams);
+     layer_id[name] = id;
  
-             layerParams.set("height", cropSize.at<int>(0));
-             layerParams.set("width", cropSize.at<int>(1));
+     for (int li = 0; li < num; li++)
+         dstNet.connect(reshape_ids[li], 0, id, li);
+ }
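Shape-wise, the Pack emulation above prepends a length-1 axis to every input via Reshape (dim = {1, -1}, with the -1 inferred) and then concatenates along axis 0; a sketch of the bookkeeping under that assumption:

    #include <cstdio>

    int main()
    {
        // tf.stack of N tensors of shape (C, H, W) along axis 0 yields (N, C, H, W).
        const int N = 3, C = 2, H = 4, W = 5;
        std::printf("each input after Reshape: 1 x %d x %d x %d\n", C, H, W);
        std::printf("after Concat(axis=0):     %d x %d x %d x %d\n", N, C, H, W);
        return 0;
    }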
  
-             int id = dstNet.addLayer(name, "CropAndResize", layerParams);
-             layer_id[name] = id;
+ void TFImporter::parseClipByValue(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // op: "ClipByValue"
+     // input: "input"
+     // input: "mix"
+     // input: "max"
  
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-             connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
-         }
-         else if (type == "Mean" || type == "Sum")
-         {
-             // Computes the mean of elements across dimensions of a tensor.
-             // If keepdims is false (default) reduces input_tensor along the dimensions given in axis,
-             // else the reduced dimensions are retained with length 1.
-             // if indices = [1, 2] in NHWC layout we use global pooling: NxCxHxW --Pooling--> NxCx1x1
-             // if keepdims is false we use Flatten after Pooling: out_shape = NxC
-             // if indices = [0] we use a global pooling by indices.
-             // To return correct shape, we use Reshape after Pooling. To determine input shape use Slice for input,
-             // if keepdims is false we use Flatten after Slice.
-             // Example: input_shape = NxCxHxW
-             // determine out shape: NxCxHxW --Slice--> 1xCxHxW
-             //                      out_shape = 1xCxHxW if keepDims else (1xCxHxW --Flatten--> CxHxW)
-             // global pool: NxCxHxW --Flatten--> Nx(C*H*W) --Reshape--> 1x1xNx(C*H*W) --Pooling--> 1x1x1x(C*H*W) --Reshape--> out_shape
-             CV_CheckGT(num_inputs, 0, "");
-             Mat indices = getTensorContent(getConstBlob(layer, value_id, 1));
-             CV_Assert(indices.type() == CV_32SC1);
-             // There are two attributes, "keepdims" and a deprecated "keep_dims".
-             bool keepDims = false;
-             if (hasLayerAttr(layer, "keepdims"))
-                 keepDims = getLayerAttr(layer, "keepdims").b();
-             else if (hasLayerAttr(layer, "keep_dims"))
-                 keepDims = getLayerAttr(layer, "keep_dims").b();
-             if (indices.total() == 1 && indices.at<int>(0) == 0)
-             {
-                 LayerParams flattenLp;
-                 std::string flattenName = name + "/flatten";
-                 CV_Assert(layer_id.find(flattenName) == layer_id.end());
-                 int flattenId = dstNet.addLayer(flattenName, "Flatten", flattenLp);
-                 layer_id[flattenName] = flattenId;
-                 connect(layer_id, dstNet, parsePin(layer.input(0)), flattenId, 0);
-                 LayerParams reshapeLp;
-                 std::string reshapeName = name + "/reshape";
-                 CV_Assert(layer_id.find(reshapeName) == layer_id.end());
-                 reshapeLp.set("axis", 0);
-                 reshapeLp.set("num_axes", 1);
-                 int newShape[] = {1, 1, -1};
-                 reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], 3));
-                 int reshapeId = dstNet.addLayer(reshapeName, "Reshape", reshapeLp);
-                 layer_id[reshapeName] = reshapeId;
-                 connect(layer_id, dstNet, Pin(flattenName), reshapeId, 0);
-                 LayerParams avgLp;
-                 std::string avgName = name + "/avg";
-                 CV_Assert(layer_id.find(avgName) == layer_id.end());
-                 avgLp.set("pool", type == "Mean" ? "ave" : "sum");
-                 // pooling kernel H x 1
-                 avgLp.set("global_pooling_h", true);
-                 avgLp.set("kernel_w", 1);
-                 int avgId = dstNet.addLayer(avgName, "Pooling", avgLp);
-                 layer_id[avgName] = avgId;
-                 connect(layer_id, dstNet, Pin(reshapeName), avgId, 0);
-                 LayerParams sliceLp;
-                 std::string layerShapeName = name + "/slice";
-                 CV_Assert(layer_id.find(layerShapeName) == layer_id.end());
-                 sliceLp.set("axis", 0);
-                 int begin[] = {0};
-                 int size[] = {1};
-                 sliceLp.set("begin", DictValue::arrayInt(&begin[0], 1));
-                 sliceLp.set("size", DictValue::arrayInt(&size[0], 1));
-                 int sliceId = dstNet.addLayer(layerShapeName, "Slice", sliceLp);
-                 layer_id[layerShapeName] = sliceId;
-                 connect(layer_id, dstNet, Pin(layer.input(0)), sliceId, 0);
-                 if (!keepDims)
-                 {
-                     LayerParams squeezeLp;
-                     std::string squeezeName = name + "/squeeze";
-                     CV_Assert(layer_id.find(squeezeName) == layer_id.end());
-                     squeezeLp.set("axis", 0);
-                     squeezeLp.set("end_axis", 1);
-                     int squeezeId = dstNet.addLayer(squeezeName, "Flatten", squeezeLp);
-                     layer_id[squeezeName] = squeezeId;
-                     connect(layer_id, dstNet, Pin(layerShapeName), squeezeId, 0);
-                     layerShapeName = squeezeName;
-                 }
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-                 int id = dstNet.addLayer(name, "Reshape", layerParams);
-                 layer_id[name] = id;
-                 connect(layer_id, dstNet, Pin(avgName), id, 0);
-                 connect(layer_id, dstNet, Pin(layerShapeName), id, 1);
-             } else if (indices.total() == 1) {
-                 int axis = toNCHW(indices.at<int>(0));
-                 if (axis == 2 || axis == 3)
-                 {
-                     layerParams.set("pool", type == "Mean" ? "ave" : "sum");
-                     layerParams.set(axis == 2 ? "kernel_w" : "kernel_h", 1);
-                     layerParams.set(axis == 2 ? "global_pooling_h" : "global_pooling_w", true);
-                     int id = dstNet.addLayer(name, "Pooling", layerParams);
-                     layer_id[name] = id;
-                     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-                     if (!keepDims)
-                     {
-                         // To keep correct order after squeeze dims we first need to change layout from NCHW to NHWC
-                         LayerParams permLP;
-                         int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
-                         std::string permName = name + "/nchw";
-                         Pin inpId = Pin(name);
-                         addPermuteLayer(order, permName, inpId);
-                         LayerParams squeezeLp;
-                         std::string squeezeName = name + "/squeeze";
-                         CV_Assert(layer_id.find(squeezeName) == layer_id.end());
-                         squeezeLp.set("axis", indices.at<int>(0));
-                         squeezeLp.set("end_axis", indices.at<int>(0) + 1);
-                         int squeezeId = dstNet.addLayer(squeezeName, "Flatten", squeezeLp);
-                         layer_id[squeezeName] = squeezeId;
-                         connect(layer_id, dstNet, Pin(permName), squeezeId, 0);
-                     }
-                 }
-                 else if (axis == 1)
-                 {
-                     int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
-                     Pin inpId = parsePin(layer.input(0));
-                     addPermuteLayer(order, name + "/nhwc", inpId);
-                     layerParams.set("pool", type == "Mean" ? "ave" : "sum");
-                     layerParams.set("kernel_h", 1);
-                     layerParams.set("global_pooling_w", true);
-                     int id = dstNet.addLayer(name, "Pooling", layerParams);
-                     layer_id[name] = id;
-                     connect(layer_id, dstNet, inpId, id, 0);
-                     if (!keepDims)
-                     {
-                         LayerParams squeezeLp;
-                         std::string squeezeName = name + "/squeeze";
-                         CV_Assert(layer_id.find(squeezeName) == layer_id.end());
-                         int channel_id = 3; // TF NHWC layout
-                         squeezeLp.set("axis", channel_id - 1);
-                         squeezeLp.set("end_axis", channel_id);
-                         int squeezeId = dstNet.addLayer(squeezeName, "Flatten", squeezeLp);
-                         layer_id[squeezeName] = squeezeId;
-                         connect(layer_id, dstNet, Pin(name), squeezeId, 0);
-                     }
-                     else
-                     {
-                         int order[] = {0, 3, 1, 2};  // From NHWC to OpenCV's NCHW.
-                         Pin inpId = parsePin(name);
-                         addPermuteLayer(order, name + "/nchw", inpId);
-                     }
-                 }
-             } else {
-                 if (indices.total() != 2 || indices.at<int>(0) != 1 || indices.at<int>(1) != 2)
-                     CV_Error(Error::StsNotImplemented, "Unsupported mode of reduce_mean or reduce_sum operation.");
+     CV_CheckEQ(num_inputs, 3, "");
  
-                 layerParams.set("pool", type == "Mean" ? "ave" : "sum");
-                 layerParams.set("global_pooling", true);
-                 int id = dstNet.addLayer(name, "Pooling", layerParams);
-                 layer_id[name] = id;
-                 connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+     Mat minValue = getTensorContent(getConstBlob(layer, value_id, 1));
+     Mat maxValue = getTensorContent(getConstBlob(layer, value_id, 2));
+     CV_CheckEQ(minValue.total(), (size_t)1, ""); CV_CheckTypeEQ(minValue.type(), CV_32FC1, "");
+     CV_CheckEQ(maxValue.total(), (size_t)1, ""); CV_CheckTypeEQ(maxValue.type(), CV_32FC1, "");
  
-                 if (!keepDims)
-                 {
-                     LayerParams flattenLp;
-                     std::string flattenName = name + "/flatten";
-                     CV_Assert(layer_id.find(flattenName) == layer_id.end());
-                     int flattenId = dstNet.addLayer(flattenName, "Flatten", flattenLp);
-                     layer_id[flattenName] = flattenId;
-                     connect(layer_id, dstNet, Pin(name), flattenId, 0);
-                 }
-             }
-         }
-         else if (type == "Pack")
-         {
-             // op: tf.stack(list of tensors, axis=0)
-             // Join a list of inputs along a new axis.
-             // The "axis" specifies the index of the new axis in the dimensions of the output.
-             // Example: given a list with "N" tensors of shape (C, H, W):
-             // if axis == 0 then the output tensor will have the shape (N, C, H, W),
-             // if axis == 1 then the output tensor will have the shape (C, N, H, W).
-             CV_CheckGT(num_inputs, 0, "");
-             CV_Assert(hasLayerAttr(layer, "axis"));
-             int dim = (int)getLayerAttr(layer, "axis").i();
-             if (dim != 0)
-                 CV_Error(Error::StsNotImplemented, "Unsupported mode of pack operation.");
-             CV_Assert(hasLayerAttr(layer, "N"));
-             int num = (int)getLayerAttr(layer, "N").i();
-             CV_CheckEQ(num_inputs, num, "");
-             std::string base_name = name + "/reshape_";
-             std::vector<int> reshape_ids;
-             for (int i = 0; i < num; i++) {
-                 std::ostringstream ss;
-                 ss << i;
-                 std::string reshape_name = base_name + ss.str();
-                 LayerParams reshapeLP;
-                 reshapeLP.set("axis", dim);
-                 reshapeLP.set("num_axes", 1);
-                 int outShape[] = {1, -1};
-                 reshapeLP.set("dim", DictValue::arrayInt(&outShape[0], 2));
-                 int id = dstNet.addLayer(reshape_name, "Reshape", reshapeLP);
-                 layer_id[reshape_name] = id;
-                 reshape_ids.push_back(id);
-                 connect(layer_id, dstNet, parsePin(layer.input(i)), id, 0);
-             }
+     layerParams.set("min_value", minValue.at<float>(0));
+     layerParams.set("max_value", maxValue.at<float>(0));
  
-             layerParams.set("axis", dim);
-             int id = dstNet.addLayer(name, "Concat", layerParams);
-             layer_id[name] = id;
+     int id = dstNet.addLayer(name, "ReLU6", layerParams);
+     layer_id[name] = id;
  
-             for (int li = 0; li < num; li++)
-                 dstNet.connect(reshape_ids[li], 0, id, li);
-         }
-         else if (type == "ClipByValue")
-         {
-             // op: "ClipByValue"
-             // input: "input"
-             // input: "mix"
-             // input: "max"
-             CV_CheckEQ(num_inputs, 3, "");
+     connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
+ }
  
-             Mat minValue = getTensorContent(getConstBlob(layer, value_id, 1));
-             Mat maxValue = getTensorContent(getConstBlob(layer, value_id, 2));
-             CV_CheckEQ(minValue.total(), (size_t)1, ""); CV_CheckTypeEQ(minValue.type(), CV_32FC1, "");
-             CV_CheckEQ(maxValue.total(), (size_t)1, ""); CV_CheckTypeEQ(maxValue.type(), CV_32FC1, "");
+ void TFImporter::parseLeakyRelu(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     const std::string& name = layer.name();
+     const int num_inputs = layer.input_size();
  
-             layerParams.set("min_value", minValue.at<float>(0));
-             layerParams.set("max_value", maxValue.at<float>(0));
+     CV_CheckGT(num_inputs, 0, "");
+     CV_Assert(hasLayerAttr(layer, "alpha"));
+     layerParams.set("negative_slope", getLayerAttr(layer, "alpha").f());
  
-             int id = dstNet.addLayer(name, "ReLU6", layerParams);
-             layer_id[name] = id;
+     int id = dstNet.addLayer(name, "ReLU", layerParams);
+     layer_id[name] = id;
+     connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+ }
  
-             connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-         }
-         else if (type == "LeakyRelu")
-         {
-             CV_CheckGT(num_inputs, 0, "");
-             CV_Assert(hasLayerAttr(layer, "alpha"));
-             layerParams.set("negative_slope", getLayerAttr(layer, "alpha").f());
+ void TFImporter::parseActivation(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     const int num_inputs = layer.input_size();
+     CV_CheckGT(num_inputs, 0, "");
+     std::string dnnType = type;
+     if (type == "Abs") dnnType = "AbsVal";
+     else if (type == "Tanh") dnnType = "TanH";
+     else if (type == "Relu") dnnType = "ReLU";
+     else if (type == "Relu6") dnnType = "ReLU6";
+     else if (type == "Elu") dnnType = "ELU";
+     int id = dstNet.addLayer(name, dnnType, layerParams);
+     layer_id[name] = id;
+     connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+ }
  
-             int id = dstNet.addLayer(name, "ReLU", layerParams);
-             layer_id[name] = id;
-             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
-         }
-         else if (type == "Abs" || type == "Tanh" || type == "Sigmoid" ||
-                  type == "Relu" || type == "Elu" || type == "Exp" ||
-                  type == "Identity" || type == "Relu6")
+ void TFImporter::parseCustomLayer(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
+ {
+     // The importer does not know how to map this TensorFlow operation onto an OpenCV layer.
+     // However, we create a layer with the same type and rely on the user having registered a custom layer for it.
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     const int num_inputs = layer.input_size();
+     // All the attributes are added to LayerParams.
+     google::protobuf::Map<std::string, tensorflow::AttrValue> attr = layer.attr();
+     for (google::protobuf::Map<std::string, tensorflow::AttrValue>::const_iterator ai = attr.begin();
+          ai != attr.end(); ++ai)
+     {
+         if (ai->second.value_case() == tensorflow::AttrValue::kS)  // string
+             layerParams.set(ai->first, ai->second.s());
+         if (ai->second.value_case() == tensorflow::AttrValue::kI)  // int64
+             layerParams.set(ai->first, ai->second.i());
+         if (ai->second.value_case() == tensorflow::AttrValue::kF)  // float
+             layerParams.set(ai->first, ai->second.f());
+         if (ai->second.value_case() == tensorflow::AttrValue::kB)  // bool
+             layerParams.set(ai->first, ai->second.b());
+     }
+     // All the Const input nodes are added to layer's blobs.
+     std::vector<std::string> inputsNames;
+     for (int i = 0; i < num_inputs; ++i)
+     {
+         // Check if input is a Const node.
+         if (value_id.find(layer.input(i)) != value_id.end())
          {
-             CV_CheckGT(num_inputs, 0, "");
-             std::string dnnType = type;
-             if (type == "Abs") dnnType = "AbsVal";
-             else if (type == "Tanh") dnnType = "TanH";
-             else if (type == "Relu") dnnType = "ReLU";
-             else if (type == "Relu6") dnnType = "ReLU6";
-             else if (type == "Elu") dnnType = "ELU";
-             int id = dstNet.addLayer(name, dnnType, layerParams);
-             layer_id[name] = id;
-             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+             Mat blob = getTensorContent(getConstBlob(layer, value_id, i));
+             layerParams.blobs.push_back(blob);
          }
          else
-         {
-             // Importer does not know how to map this TensorFlow's operation onto OpenCV's layer.
-             // However we create a layer with the same type and rely that user defined a custom layer.
+             inputsNames.push_back(layer.input(i));
+     }
+     int id = dstNet.addLayer(name, type, layerParams);
+     layer_id[name] = id;
  
-             // All the attributes are added to LayerParams.
-             google::protobuf::Map<std::string, tensorflow::AttrValue> attr = layer.attr();
-             for (google::protobuf::Map<std::string, tensorflow::AttrValue>::const_iterator ai = attr.begin();
-                  ai != attr.end(); ++ai)
-             {
-                 if (ai->second.value_case() == tensorflow::AttrValue::kS)  // string
-                     layerParams.set(ai->first, ai->second.s());
-                 if (ai->second.value_case() == tensorflow::AttrValue::kI)  // int64
-                     layerParams.set(ai->first, ai->second.i());
-                 if (ai->second.value_case() == tensorflow::AttrValue::kF)  // float
-                     layerParams.set(ai->first, ai->second.f());
-                 if (ai->second.value_case() == tensorflow::AttrValue::kB)  // bool
-                     layerParams.set(ai->first, ai->second.b());
-             }
+     for (int i = 0; i < inputsNames.size(); ++i)
+     {
+         connect(layer_id, dstNet, parsePin(inputsNames[i]), id, i);
+     }
+ }
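// Illustrative sketch (not part of this patch): how a user-side custom layer might be
// registered so that parseCustomLayer() above can resolve an otherwise unknown TF op by
// its type name. "MyTFOp" / "MyPassthroughLayer" are hypothetical names used only for
// illustration; the registered type must match the op string of the graph node.
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS

class MyPassthroughLayer CV_FINAL : public cv::dnn::Layer
{
public:
    MyPassthroughLayer(const cv::dnn::LayerParams& params) : Layer(params) {}

    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
    {
        return cv::Ptr<cv::dnn::Layer>(new MyPassthroughLayer(params));
    }

    bool getMemoryShapes(const std::vector<cv::dnn::MatShape>& inputs, const int,
                         std::vector<cv::dnn::MatShape>& outputs,
                         std::vector<cv::dnn::MatShape>&) const CV_OVERRIDE
    {
        outputs = inputs;  // identity layer: output shapes equal input shapes
        return false;
    }

    void forward(cv::InputArrayOfArrays inputs_arr, cv::OutputArrayOfArrays outputs_arr,
                 cv::OutputArrayOfArrays) CV_OVERRIDE
    {
        std::vector<cv::Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        inputs[0].copyTo(outputs[0]);  // pass the data through unchanged
    }
};

void registerMyTFOp()
{
    // Must run before the model is imported so the importer finds the factory entry.
    CV_DNN_REGISTER_LAYER_CLASS(MyTFOp, MyPassthroughLayer);
}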
  
-             // All the Const input nodes are added to layer's blobs.
-             std::vector<std::string> inputsNames;
-             for (int i = 0; i < num_inputs; ++i)
-             {
-                 // Check if input is a Const node.
-                 if (value_id.find(layer.input(i)) != value_id.end())
-                 {
-                     Mat blob = getTensorContent(getConstBlob(layer, value_id, i));
-                     layerParams.blobs.push_back(blob);
-                 }
-                 else
-                     inputsNames.push_back(layer.input(i));
-             }
-             int id = dstNet.addLayer(name, type, layerParams);
-             layer_id[name] = id;
+ TFImporter::TFImporter(Net& net, const char *model, const char *config)
+     : dstNet(net), dispatch(buildDispatchMap())
+ {
+     if (model && model[0])
+     {
+         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow model from file: " << model);
+         ReadTFNetParamsFromBinaryFileOrDie(model, &netBin);
+     }
+     if (config && config[0])
+     {
+         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow config from file: " << config);
+         ReadTFNetParamsFromTextFileOrDie(config, &netTxt);
+     }
  
-             for (int i = 0; i < inputsNames.size(); ++i)
-             {
-                 connect(layer_id, dstNet, parsePin(inputsNames[i]), id, i);
-             }
+     populateNet();
+ }
+ TFImporter::TFImporter(
+         Net& net,
+         const char *dataModel, size_t lenModel,
+         const char *dataConfig, size_t lenConfig
+ )
+     : dstNet(net), dispatch(buildDispatchMap())
+ {
+     if (dataModel != NULL && lenModel > 0)
+     {
+         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow model from memory (" << lenModel << " bytes)");
+         ReadTFNetParamsFromBinaryBufferOrDie(dataModel, lenModel, &netBin);
+     }
+     if (dataConfig != NULL && lenConfig > 0)
+     {
+         CV_LOG_DEBUG(NULL, "DNN/TF: processing TensorFlow config from memory (" << lenConfig << " bytes)");
+         ReadTFNetParamsFromTextBufferOrDie(dataConfig, lenConfig, &netTxt);
+     }
+     populateNet();
+ }
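// Illustrative usage sketch (not part of this patch): the public entry points that reach
// the two constructors above. The file path and buffer contents are placeholders.
#include <opencv2/dnn.hpp>
#include <string>
#include <vector>

cv::dnn::Net loadTFModel(const std::string& pbPath, const std::vector<uchar>& pbBuffer)
{
    if (!pbPath.empty())
        return cv::dnn::readNetFromTensorflow(pbPath);  // file-based: model (+ optional config)
    return cv::dnn::readNetFromTensorflow(pbBuffer);    // in-memory: e.g. embedded model data
}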
+ void TFImporter::kernelFromTensor(const tensorflow::TensorProto &tensor, Mat &dstBlob)
+ {
+     MatShape shape;
+     blobShapeFromTensor(tensor, shape);
+     int dims = (int)shape.size();
+     // TODO: other blob types
+     CV_Assert(tensor.dtype() == tensorflow::DT_FLOAT ||
+               tensor.dtype() == tensorflow::DT_HALF);
+     CV_Assert(dims == 4 || dims == 5);
+     int out_c, input_c, depth, height, width;
+     if (dims == 4)
+     {
+         // REORDER kernel HWIO to OIHW
+         swap(shape[0], shape[2]); // IWHO
+         swap(shape[1], shape[3]); // IOHW
+         swap(shape[0], shape[1]); // OIHW
+         depth = 1; height = shape[2]; width = shape[3];
+     }
+     else
+     {
+         // REORDER kernel DHWIO to OIDHW
+         swap(shape[0], shape[4]); // OHWID
+         swap(shape[1], shape[3]); // OIWHD
+         swap(shape[2], shape[4]); // OIDHW
+         depth = shape[2]; height = shape[3]; width = shape[4];
+     }
+     out_c = shape[0]; input_c = shape[1];
+     dstBlob.create(shape, CV_32F);
+     Mat tensorContent = getTensorContent(tensor, /*no copy*/false);
+     int size = tensorContent.total();
+     CV_Assert(size == (int)dstBlob.total());
+     float *dstData = dstBlob.ptr<float>();
+     const float *data = reinterpret_cast<const float*>(tensorContent.data);
+     int total = out_c * input_c * depth * height * width;
+     for (int i_oc = 0; i_oc < out_c; i_oc++) {
+         for (int i_ic = 0; i_ic < input_c; i_ic++) {
+             for (int i_d = 0; i_d < depth; i_d++) {
+                 for (int i_h = 0; i_h < height; i_h++) {
+                     for (int i_w = 0; i_w < width; i_w++) {
+                         int dst_i = input_c * depth * height * width * i_oc +
+                                     depth * height * width * i_ic + height * width * i_d + width * i_h + i_w;
+                         int src_i = out_c * input_c * width * height * i_d +
+                                     out_c * input_c * width * i_h + out_c * input_c * i_w + out_c * i_ic + i_oc;
+                         CV_Assert(dst_i < total);
+                         CV_Assert(src_i < total);
+                         dstData[dst_i] = data[src_i];
+                     }
+                 }
+             }
+         }
+     }
+ }
+ void TFImporter::connect(const std::map<String, int>& layers_name_id_map, Net& network, const Pin& outPin,
+              const int input_layer_id, const int input_blob_id)
+ {
+     std::map<String, int>::const_iterator it = layers_name_id_map.find(outPin.name);
+     if (it == layers_name_id_map.end())
+         CV_Error(Error::StsError, "Input layer not found: " + outPin.name);
+     std::vector<String>::iterator inpNameIt = std::find(netInputsNames.begin(), netInputsNames.end(), outPin.name);
+     int blobIndex;
+     if (inpNameIt == netInputsNames.end())
+         blobIndex = outPin.blobIndex;
+     else
+         blobIndex = inpNameIt - netInputsNames.begin();
+     network.connect(it->second, blobIndex, input_layer_id, input_blob_id);
+ }
+ void TFImporter::connectToAllBlobs(const std::map<String, int>& layer_id, Net& network, const Pin& outPin,
+                      const int input_layer_id, const int input_blobs_count)
+ {
+     for (int input_blob_id = 0; input_blob_id < input_blobs_count; input_blob_id++)
+         connect(layer_id, network, outPin, input_layer_id, input_blob_id);
+ }
+ const tensorflow::TensorProto& TFImporter::getConstBlob(const tensorflow::NodeDef &layer, std::map<String, int> const_layers,
+                                               int input_blob_index, int* actual_inp_blob_idx) {
+     if (input_blob_index == -1) {
+         for(int i = 0; i < layer.input_size(); i++) {
+             Pin input = parsePin(layer.input(i));
+             if (const_layers.find(input.name) != const_layers.end()) {
+                 if (input_blob_index != -1)
+                     CV_Error(Error::StsError, "More than one input is Const op");
+                 input_blob_index = i;
+             }
+         }
+     }
+     if (input_blob_index == -1)
+         CV_Error(Error::StsError, "Const input blob for weights not found");
+     Pin kernel_inp = parsePin(layer.input(input_blob_index));
+     if (const_layers.find(kernel_inp.name) == const_layers.end())
+         CV_Error(Error::StsError, "Input [" + layer.input(input_blob_index) +
+                                   "] for node [" + layer.name() + "] not found");
+     if (kernel_inp.blobIndex != 0)
+         CV_Error(Error::StsError, "Unsupported kernel input");
+     if(actual_inp_blob_idx) {
+         *actual_inp_blob_idx = input_blob_index;
+     }
+     int nodeIdx = const_layers.at(kernel_inp.name);
+     if (nodeIdx < netBin.node_size() && netBin.node(nodeIdx).name() == kernel_inp.name)
+     {
+         return netBin.node(nodeIdx).attr().at("value").tensor();
+     }
+     else
+     {
+         CV_Assert_N(nodeIdx < netTxt.node_size(),
+                     netTxt.node(nodeIdx).name() == kernel_inp.name);
+         return netTxt.node(nodeIdx).attr().at("value").tensor();
+     }
+ }
+ static void addConstNodes(tensorflow::GraphDef& net, std::map<String, int>& const_layers,
+                           std::set<String>& layers_to_ignore)
+ {
+     CV_LOG_DEBUG(NULL, "DNN/TF: addConstNodes(): handling " << net.node_size() << " nodes...");
+     for (int li = 0; li < net.node_size(); li++)
+     {
+         const tensorflow::NodeDef &layer = net.node(li);
+         String name = layer.name();
+         String type = layer.op();
+         //CV_LOG_DEBUG(NULL, "DNN/TF: layer_id=" << li << " - '" << name << "' @ " << type);
+         try
+         {
+             if (type == "Dequantize")
+             {
+                 // Example of Dequantize node:
+                 //   name: "conv2d_1/bias"
+                 //   op: "Dequantize"
+                 //   input: "conv2d_1/bias_quantized_const" (tensor of dtype DT_QUINT8)
+                 //   input: "conv2d_1/bias_quantized_min"
+                 //   input: "conv2d_1/bias_quantized_max"
+                 //   attr { key: "T" value { type: DT_QUINT8 } }   (quantized type)
+                 //   attr { key: "mode" value { s: "MIN_FIRST" } } (quantization technique)
+                 CV_CheckEQ(layer.input_size(), 3, "Dequantize: only 3 inputs are supported");
+                 for (int i = 0; i < 3; ++i)
+                     CV_Assert(const_layers.find(layer.input(i)) != const_layers.end());
+                 CV_Assert(hasLayerAttr(layer, "mode") &&
+                           getLayerAttr(layer, "mode").s() == "MIN_FIRST");
+                 int tensorId = const_layers[layer.input(0)];
+                 int minId = const_layers[layer.input(1)];
+                 int maxId = const_layers[layer.input(2)];
+                 tensorflow::TensorProto* tensor = net.mutable_node(tensorId)
+                                                     ->mutable_attr()->at("value")
+                                                      .mutable_tensor();
+                 CV_CheckEQ((int)tensor->dtype(), (int)tensorflow::DT_QUINT8, "");
+                 Mat qMin = getTensorContent(net.node(minId).attr().at("value").tensor());
+                 Mat qMax = getTensorContent(net.node(maxId).attr().at("value").tensor());
+                 CV_CheckEQ(qMin.total(), (size_t)1, "");
+                 CV_CheckTypeEQ(qMin.type(), CV_32FC1, "");
+                 CV_CheckEQ(qMax.total(), (size_t)1, "");
+                 CV_CheckTypeEQ(qMax.type(), CV_32FC1, "");
+                 Mat content = getTensorContent(*tensor);
+                 float minVal = qMin.at<float>(0);
+                 float rangeScale = (qMax.at<float>(0) - minVal) / 255;
+                 CV_Assert(rangeScale >= 0);
+                 content.convertTo(content, CV_32FC1, rangeScale,
+                                   rangeScale * cvRound(minVal / rangeScale));
+                 tensor->set_dtype(tensorflow::DT_FLOAT);
+                 tensor->set_tensor_content(content.data, content.total() * content.elemSize1());
+                 net.mutable_node(tensorId)->set_name(name);
+                 CV_Assert(const_layers.insert(std::make_pair(name, tensorId)).second);
+                 layers_to_ignore.insert(name);
+                 continue;
+             }
+             else if (type != "Const")
+                 continue;  // only Const parameters are supported
+             if (layer.attr().find("value") != layer.attr().end())
+             {
+                 CV_Assert(const_layers.insert(std::make_pair(name, li)).second);
+             }
+             layers_to_ignore.insert(name);
+         }
+         catch (const std::exception& e)
+         {
+             CV_LOG_ERROR(NULL, "DNN/TF: Can't handle node='" << name << "'. Exception: " << e.what());
+             throw;
+         }
+     }
+     CV_LOG_DEBUG(NULL, "DNN/TF: layers_to_ignore.size() = " << layers_to_ignore.size());
+ }
+ // If all inputs of a specific layer have the same data layout, we can infer that the
+ // layer's output has that data layout too. Otherwise returns DATA_LAYOUT_UNKNOWN.
+ DataLayout TFImporter::predictOutputDataLayout(const tensorflow::NodeDef& layer)
+ {
+     DataLayout layout = getDataLayout(layer);
+     if (layout != DATA_LAYOUT_UNKNOWN)
+     {
+         CV_LOG_DEBUG(NULL, "DNN/TF: predictOutputDataLayout(" << layer.name() << " @ " << layer.op() << ") => " << (int)layout << " (from attrs)");
+         return layout;
+     }
+     // Determine layout by layer's inputs
+     for (int i = 0, n = layer.input_size(); i < n; ++i)
+     {
+         std::map<String, DataLayout>::const_iterator it = data_layouts.find(getNodeName(layer.input(i)));
+         if (it != data_layouts.end())
+         {
+             if (layout != DATA_LAYOUT_UNKNOWN)
+             {
+                 if (it->second != layout && it->second != DATA_LAYOUT_UNKNOWN)
+                     return DATA_LAYOUT_UNKNOWN;
+             }
+             else
+                 layout = it->second;
+         }
+     }
+     if (layout != DATA_LAYOUT_UNKNOWN)
+     {
+         CV_LOG_DEBUG(NULL, "DNN/TF: predictOutputDataLayout(" << layer.name() << " @ " << layer.op() << ") => " << (int)layout << " (from inputs)");
+         return layout;
+     }
+     // Determine layout by layer's consumers recursively.
+     std::map<String, DataLayout>::const_iterator it = data_layouts.find(layer.name());
+     CV_Assert(it != data_layouts.end());
+     return it->second;
+ }
+ void TFImporter::populateNet()
+ {
+     CV_Assert(netBin.ByteSize() || netTxt.ByteSize());
+     CV_LOG_INFO(NULL, "DNN/TF: parsing model"
+         << (netBin.has_versions() ? cv::format(" produced by TF v%d (min_consumer=%d)", (int)netBin.versions().producer(), (int)netBin.versions().min_consumer()) : cv::String(" (N/A version info)"))
+         << ". Number of nodes = " << netBin.node_size()
+     );
+     if (netTxt.ByteSize())
+     {
+         CV_LOG_INFO(NULL, "DNN/TF: parsing config"
+             << (netTxt.has_versions() ? cv::format(" produced by TF v%d (min_consumer=%d)", (int)netTxt.versions().producer(), (int)netTxt.versions().min_consumer()) : cv::String(" (N/A version info)"))
+             << ". Number of nodes = " << netTxt.node_size()
+         );
+         RemoveIdentityOps(netBin);
+         CV_LOG_DEBUG(NULL, "DNN/TF: RemoveIdentityOps(model) => " << netBin.node_size() << " nodes");
+         RemoveIdentityOps(netTxt);
+         CV_LOG_DEBUG(NULL, "DNN/TF: RemoveIdentityOps(config) => " << netTxt.node_size() << " nodes");
+         sortByExecutionOrder(netTxt);
+         CV_LOG_DEBUG(NULL, "DNN/TF: sortByExecutionOrder(config) => " << netTxt.node_size() << " nodes");
+     }
+     else
+     {
+         removePhaseSwitches(netBin);
+         CV_LOG_DEBUG(NULL, "DNN/TF: removePhaseSwitches(model) => " << netBin.node_size() << " nodes");
+         RemoveIdentityOps(netBin);
+         CV_LOG_DEBUG(NULL, "DNN/TF: RemoveIdentityOps(model) => " << netBin.node_size() << " nodes");
+         simplifySubgraphs(netBin);
+         CV_LOG_DEBUG(NULL, "DNN/TF: simplifySubgraphs(model) => " << netBin.node_size() << " nodes");
+         sortByExecutionOrder(netBin);
+         CV_LOG_DEBUG(NULL, "DNN/TF: sortByExecutionOrder(model) => " << netBin.node_size() << " nodes");
+     }
+     tensorflow::GraphDef& net = netTxt.ByteSize() != 0 ? netTxt : netBin;
+     int layersSize = net.node_size();
+     // Pre-fill data layouts where they are set explicitly.
+     // Assuming that nodes are in topological order
+     for (int i = layersSize - 1; i >= 0; --i)
+     {
+         const tensorflow::NodeDef& layer = net.node(i);
+         std::string name = layer.name();
+         CV_LOG_DEBUG(NULL, "DNN/TF: node(" << i << " - '" << name << "') propagating layout...");
+         try
+         {
+             DataLayout layout = getDataLayout(layer);
+             std::map<String, DataLayout>::iterator it = data_layouts.find(name);
+             if (it != data_layouts.end())
+             {
+                 if (layout != DATA_LAYOUT_UNKNOWN)
+                 {
+                     if (it->second == DATA_LAYOUT_UNKNOWN)
+                         it->second = layout;
+                     else if (it->second != layout)
+                     {
+                         it->second = DATA_LAYOUT_UNKNOWN;
+                         layout = DATA_LAYOUT_UNKNOWN;
+                     }
+                 }
+                 else
+                     layout = it->second;
+             }
+             else
+                 data_layouts[name] = layout;
+             // Specify input layers to have the same data layout.
+             for (int j = 0; j < layer.input_size(); ++j)
+             {
+                 name = getNodeName(layer.input(j));
+                 it = data_layouts.find(name);
+                 if (it != data_layouts.end())
+                 {
+                     if (layout != DATA_LAYOUT_UNKNOWN)
+                     {
+                         if (it->second == DATA_LAYOUT_UNKNOWN)
+                             it->second = layout;
+                         else if (it->second != layout)
+                             it->second = DATA_LAYOUT_UNKNOWN;
+                     }
+                 }
+                 else
+                     data_layouts[name] = layout;
+             }
+         }
+         catch (const std::exception& e)
+         {
+             CV_LOG_ERROR(NULL, "DNN/TF: Can't propagate layout for node='" << name << "'. Exception: " << e.what());
+             throw;
+         }
+     }
+     addConstNodes(netBin, value_id, layers_to_ignore);
+     addConstNodes(netTxt, value_id, layers_to_ignore);
+     for (int li = 0; li < layersSize; li++)
+     {
+         const tensorflow::NodeDef& layer = net.node(li);
+         const std::string name = layer.name();
+         const std::string type = layer.op();
+         const int ninputs = layer.input_size();
+         CV_LOG_DEBUG(NULL, "DNN/TF: (" << li << "/" << layersSize << ") Parse layer " << name << " @ " << type << " with " << ninputs << " inputs");
+         parseNode(layer);
+     }
+     for (size_t i = 0; i < netInputsNames.size(); i++)
+     {
+         CV_LOG_DEBUG(NULL, "DNN/TF: Model input: " << i << " - '" << netInputsNames[i] << "'");
+         CV_Assert(!netInputsNames[i].empty());
+     }
+     dstNet.setInputsNames(netInputsNames);
+     CV_LOG_DEBUG(NULL, "DNN/TF: ===================== Import completed =====================");
+ }
+ void TFImporter::addPermuteLayer(const int* order, const std::string& permName, Pin& inpId)
+ {
+     LayerParams permLP;
+     permLP.set("order", DictValue::arrayInt<const int*>(order, 4));
+     CV_Assert(layer_id.find(permName) == layer_id.end());
+     int permId = dstNet.addLayer(permName, "Permute", permLP);
+     layer_id[permName] = permId;
+     connect(layer_id, dstNet, inpId, permId, 0);
+     inpId = Pin(permName);
+ }
+ void TFImporter::parseNode(const tensorflow::NodeDef& layer)
+ {
+     tensorflow::GraphDef& net = netTxt.ByteSize() != 0 ? netTxt : netBin;
+     const std::string& name = layer.name();
+     const std::string& type = layer.op();
+     try
+     {
+         LayerParams layerParams;
+         if (layers_to_ignore.find(name) != layers_to_ignore.end())
+         {
+             CV_LOG_DEBUG(NULL, "DNN/TF:     ignored");
+             return;
+         }
+         DataLayout predictedLayout = predictOutputDataLayout(layer);
+         data_layouts[name] = predictedLayout;
+         DispatchMap::const_iterator iter = dispatch.find(type);
+         if (iter != dispatch.end())
+         {
+             ((*this).*(iter->second))(net, layer, layerParams);
+         }
+         else
+         {
+             parseCustomLayer(net, layer, layerParams);
          }
      }
      catch (const std::exception& e)
@@@ -2621,5 -2844,5 +2844,5 @@@ void writeTextGraph(const String& _mode
      ofs.close();
  }
  
 -CV__DNN_EXPERIMENTAL_NS_END
 +CV__DNN_INLINE_NS_END
  }} // namespace
@@@ -363,18 -363,6 +363,18 @@@ public
          const std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
          const std::vector<int>& indexChange=std::vector<int>());
      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
 +
 +    /** @brief Set detection threshold.
 +    @param threshold AGAST detection threshold score.
 +    */
 +    CV_WRAP virtual void setThreshold(int threshold) { CV_UNUSED(threshold); return; }
 +    CV_WRAP virtual int getThreshold() const { return -1; }
 +
 +    /** @brief Set detection octaves.
 +    @param octaves detection octaves. Use 0 to do single scale.
 +    */
 +    CV_WRAP virtual void setOctaves(int octaves) { CV_UNUSED(octaves); return; }
 +    CV_WRAP virtual int getOctaves() const { return -1; }
  };
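// Illustrative usage sketch (not part of this patch): exercising the soft setters declared
// above. The enclosing class is not visible in this hunk; cv::BRISK is assumed here.
#include <opencv2/features2d.hpp>

void tuneBriskDetector()
{
    cv::Ptr<cv::BRISK> brisk = cv::BRISK::create();
    brisk->setThreshold(30);  // AGAST detection threshold
    brisk->setOctaves(3);     // 0 would request single-scale detection
}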
  
  /** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor
@@@ -387,8 -375,7 +387,8 @@@ k-tuples) are rotated according to the 
  class CV_EXPORTS_W ORB : public Feature2D
  {
  public:
 -    enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
 +    enum ScoreType { HARRIS_SCORE=0, FAST_SCORE=1 };
 +    static const int kBytes = 32;
  
      /** @brief The ORB constructor
  
      @param fastThreshold the fast threshold
       */
      CV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,
 -        int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);
 +        int firstLevel=0, int WTA_K=2, ORB::ScoreType scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);
  
      CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
      CV_WRAP virtual int getMaxFeatures() const = 0;
      CV_WRAP virtual void setWTA_K(int wta_k) = 0;
      CV_WRAP virtual int getWTA_K() const = 0;
  
 -    CV_WRAP virtual void setScoreType(int scoreType) = 0;
 -    CV_WRAP virtual int getScoreType() const = 0;
 +    CV_WRAP virtual void setScoreType(ORB::ScoreType scoreType) = 0;
 +    CV_WRAP virtual ORB::ScoreType getScoreType() const = 0;
  
      CV_WRAP virtual void setPatchSize(int patchSize) = 0;
      CV_WRAP virtual int getPatchSize() const = 0;
@@@ -512,41 -499,6 +512,41 @@@ public
      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
  };
  
 +//! @} features2d_main
 +
 +//! @addtogroup features2d_main
 +//! @{
 +
 +/** @brief Wrapping class for feature detection using the FAST method. :
 + */
 +class CV_EXPORTS_W FastFeatureDetector : public Feature2D
 +{
 +public:
 +    enum DetectorType
 +    {
 +        TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2
 +    };
 +    enum
 +    {
 +        THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002
 +    };
 +
 +
 +    CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
 +                                                    bool nonmaxSuppression=true,
 +                                                    FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16 );
 +
 +    CV_WRAP virtual void setThreshold(int threshold) = 0;
 +    CV_WRAP virtual int getThreshold() const = 0;
 +
 +    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
 +    CV_WRAP virtual bool getNonmaxSuppression() const = 0;
 +
 +    CV_WRAP virtual void setType(FastFeatureDetector::DetectorType type) = 0;
 +    CV_WRAP virtual FastFeatureDetector::DetectorType getType() const = 0;
 +    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
 +};
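// Illustrative usage sketch (not part of this patch): the detector type is now the scoped
// FastFeatureDetector::DetectorType enum instead of a plain int; "img" is a placeholder.
#include <opencv2/features2d.hpp>
#include <vector>

void detectFastCorners(const cv::Mat& img, std::vector<cv::KeyPoint>& keypoints)
{
    cv::Ptr<cv::FastFeatureDetector> fast =
        cv::FastFeatureDetector::create(10, true, cv::FastFeatureDetector::TYPE_9_16);
    fast->detect(img, keypoints);
}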
 +
  /** @overload */
  CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                        int threshold, bool nonmaxSuppression=true );
@@@ -565,36 -517,32 +565,36 @@@ FastFeatureDetector::TYPE_5_
  
  Detects corners using the FAST algorithm by @cite Rosten06 .
  
 -@note In Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
 -cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
 -detection, use cv2.FAST.detect() method.
 +@note In Python API, types are given as cv.FAST_FEATURE_DETECTOR_TYPE_5_8,
 +cv.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
 +detection, use cv.FAST.detect() method.
   */
  CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
 -                      int threshold, bool nonmaxSuppression, int type );
 +                      int threshold, bool nonmaxSuppression, FastFeatureDetector::DetectorType type );
  
  //! @} features2d_main
  
  //! @addtogroup features2d_main
  //! @{
  
 -/** @brief Wrapping class for feature detection using the FAST method. :
 +/** @brief Wrapping class for feature detection using the AGAST method. :
   */
 -class CV_EXPORTS_W FastFeatureDetector : public Feature2D
 +class CV_EXPORTS_W AgastFeatureDetector : public Feature2D
  {
  public:
 +    enum DetectorType
 +    {
 +        AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,
 +    };
 +
      enum
      {
 -        TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2,
 -        THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002,
 +        THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,
      };
  
 -    CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
 -                                                    bool nonmaxSuppression=true,
 -                                                    int type=FastFeatureDetector::TYPE_9_16 );
 +    CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,
 +                                                     bool nonmaxSuppression=true,
 +                                                     AgastFeatureDetector::DetectorType type = AgastFeatureDetector::OAST_9_16);
  
      CV_WRAP virtual void setThreshold(int threshold) = 0;
      CV_WRAP virtual int getThreshold() const = 0;
      CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
      CV_WRAP virtual bool getNonmaxSuppression() const = 0;
  
 -    CV_WRAP virtual void setType(int type) = 0;
 -    CV_WRAP virtual int getType() const = 0;
 +    CV_WRAP virtual void setType(AgastFeatureDetector::DetectorType type) = 0;
 +    CV_WRAP virtual AgastFeatureDetector::DetectorType getType() const = 0;
      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
  };
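// Illustrative usage sketch (not part of this patch): same pattern with the new
// AgastFeatureDetector::DetectorType enum; "img" is a placeholder input image.
#include <opencv2/features2d.hpp>
#include <vector>

void detectAgastCorners(const cv::Mat& img, std::vector<cv::KeyPoint>& keypoints)
{
    cv::Ptr<cv::AgastFeatureDetector> agast =
        cv::AgastFeatureDetector::create(10, true, cv::AgastFeatureDetector::OAST_9_16);
    agast->detect(img, keypoints);
}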
  
@@@ -630,7 -578,37 +630,7 @@@ Detects corners using the AGAST algorit
  
   */
  CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
 -                      int threshold, bool nonmaxSuppression, int type );
 -//! @} features2d_main
 -
 -//! @addtogroup features2d_main
 -//! @{
 -
 -/** @brief Wrapping class for feature detection using the AGAST method. :
 - */
 -class CV_EXPORTS_W AgastFeatureDetector : public Feature2D
 -{
 -public:
 -    enum
 -    {
 -        AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,
 -        THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,
 -    };
 -
 -    CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,
 -                                                     bool nonmaxSuppression=true,
 -                                                     int type=AgastFeatureDetector::OAST_9_16 );
 -
 -    CV_WRAP virtual void setThreshold(int threshold) = 0;
 -    CV_WRAP virtual int getThreshold() const = 0;
 -
 -    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
 -    CV_WRAP virtual bool getNonmaxSuppression() const = 0;
 -
 -    CV_WRAP virtual void setType(int type) = 0;
 -    CV_WRAP virtual int getType() const = 0;
 -    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
 -};
 +                      int threshold, bool nonmaxSuppression, AgastFeatureDetector::DetectorType type );
  
  /** @brief Wrapping class for feature detection using the goodFeaturesToTrack function. :
   */
@@@ -742,7 -720,7 +742,7 @@@ F. Alcantarilla, Adrien Bartoli and And
  class CV_EXPORTS_W KAZE : public Feature2D
  {
  public:
 -    enum
 +    enum DiffusivityType
      {
          DIFF_PM_G1 = 0,
          DIFF_PM_G2 = 1,
      CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
                                      float threshold = 0.001f,
                                      int nOctaves = 4, int nOctaveLayers = 4,
 -                                    int diffusivity = KAZE::DIFF_PM_G2);
 +                                    KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);
  
      CV_WRAP virtual void setExtended(bool extended) = 0;
      CV_WRAP virtual bool getExtended() const = 0;
      CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
      CV_WRAP virtual int getNOctaveLayers() const = 0;
  
 -    CV_WRAP virtual void setDiffusivity(int diff) = 0;
 -    CV_WRAP virtual int getDiffusivity() const = 0;
 +    CV_WRAP virtual void setDiffusivity(KAZE::DiffusivityType diff) = 0;
 +    CV_WRAP virtual KAZE::DiffusivityType getDiffusivity() const = 0;
      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
  };
  
@@@ -805,7 -783,7 +805,7 @@@ class CV_EXPORTS_W AKAZE : public Featu
  {
  public:
      // AKAZE descriptor type
 -    enum
 +    enum DescriptorType
      {
          DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
          DESCRIPTOR_KAZE = 3,
      @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
      DIFF_CHARBONNIER
       */
 -    CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
 +    CV_WRAP static Ptr<AKAZE> create(AKAZE::DescriptorType descriptor_type = AKAZE::DESCRIPTOR_MLDB,
                                       int descriptor_size = 0, int descriptor_channels = 3,
                                       float threshold = 0.001f, int nOctaves = 4,
 -                                     int nOctaveLayers = 4, int diffusivity = KAZE::DIFF_PM_G2);
 +                                     int nOctaveLayers = 4, KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);
  
 -    CV_WRAP virtual void setDescriptorType(int dtype) = 0;
 -    CV_WRAP virtual int getDescriptorType() const = 0;
 +    CV_WRAP virtual void setDescriptorType(AKAZE::DescriptorType dtype) = 0;
 +    CV_WRAP virtual AKAZE::DescriptorType getDescriptorType() const = 0;
  
      CV_WRAP virtual void setDescriptorSize(int dsize) = 0;
      CV_WRAP virtual int getDescriptorSize() const = 0;
      CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
      CV_WRAP virtual int getNOctaveLayers() const = 0;
  
 -    CV_WRAP virtual void setDiffusivity(int diff) = 0;
 -    CV_WRAP virtual int getDiffusivity() const = 0;
 +    CV_WRAP virtual void setDiffusivity(KAZE::DiffusivityType diff) = 0;
 +    CV_WRAP virtual KAZE::DiffusivityType getDiffusivity() const = 0;
      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
  };
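// Illustrative usage sketch (not part of this patch): descriptor type and diffusivity are
// now strongly typed enums rather than plain ints.
#include <opencv2/features2d.hpp>

void createAkazeDetector()
{
    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create(cv::AKAZE::DESCRIPTOR_MLDB,
                                                 /*descriptor_size=*/0,
                                                 /*descriptor_channels=*/3,
                                                 /*threshold=*/0.001f,
                                                 /*nOctaves=*/4, /*nOctaveLayers=*/4,
                                                 cv::KAZE::DIFF_PM_G2);
    akaze->setDiffusivity(cv::KAZE::DIFF_CHARBONNIER);
}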
  
@@@ -876,7 -854,7 +876,7 @@@ template<> struct Accumulator<short>  
  template<class T>
  struct CV_EXPORTS SL2
  {
 -    enum { normType = NORM_L2SQR };
 +    static const NormTypes normType = NORM_L2SQR;
      typedef T ValueType;
      typedef typename Accumulator<T>::Type ResultType;
  
  template<class T>
  struct L2
  {
 -    enum { normType = NORM_L2 };
 +    static const NormTypes normType = NORM_L2;
      typedef T ValueType;
      typedef typename Accumulator<T>::Type ResultType;
  
  template<class T>
  struct L1
  {
 -    enum { normType = NORM_L1 };
 +    static const NormTypes normType = NORM_L1;
      typedef T ValueType;
      typedef typename Accumulator<T>::Type ResultType;
  
@@@ -933,7 -911,7 +933,7 @@@ an image set
  class CV_EXPORTS_W DescriptorMatcher : public Algorithm
  {
  public:
 -   enum
 +   enum MatcherType
      {
          FLANNBASED            = 1,
          BRUTEFORCE            = 2,
          BRUTEFORCE_HAMMINGLUT = 5,
          BRUTEFORCE_SL2        = 6
      };
 +
      virtual ~DescriptorMatcher();
  
      /** @brief Adds descriptors to train a CPU(trainDescCollectionis) or GPU(utrainDescCollectionis) descriptor
       */
      CV_WRAP static Ptr<DescriptorMatcher> create( const String& descriptorMatcherType );
  
 -    CV_WRAP static Ptr<DescriptorMatcher> create( int matcherType );
 +    CV_WRAP static Ptr<DescriptorMatcher> create( const DescriptorMatcher::MatcherType& matcherType );
  
  
      // see corresponding cv::Algorithm method
@@@ -1275,20 -1252,20 +1275,20 @@@ protected
  //! @addtogroup features2d_draw
  //! @{
  
 -struct CV_EXPORTS DrawMatchesFlags
 +enum struct DrawMatchesFlags
  {
 -    enum{ DEFAULT = 0, //!< Output image matrix will be created (Mat::create),
 -                       //!< i.e. existing memory of output image may be reused.
 -                       //!< Two source image, matches and single keypoints will be drawn.
 -                       //!< For each keypoint only the center point will be drawn (without
 -                       //!< the circle around keypoint with keypoint size and orientation).
 -          DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).
 -                                //!< Matches will be drawn on existing content of output image.
 -          NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.
 -          DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and
 -                                  //!< orientation will be drawn.
 -        };
 +  DEFAULT = 0, //!< Output image matrix will be created (Mat::create),
 +               //!< i.e. existing memory of output image may be reused.
 +               //!< Two source images, matches and single keypoints will be drawn.
 +               //!< For each keypoint only the center point will be drawn (without
 +               //!< the circle around the keypoint with keypoint size and orientation).
 +  DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).
 +                        //!< Matches will be drawn on existing content of output image.
 +  NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.
 +  DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint, the circle around the keypoint with keypoint size and
 +                          //!< orientation will be drawn.
  };
 +CV_ENUM_FLAGS(DrawMatchesFlags)
  
  /** @brief Draws keypoints.
  
@@@ -1301,12 -1278,12 +1301,12 @@@ output image. See possible flags bit va
  DrawMatchesFlags. See details above in drawMatches .
  
  @note
 -For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
 -cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
 -cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
 +For Python API, flags are modified as cv.DRAW_MATCHES_FLAGS_DEFAULT,
 +cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
 +cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
   */
  CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
 -                               const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );
 +                               const Scalar& color=Scalar::all(-1), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
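// Illustrative usage sketch (not part of this patch): with the scoped enum, flags combine
// through the CV_ENUM_FLAGS-generated operators; "img" and "keypoints" are placeholders.
#include <opencv2/features2d.hpp>
#include <vector>

void renderKeypoints(const cv::Mat& img, const std::vector<cv::KeyPoint>& keypoints, cv::Mat& out)
{
    cv::drawKeypoints(img, keypoints, out, cv::Scalar::all(-1),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS |
                      cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
}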
  
  /** @brief Draws the found matches of keypoints from two images.
  
@@@ -1334,14 -1311,21 +1334,21 @@@ CV_EXPORTS_W void drawMatches( InputArr
                               InputArray img2, const std::vector<KeyPoint>& keypoints2,
                               const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                               const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
 -                             const std::vector<char>& matchesMask=std::vector<char>(), int flags=DrawMatchesFlags::DEFAULT );
 +                             const std::vector<char>& matchesMask=std::vector<char>(), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
  
  /** @overload */
 -                             int flags=DrawMatchesFlags::DEFAULT );
+ CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
+                              InputArray img2, const std::vector<KeyPoint>& keypoints2,
+                              const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
+                              const int matchesThickness, const Scalar& matchColor=Scalar::all(-1),
+                              const Scalar& singlePointColor=Scalar::all(-1), const std::vector<char>& matchesMask=std::vector<char>(),
++                             DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
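// Illustrative usage sketch (not part of this patch): the overload declared above takes the
// match-line thickness right after the output image; all inputs here are placeholders.
#include <opencv2/features2d.hpp>
#include <vector>

void renderThickMatches(const cv::Mat& img1, const std::vector<cv::KeyPoint>& kp1,
                        const cv::Mat& img2, const std::vector<cv::KeyPoint>& kp2,
                        const std::vector<cv::DMatch>& matches, cv::Mat& out)
{
    cv::drawMatches(img1, kp1, img2, kp2, matches, out, /*matchesThickness=*/3);
}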
  CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                               InputArray img2, const std::vector<KeyPoint>& keypoints2,
                               const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
                               const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
 -                             const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );
 +                             const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
  
  //! @} features2d_draw
  
@@@ -50,12 -50,12 +50,12 @@@ namespace c
  /*
   * Functions to draw keypoints and matches.
   */
 -static inline void _drawKeypoint( InputOutputArray img, const KeyPoint& p, const Scalar& color, int flags )
 +static inline void _drawKeypoint( InputOutputArray img, const KeyPoint& p, const Scalar& color, DrawMatchesFlags flags )
  {
      CV_Assert( !img.empty() );
      Point center( cvRound(p.pt.x * draw_multiplier), cvRound(p.pt.y * draw_multiplier) );
  
 -    if( flags & DrawMatchesFlags::DRAW_RICH_KEYPOINTS )
 +    if( !!(flags & DrawMatchesFlags::DRAW_RICH_KEYPOINTS) )
      {
          int radius = cvRound(p.size/2 * draw_multiplier); // KeyPoint::size is a diameter
  
@@@ -89,7 -89,7 +89,7 @@@
  }
  
  void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
 -                    const Scalar& _color, int flags )
 +                    const Scalar& _color, DrawMatchesFlags flags )
  {
      CV_INSTRUMENT_REGION();
  
@@@ -144,12 -144,12 +144,12 @@@ static void _prepareImage(InputArray sr
  static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                                           InputArray img2, const std::vector<KeyPoint>& keypoints2,
                                           InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
 -                                         const Scalar& singlePointColor, int flags )
 +                                         const Scalar& singlePointColor, DrawMatchesFlags flags )
  {
      Mat outImg;
      Size img1size = img1.size(), img2size = img2.size();
      Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
 -    if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
 +    if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
      {
          outImg = _outImg.getMat();
          if( size.width > outImg.cols || size.height > outImg.rows )
  }
  
  static inline void _drawMatch( InputOutputArray outImg, InputOutputArray outImg1, InputOutputArray outImg2 ,
-                           const KeyPoint& kp1, const KeyPoint& kp2, const Scalar& matchColor, DrawMatchesFlags flags )
 -                          const KeyPoint& kp1, const KeyPoint& kp2, const Scalar& matchColor, int flags,
++                          const KeyPoint& kp1, const KeyPoint& kp2, const Scalar& matchColor, DrawMatchesFlags flags,
+                           const int matchesThickness )
  {
      RNG& rng = theRNG();
      bool isRandMatchColor = matchColor == Scalar::all(-1);
      line( outImg,
            Point(cvRound(pt1.x*draw_multiplier), cvRound(pt1.y*draw_multiplier)),
            Point(cvRound(dpt2.x*draw_multiplier), cvRound(dpt2.y*draw_multiplier)),
-           color, 1, LINE_AA, draw_shift_bits );
+           color, matchesThickness, LINE_AA, draw_shift_bits );
  }
  
  void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                    InputArray img2, const std::vector<KeyPoint>& keypoints2,
                    const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                    const Scalar& matchColor, const Scalar& singlePointColor,
 -                  const std::vector<char>& matchesMask, int flags )
 +                  const std::vector<char>& matchesMask, DrawMatchesFlags flags )
  {
 -                  int flags )
+     drawMatches( img1, keypoints1,
+                  img2, keypoints2,
+                  matches1to2, outImg,
+                  1, matchColor,
+                  singlePointColor, matchesMask,
+                  flags);
+ }
+ void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
+                   InputArray img2, const std::vector<KeyPoint>& keypoints2,
+                   const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
+                   const int matchesThickness, const Scalar& matchColor,
+                   const Scalar& singlePointColor, const std::vector<char>& matchesMask,
++                  DrawMatchesFlags flags )
+ {
      if( !matchesMask.empty() && matchesMask.size() != matches1to2.size() )
          CV_Error( Error::StsBadSize, "matchesMask must have the same size as matches1to2" );
  
              CV_Assert(i2 >= 0 && i2 < static_cast<int>(keypoints2.size()));
  
              const KeyPoint &kp1 = keypoints1[i1], &kp2 = keypoints2[i2];
-             _drawMatch( outImg, outImg1, outImg2, kp1, kp2, matchColor, flags );
+             _drawMatch( outImg, outImg1, outImg2, kp1, kp2, matchColor, flags, matchesThickness );
          }
      }
  }
  
  void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                    InputArray img2, const std::vector<KeyPoint>& keypoints2,
                    const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
                    const Scalar& matchColor, const Scalar& singlePointColor,
 -                  const std::vector<std::vector<char> >& matchesMask, int flags )
 +                  const std::vector<std::vector<char> >& matchesMask, DrawMatchesFlags flags )
  {
      if( !matchesMask.empty() && matchesMask.size() != matches1to2.size() )
          CV_Error( Error::StsBadSize, "matchesMask must have the same size as matches1to2" );
              if( matchesMask.empty() || matchesMask[i][j] )
              {
                  const KeyPoint &kp1 = keypoints1[i1], &kp2 = keypoints2[i2];
-                 _drawMatch( outImg, outImg1, outImg2, kp1, kp2, matchColor, flags );
+                 _drawMatch( outImg, outImg1, outImg2, kp1, kp2, matchColor, flags, 1 );
              }
          }
      }
  //M*/
  
  #include "precomp.hpp"
 -
 -#ifndef _WIN32
 +#include "backend.hpp"
  
  #if defined (HAVE_GTK)
  
  #include <gtk/gtk.h>
 +
 +#if (GTK_MAJOR_VERSION == 3) && defined(HAVE_OPENGL)
 +  #undef HAVE_OPENGL  // no support with GTK3
 +#endif
 +#if defined(HAVE_OPENGL) && !defined(HAVE_GTKGLEXT)
 +  #undef HAVE_OPENGL  // gtkglext is required
 +#endif
 +
  #include <gdk/gdkkeysyms.h>
  #include <gdk-pixbuf/gdk-pixbuf.h>
  #include <stdio.h>
@@@ -111,6 -104,9 +111,6 @@@ struct _CvImageWidgetClas
  /** Allocate new image viewer widget */
  GtkWidget*     cvImageWidgetNew      (int flags);
  
 -/** Set the image to display in the widget */
 -void           cvImageWidgetSetImage(CvImageWidget * widget, const CvArr *arr);
 -
  // standard GTK object macros
  #define CV_IMAGE_WIDGET(obj)          G_TYPE_CHECK_INSTANCE_CAST (obj, cvImageWidget_get_type (), CvImageWidget)
  #define CV_IMAGE_WIDGET_CLASS(klass)  GTK_CHECK_CLASS_CAST (klass, cvImageWidget_get_type (), CvImageWidgetClass)
@@@ -126,10 -122,7 +126,10 @@@ static GtkWidgetClass * parent_class = 
  // flag to help size initial window
  #define CV_WINDOW_NO_IMAGE 2
  
 -void cvImageWidgetSetImage(CvImageWidget * widget, const CvArr *arr){
 +/** Set the image to display in the widget */
 +static
 +void cvImageWidgetSetImage(CvImageWidget * widget, const CvArr *arr)
 +{
      CvMat * mat, stub;
      int origin=0;
  
          widget->original_image = cvCreateMat( mat->rows, mat->cols, CV_8UC3 );
          gtk_widget_queue_resize( GTK_WIDGET( widget ) );
      }
 -    cvConvertImage( mat, widget->original_image,
 -                            (origin != 0 ? CV_CVTIMG_FLIP : 0) + CV_CVTIMG_SWAP_RB );
 +    CV_Assert(origin == 0);
 +    convertToShow(cv::cvarrToMat(arr), widget->original_image);
      if(widget->scaled_image){
          cvResize( widget->original_image, widget->scaled_image, CV_INTER_AREA );
      }
@@@ -163,7 -156,6 +163,7 @@@ cvImageWidgetNew (int flags
    CvImageWidget *image_widget;
  
    image_widget = CV_IMAGE_WIDGET( gtk_widget_new (cvImageWidget_get_type (), NULL) );
 +  CV_Assert(image_widget && "GTK widget creation failed. Ensure that there is no GTK2/GTK3 library conflict");
    image_widget->original_image = 0;
    image_widget->scaled_image = 0;
    image_widget->flags = flags | CV_WINDOW_NO_IMAGE;
@@@ -530,13 -522,12 +530,13 @@@ struct CvUIBase 
  
  struct CvTrackbar : CvUIBase
  {
 -    CvTrackbar(const char* trackbar_name) :
 +    CvTrackbar(const std::string& trackbar_name) :
          CvUIBase(CV_TRACKBAR_MAGIC_VAL),
          widget(NULL), name(trackbar_name),
          parent(NULL), data(NULL),
          pos(0), maxval(0), minval(0),
 -        notify(NULL), notify2(NULL), userdata(NULL)
 +        notify(NULL), notify2(NULL),  // deprecated
 +        onChangeCallback(NULL), userdata(NULL)
      {
          // nothing
      }
  
      GtkWidget* widget;
      std::string name;
 -    CvWindow* parent;
 +    CvWindow* parent;  // TODO weak_ptr
      int* data;
      int pos;
      int maxval;
      int minval;
 -    CvTrackbarCallback notify;
 -    CvTrackbarCallback2 notify2;
 +    CvTrackbarCallback notify;  // deprecated
 +    CvTrackbarCallback2 notify2;  // deprecated
 +    TrackbarCallback onChangeCallback;
      void* userdata;
  };
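// Illustrative usage sketch (not part of this patch): the onChangeCallback field above
// stores the TrackbarCallback passed through the public API; names here are placeholders.
#include <opencv2/highgui.hpp>

static void onBrightnessChanged(int pos, void* /*userdata*/)
{
    (void)pos;  // react to the new slider position here
}

void addBrightnessTrackbar()
{
    cv::namedWindow("demo");
    cv::createTrackbar("brightness", "demo", NULL, 100, onBrightnessChanged, NULL);
}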
  
  
  struct CvWindow : CvUIBase
  {
 -    CvWindow(const char* window_name) :
 +    CvWindow(const std::string& window_name) :
          CvUIBase(CV_WINDOW_MAGIC_VAL),
          widget(NULL), frame(NULL), paned(NULL), name(window_name),
          last_key(0), flags(0), status(0),
          ,useGl(false), glDrawCallback(NULL), glDrawData(NULL)
  #endif
      {
 -        // nothing
 +        CV_LOG_INFO(NULL, "OpenCV/UI: creating GTK window: " << window_name);
      }
      ~CvWindow();
 +    void destroy();
  
      GtkWidget* widget;
      GtkWidget* frame;
      CvMouseCallback on_mouse;
      void* on_mouse_param;
  
 -    std::vector< Ptr<CvTrackbar> > trackbars;
 +    std::vector< std::shared_ptr<CvTrackbar> > trackbars;
  
  #ifdef HAVE_OPENGL
      bool useGl;
@@@ -611,15 -600,15 +611,15 @@@ GCond*                             cond_have_key = NULL
  GThread*                         window_thread = NULL;
  #endif
  
 -static cv::Mutex& getWindowMutex()
 +static int             last_key = -1;
 +
 +static
 +std::vector< std::shared_ptr<CvWindow> >& getGTKWindows()
  {
 -    static cv::Mutex* g_window_mutex = new cv::Mutex();
 -    return *g_window_mutex;
 +    static std::vector< std::shared_ptr<CvWindow> > g_windows;
 +    return g_windows;
  }
  
 -static int             last_key = -1;
 -static std::vector< Ptr<CvWindow> > g_windows;
 -
  CV_IMPL int cvInitSystem( int argc, char** argv )
  {
      static int wasInitialized = 0;
@@@ -711,32 -700,19 +711,32 @@@ gpointer icvWindowThreadLoop(gpointer /
  
  #define CV_LOCK_MUTEX() cv::AutoLock lock(getWindowMutex())
  
 -static CvWindow* icvFindWindowByName( const char* name )
 +static
 +std::shared_ptr<CvWindow> icvFindWindowByName(const std::string& name)
  {
 +    auto& g_windows = getGTKWindows();
      for(size_t i = 0; i < g_windows.size(); ++i)
      {
 -        CvWindow* window = g_windows[i].get();
 +        auto window = g_windows[i];
 +        if (!window)
 +            continue;
          if (window->name == name)
              return window;
      }
 -    return NULL;
 +    return std::shared_ptr<CvWindow>();
 +}
 +
 +static inline
 +std::shared_ptr<CvWindow> icvFindWindowByName(const char* name)
 +{
 +    CV_Assert(name);
 +    return icvFindWindowByName(std::string(name));
  }
  
 +
  static CvWindow* icvWindowByWidget( GtkWidget* widget )
  {
 +    auto& g_windows = getGTKWindows();
      for (size_t i = 0; i < g_windows.size(); ++i)
      {
          CvWindow* window = g_windows[i].get();
      return NULL;
  }
  
 +static Rect getImageRect_(const std::shared_ptr<CvWindow>& window);
 +
  CvRect cvGetWindowRect_GTK(const char* name)
  {
      CV_Assert(name && "NULL name string");
  
      CV_LOCK_MUTEX();
 -    CvWindow* window = icvFindWindowByName(name);
 +    const auto window = icvFindWindowByName(name);
      if (!window)
          CV_Error( CV_StsNullPtr, "NULL window" );
  
 +    return cvRect(getImageRect_(window));
 +}
 +
 +static Rect getImageRect_(const std::shared_ptr<CvWindow>& window)
 +{
 +    CV_Assert(window);
 +
      gint wx, wy;
  #ifdef HAVE_OPENGL
      if (window->useGl) {
          gtk_widget_translate_coordinates(window->widget, gtk_widget_get_toplevel(window->widget), 0, 0, &wx, &wy);
 -        return cvRect(wx, wy, window->widget->allocation.width, window->widget->allocation.height);
 +        return Rect(wx, wy, window->widget->allocation.width, window->widget->allocation.height);
      }
  #endif
  
      gtk_widget_translate_coordinates(&image_widget->widget, gtk_widget_get_toplevel(&image_widget->widget), 0, 0, &wx, &wy);
      if (image_widget->scaled_image) {
  #if defined (GTK_VERSION3)
 -      return cvRect(wx, wy, MIN(image_widget->scaled_image->cols, gtk_widget_get_allocated_width(window->widget)),
 +      return Rect(wx, wy, MIN(image_widget->scaled_image->cols, gtk_widget_get_allocated_width(window->widget)),
            MIN(image_widget->scaled_image->rows, gtk_widget_get_allocated_height(window->widget)));
  #else
 -      return cvRect(wx, wy, MIN(image_widget->scaled_image->cols, window->widget->allocation.width),
 +      return Rect(wx, wy, MIN(image_widget->scaled_image->cols, window->widget->allocation.width),
            MIN(image_widget->scaled_image->rows, window->widget->allocation.height));
  #endif //GTK_VERSION3
      } else if (image_widget->original_image) {
  #if defined (GTK_VERSION3)
 -      return cvRect(wx, wy, MIN(image_widget->original_image->cols, gtk_widget_get_allocated_width(window->widget)),
 +      return Rect(wx, wy, MIN(image_widget->original_image->cols, gtk_widget_get_allocated_width(window->widget)),
            MIN(image_widget->original_image->rows, gtk_widget_get_allocated_height(window->widget)));
  #else
 -      return cvRect(wx, wy, MIN(image_widget->original_image->cols, window->widget->allocation.width),
 +      return Rect(wx, wy, MIN(image_widget->original_image->cols, window->widget->allocation.width),
            MIN(image_widget->original_image->rows, window->widget->allocation.height));
  #endif //GTK_VERSION3
      }
  
 -    return cvRect(-1, -1, -1, -1);
 +    return Rect(-1, -1, -1, -1);
  }
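
getImageRect_() backs cv::getWindowImageRect() on this backend: it reports the image widget's on-screen position, clamps width/height to the current widget allocation, and yields Rect(-1, -1, -1, -1) when the window holds no image yet. A small sketch of the observable behaviour, assuming the public wrapper dispatches here (the window name is illustrative):

    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Mat img(240, 320, CV_8UC3, cv::Scalar::all(0));
        cv::imshow("probe", img);
        cv::waitKey(1);  // let GTK realize the widget before querying it
        cv::Rect r = cv::getWindowImageRect("probe");
        // r.width/r.height never exceed the widget allocation; all fields
        // are -1 if no image has been shown in the window yet
        return 0;
    }
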
  
  double cvGetModeWindow_GTK(const char* name)//YV
      CV_Assert(name && "NULL name string");
  
      CV_LOCK_MUTEX();
 -    CvWindow* window = icvFindWindowByName(name);
 +    const auto window = icvFindWindowByName(name);
      if (!window)
          CV_Error( CV_StsNullPtr, "NULL window" );
  
      return result;
  }
  
 -
 +static bool setModeWindow_(const std::shared_ptr<CvWindow>& window, int mode);
  void cvSetModeWindow_GTK( const char* name, double prop_value)//Yannick Verdie
  {
      CV_Assert(name && "NULL name string");
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 -    if( !window )
 +    const auto window = icvFindWindowByName(name);
 +    if (!window)
          CV_Error( CV_StsNullPtr, "NULL window" );
  
 -    if(window->flags & CV_WINDOW_AUTOSIZE)//if the flag CV_WINDOW_AUTOSIZE is set
 -        return;
 +    setModeWindow_(window, (int)prop_value);
 +}
 +
 +static bool setModeWindow_(const std::shared_ptr<CvWindow>& window, int mode)
 +{
 +    if (window->flags & CV_WINDOW_AUTOSIZE) //if the flag CV_WINDOW_AUTOSIZE is set
 +        return false;
  
      //so easy to do fullscreen here, Linux rocks !
  
 -    if (window->status==CV_WINDOW_FULLSCREEN && prop_value==CV_WINDOW_NORMAL)
 +    if (window->status == mode)
 +        return true;
 +
 +    if (window->status==CV_WINDOW_FULLSCREEN && mode==CV_WINDOW_NORMAL)
      {
          gtk_window_unfullscreen(GTK_WINDOW(window->frame));
          window->status=CV_WINDOW_NORMAL;
 -        return;
 +        return true;
      }
  
 -    if (window->status==CV_WINDOW_NORMAL && prop_value==CV_WINDOW_FULLSCREEN)
 +    if (window->status==CV_WINDOW_NORMAL && mode==CV_WINDOW_FULLSCREEN)
      {
          gtk_window_fullscreen(GTK_WINDOW(window->frame));
          window->status=CV_WINDOW_FULLSCREEN;
 -        return;
 +        return true;
      }
 +
 +    return false;
  }
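
setModeWindow_() only switches between CV_WINDOW_NORMAL and CV_WINDOW_FULLSCREEN, refuses to act on CV_WINDOW_AUTOSIZE windows, and returns whether the window ended up in the requested mode. A short sketch of the user-facing path, assuming cv::setWindowProperty reaches this helper on the GTK backend:

    #include <opencv2/highgui.hpp>

    int main()
    {
        // must NOT use cv::WINDOW_AUTOSIZE, otherwise the request is ignored
        cv::namedWindow("player", cv::WINDOW_NORMAL);
        cv::setWindowProperty("player", cv::WND_PROP_FULLSCREEN, cv::WINDOW_FULLSCREEN);
        cv::waitKey(0);
        cv::setWindowProperty("player", cv::WND_PROP_FULLSCREEN, cv::WINDOW_NORMAL);
        return 0;
    }
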
  
  void cv::setWindowTitle(const String& winname, const String& title)
  {
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(winname.c_str());
 +    auto window = icvFindWindowByName(winname.c_str());
  
      if (!window)
      {
@@@ -871,7 -828,7 +871,7 @@@ double cvGetPropWindowAutoSize_GTK(cons
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    const auto window = icvFindWindowByName(name);
      if (!window)
          return -1; // keep silence here
  
      return result;
  }
  
 +static double getRatioWindow_(const std::shared_ptr<CvWindow>& window);
  double cvGetRatioWindow_GTK(const char* name)
  {
      CV_Assert(name && "NULL name string");
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    const auto window = icvFindWindowByName(name);
      if (!window)
          return -1; // keep silence here
  
 +    return getRatioWindow_(window);
 +}
 +
 +static double getRatioWindow_(const std::shared_ptr<CvWindow>& window)
 +{
  #if defined (GTK_VERSION3)
      double result = static_cast<double>(
          gtk_widget_get_allocated_width(window->widget)) / gtk_widget_get_allocated_height(window->widget);
@@@ -911,7 -862,7 +911,7 @@@ double cvGetOpenGlProp_GTK(const char* 
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    const auto window = icvFindWindowByName(name);
      if (!window)
          return -1; // keep silence here
  
@@@ -1097,7 -1048,6 +1097,7 @@@ static gboolean cvImageWidget_expose(Gt
  }
  #endif //GTK_VERSION3
  
 +static std::shared_ptr<CvWindow> namedWindow_(const std::string& name, int flags);
  CV_IMPL int cvNamedWindow( const char* name, int flags )
  {
      cvInitSystem(name ? 1 : 0,(char**)&name);
      {
          return 1;
      }
 +    auto window = namedWindow_(name, flags);
 +    return window ? 1 : 0;
 +}
  
 -    Ptr<CvWindow> window = makePtr<CvWindow>(name);
 +static std::shared_ptr<CvWindow> namedWindow_(const std::string& name, int flags)
 +{
 +    cvInitSystem(0, NULL);
 +
 +    auto window_ptr = std::make_shared<CvWindow>(name);
 +    CvWindow* window = window_ptr.get();
      window->flags = flags;
      window->status = CV_WINDOW_NORMAL;//YV
  
  #endif //GTK_VERSION3_4
  
      gtk_widget_show( window->frame );
 -    gtk_window_set_title( GTK_WINDOW(window->frame), name );
 +    gtk_window_set_title(GTK_WINDOW(window->frame), name.c_str());
  
 -    g_windows.push_back(window);
 +    {
 +        AutoLock lock(getWindowMutex());
 +        getGTKWindows().push_back(window_ptr);
 +    }
  
      bool b_nautosize = ((flags & CV_WINDOW_AUTOSIZE) == 0);
      gtk_window_set_resizable( GTK_WINDOW(window->frame), b_nautosize );
  
  #ifdef HAVE_OPENGL
      if (window->useGl)
 -        cvSetOpenGlContext(name);
 +        cvSetOpenGlContext(name.c_str());
  #endif
  
 -    return 1;
 +    return window_ptr;
  }
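
namedWindow_() registers the new window in getGTKWindows() under the window mutex and derives the GTK frame's resizability from the AUTOSIZE flag (gtk_window_set_resizable() receives the negated flag). A short sketch of the two flag choices through the public wrapper, assuming cv::namedWindow ends up here via cvNamedWindow:

    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::namedWindow("fixed", cv::WINDOW_AUTOSIZE);  // non-resizable frame, follows the image size
        cv::namedWindow("free", cv::WINDOW_NORMAL);     // resizable, may be switched to fullscreen
        cv::waitKey(0);
        return 0;
    }
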
  
  
@@@ -1213,7 -1152,7 +1213,7 @@@ CV_IMPL void cvSetOpenGlContext(const c
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    auto window = icvFindWindowByName(name);
      if (!window)
          CV_Error( CV_StsNullPtr, "NULL window" );
  
@@@ -1233,7 -1172,7 +1233,7 @@@ CV_IMPL void cvUpdateWindow(const char
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    auto window = icvFindWindowByName(name);
      if (!window)
          return;
  
@@@ -1247,7 -1186,7 +1247,7 @@@ CV_IMPL void cvSetOpenGlDrawCallback(co
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    auto window = icvFindWindowByName(name);
      if( !window )
          return;
  
  
  CvWindow::~CvWindow()
  {
 +    if (frame)
 +        destroy();
 +}
 +
 +inline void CvWindow::destroy()
 +{
 +    CV_LOG_INFO(NULL, "OpenCV/UI: destroying GTK window: " << name);
      gtk_widget_destroy(frame);
 +    frame = nullptr;
  }
  
  static void checkLastWindow()
  {
      // if last window...
 -    if (g_windows.empty())
 +    if (getGTKWindows().empty())
      {
  #ifdef HAVE_GTHREAD
          if( thread_started )
      }
  }
  
 -static void icvDeleteWindow( CvWindow* window )
 +static
 +void icvDeleteWindow_( CvWindow* window )
  {
 +    AutoLock lock(getWindowMutex());
 +    auto& g_windows = getGTKWindows();
      bool found = false;
 -    for (std::vector< Ptr<CvWindow> >::iterator i = g_windows.begin();
 -         i != g_windows.end(); ++i)
 +    for (auto i = g_windows.begin(); i != g_windows.end(); ++i)
      {
          if (i->get() == window)
          {
              break;
          }
      }
 -    CV_Assert(found && "Can't destroy non-registered window");
 -
 +    CV_LOG_IF_WARNING(NULL, !found, "OpenCV/GTK: Can't destroy non-registered window");
      checkLastWindow();
  }
  
@@@ -1329,10 -1259,10 +1329,10 @@@ CV_IMPL void cvDestroyWindow( const cha
      CV_Assert(name && "NULL name string");
  
      CV_LOCK_MUTEX();
 +    auto& g_windows = getGTKWindows();
  
      bool found = false;
 -    for (std::vector< Ptr<CvWindow> >::iterator i = g_windows.begin();
 -         i != g_windows.end(); ++i)
 +    for (auto i = g_windows.begin(); i != g_windows.end(); ++i)
      {
          if (i->get()->name == name)
          {
              break;
          }
      }
 -    CV_Assert(found && "Can't destroy non-registered window");
 +    CV_LOG_IF_ERROR(NULL, !found, "OpenCV/GTK: Can't destroy non-registered window: '" << name << "'");
  
      checkLastWindow();
  }
@@@ -1352,7 -1282,7 +1352,7 @@@ cvDestroyAllWindows( void 
  {
      CV_LOCK_MUTEX();
  
 -    g_windows.clear();
 +    getGTKWindows().clear();
      checkLastWindow();
  }
  
@@@ -1375,7 -1305,7 +1375,7 @@@ cvShowImage( const char* name, const Cv
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    auto window = icvFindWindowByName(name);
      if(!window)
      {
          cvNamedWindow(name, 1);
      }
  }
  
 +static void resizeWindow_(const std::shared_ptr<CvWindow>& window, int width, int height);
  CV_IMPL void cvResizeWindow(const char* name, int width, int height )
  {
      CV_Assert(name && "NULL name string");
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    auto window = icvFindWindowByName(name);
      if(!window)
          return;
  
 +    return resizeWindow_(window, width, height);
 +}
 +
 +static
 +void resizeWindow_(const std::shared_ptr<CvWindow>& window, int width, int height)
 +{
 +    CV_Assert(window);
      CvImageWidget* image_widget = CV_IMAGE_WIDGET( window->widget );
      //if(image_widget->flags & CV_WINDOW_AUTOSIZE)
          //EXIT;
@@@ -1435,30 -1357,26 +1435,30 @@@ CV_IMPL void cvMoveWindow( const char* 
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(name);
 +    const auto window = icvFindWindowByName(name);
      if(!window)
          return;
  
      gtk_window_move( GTK_WINDOW(window->frame), x, y );
  }
  
 -
 -static CvTrackbar*
 -icvFindTrackbarByName( const CvWindow* window, const char* name )
 +static
 +std::shared_ptr<CvTrackbar> icvFindTrackbarByName(const std::shared_ptr<CvWindow>& window, const std::string& name)
  {
 -    for (size_t i = 0; i < window->trackbars.size(); ++i)
 +    CV_Assert(window);
 +    auto& trackbars = window->trackbars;
 +    for(size_t i = 0; i < trackbars.size(); ++i)
      {
 -        CvTrackbar* trackbar = window->trackbars[i].get();
 +        auto trackbar = trackbars[i];
 +        if (!trackbar)
 +            continue;
          if (trackbar->name == name)
              return trackbar;
      }
 -    return NULL;
 +    return std::shared_ptr<CvTrackbar>();
  }
  
 +
  static int
  icvCreateTrackbar( const char* trackbar_name, const char* window_name,
                     int* val, int count, CvTrackbarCallback on_notify,
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(window_name);
 +    const auto window = icvFindWindowByName(window_name);
      if(!window)
          return 0;
  
 -    CvTrackbar* trackbar = icvFindTrackbarByName(window, trackbar_name);
 -    if (!trackbar)
 +    auto trackbar_ = icvFindTrackbarByName(window, trackbar_name);
 +    if (!trackbar_)
      {
 -        Ptr<CvTrackbar> trackbar_ = makePtr<CvTrackbar>(trackbar_name);
 -        trackbar = trackbar_.get();
 -        trackbar->parent = window;
 +        trackbar_ = std::make_shared<CvTrackbar>(trackbar_name);
 +        CvTrackbar* trackbar = trackbar_.get();
 +        trackbar->parent = window.get();
          window->trackbars.push_back(trackbar_);
  
          GtkWidget* hscale_box = gtk_hbox_new( FALSE, 10 );
          gtk_widget_show( hscale_box );
      }
  
 +    CvTrackbar* trackbar = trackbar_.get(); CV_DbgAssert(trackbar);
 +
      if( val )
      {
          int value = *val;
      return 1;
  }
  
 -
  CV_IMPL int
  cvCreateTrackbar( const char* trackbar_name, const char* window_name,
                    int* val, int count, CvTrackbarCallback on_notify )
                               on_notify, 0, 0);
  }
  
 -
  CV_IMPL int
  cvCreateTrackbar2( const char* trackbar_name, const char* window_name,
                     int* val, int count, CvTrackbarCallback2 on_notify2,
                               0, on_notify2, userdata);
  }
  
 +static
 +std::shared_ptr<CvTrackbar> createTrackbar_(
 +    const std::shared_ptr<CvWindow>& window, const std::string& name,
 +    int count,
 +    TrackbarCallback onChange, void* userdata
 +)
 +{
 +    CV_Assert(window);
 +    CV_Assert(!name.empty());
 +
 +    if (count <= 0)
 +        CV_Error(Error::StsOutOfRange, "Bad trackbar maximal value");
 +
 +    auto trackbar_ = std::make_shared<CvTrackbar>(name);
 +    CvTrackbar* trackbar = trackbar_.get();
 +    trackbar->parent = window.get();
 +    window->trackbars.push_back(trackbar_);
 +
 +    GtkWidget* hscale_box = gtk_hbox_new( FALSE, 10 );
 +    GtkWidget* hscale_label = gtk_label_new(name.c_str());
 +    GtkWidget* hscale = gtk_hscale_new_with_range( 0, count, 1 );
 +    gtk_scale_set_digits( GTK_SCALE(hscale), 0 );
 +    //gtk_scale_set_value_pos( hscale, GTK_POS_TOP );
 +    gtk_scale_set_draw_value( GTK_SCALE(hscale), TRUE );
 +
 +    trackbar->widget = hscale;
 +    gtk_box_pack_start( GTK_BOX(hscale_box), hscale_label, FALSE, FALSE, 5 );
 +    gtk_widget_show( hscale_label );
 +    gtk_box_pack_start( GTK_BOX(hscale_box), hscale, TRUE, TRUE, 5 );
 +    gtk_widget_show( hscale );
 +    gtk_box_pack_start( GTK_BOX(window->paned), hscale_box, FALSE, FALSE, 5 );
 +    gtk_widget_show( hscale_box );
 +
 +    trackbar->maxval = count;
 +    trackbar->onChangeCallback = onChange;
 +    trackbar->userdata = userdata;
 +    g_signal_connect(trackbar->widget, "value-changed",
 +                     G_CALLBACK(icvOnTrackbar), trackbar);
 +
 +    // queue a widget resize to trigger a window resize to
 +    // compensate for the addition of trackbars
 +    gtk_widget_queue_resize(GTK_WIDGET(window->widget));
 +
 +    return trackbar_;
 +}
 +
  
  CV_IMPL void
  cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param )
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(window_name);
 +    const auto window = icvFindWindowByName(window_name);
      if (!window)
          return;
  
@@@ -1615,18 -1487,18 +1615,18 @@@ CV_IMPL int cvGetTrackbarPos( const cha
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(window_name);
 +    const auto window = icvFindWindowByName(window_name);
      if (!window)
          return -1;
  
 -    CvTrackbar* trackbar = icvFindTrackbarByName(window,trackbar_name);
 +    const auto trackbar = icvFindTrackbarByName(window,trackbar_name);
      if (!trackbar)
          return -1;
  
      return trackbar->pos;
  }
  
 -
 +static void setTrackbarPos_(const std::shared_ptr<CvTrackbar>& trackbar, int pos);
  CV_IMPL void cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos )
  {
      CV_Assert(window_name && "NULL window name");
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(window_name);
 +    const auto window = icvFindWindowByName(window_name);
      if(!window)
          return;
  
 -    CvTrackbar* trackbar = icvFindTrackbarByName(window,trackbar_name);
 -    if( trackbar )
 -    {
 -        if( pos < trackbar->minval )
 -            pos = trackbar->minval;
 -
 -        if( pos > trackbar->maxval )
 -            pos = trackbar->maxval;
 -    }
 -    else
 +    const auto trackbar = icvFindTrackbarByName(window, trackbar_name);
 +    if (!trackbar)
      {
          CV_Error( CV_StsNullPtr, "No trackbar found" );
      }
  
 +    return setTrackbarPos_(trackbar, pos);
 +}
 +
 +static void setTrackbarPos_(const std::shared_ptr<CvTrackbar>& trackbar, int pos)
 +{
 +    CV_Assert(trackbar);
 +    CV_CheckLE(trackbar->minval, trackbar->maxval, "");
 +
 +    pos = std::max(pos, trackbar->minval);
 +    pos = std::min(pos, trackbar->maxval);
 +
      gtk_range_set_value( GTK_RANGE(trackbar->widget), pos );
  }
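
setTrackbarPos_() saturates the requested position into [minval, maxval] before calling gtk_range_set_value(), so out-of-range positions are clamped rather than rejected. A tiny sketch of that behaviour through the public API (assuming the call path goes through this helper; names are illustrative):

    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::namedWindow("demo");
        int level = 0;
        cv::createTrackbar("level", "demo", &level, 255);
        cv::setTrackbarPos("level", "demo", 500);        // clamped to maxval (255)
        int pos = cv::getTrackbarPos("level", "demo");   // == 255
        (void)pos;
        return 0;
    }
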
  
@@@ -1666,11 -1535,11 +1666,11 @@@ CV_IMPL void cvSetTrackbarMax(const cha
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(window_name);
 +    const auto window = icvFindWindowByName(window_name);
      if(!window)
          return;
  
 -    CvTrackbar* trackbar = icvFindTrackbarByName(window,trackbar_name);
 +    const auto trackbar = icvFindTrackbarByName(window,trackbar_name);
      if(!trackbar)
          return;
  
@@@ -1687,11 -1556,11 +1687,11 @@@ CV_IMPL void cvSetTrackbarMin(const cha
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(window_name);
 +    const auto window = icvFindWindowByName(window_name);
      if(!window)
          return;
  
 -    CvTrackbar* trackbar = icvFindTrackbarByName(window,trackbar_name);
 +    const auto trackbar = icvFindTrackbarByName(window,trackbar_name);
      if(!trackbar)
          return;
  
@@@ -1707,7 -1576,7 +1707,7 @@@ CV_IMPL void* cvGetWindowHandle( const 
  
      CV_LOCK_MUTEX();
  
 -    CvWindow* window = icvFindWindowByName(window_name);
 +    const auto window = icvFindWindowByName(window_name);
      if(!window)
          return NULL;
  
@@@ -1878,10 -1747,6 +1878,10 @@@ static void icvOnTrackbar( GtkWidget* w
          trackbar->widget == widget )
      {
          trackbar->pos = pos;
 +        if (trackbar->onChangeCallback)
 +            trackbar->onChangeCallback(pos, trackbar->userdata);
 +
 +        // deprecated
          if( trackbar->data )
              *trackbar->data = pos;
          if( trackbar->notify2 )
@@@ -1897,14 -1762,7 +1897,14 @@@ static gboolean icvOnClose( GtkWidget* 
      if( window->signature == CV_WINDOW_MAGIC_VAL &&
          window->frame == widget )
      {
 -        icvDeleteWindow(window);
 +        try
 +        {
 +            icvDeleteWindow_(window);
 +        }
 +        catch (...)
 +        {
 +            CV_LOG_WARNING(NULL, "OpenCV/GTK: unexpected C++ exception in icvDeleteWindow_");
 +        }
      }
      return TRUE;
  }
@@@ -2023,6 -1881,7 +2023,7 @@@ static gboolean icvOnMouse( GtkWidget *
                 (unsigned)pt.y < (unsigned)(image_widget->original_image->height)
              ))
          {
+             state &= gtk_accelerator_get_default_mod_mask();
              flags |= BIT_MAP(state, GDK_SHIFT_MASK,   CV_EVENT_FLAG_SHIFTKEY) |
                  BIT_MAP(state, GDK_CONTROL_MASK, CV_EVENT_FLAG_CTRLKEY)  |
                  BIT_MAP(state, GDK_MOD1_MASK,    CV_EVENT_FLAG_ALTKEY)   |
@@@ -2062,7 -1921,7 +2063,7 @@@ CV_IMPL int cvWaitKey( int delay 
              expired = !g_cond_timed_wait(cond_have_key, last_key_mutex, &timer);
          }
          else{
 -            if (g_windows.empty())
 +            if (getGTKWindows().empty())
              {
                  CV_LOG_WARNING(NULL, "cv::waitKey() is called without timeout and missing active windows. Ignoring");
              }
          }
          my_last_key = last_key;
          g_mutex_unlock(last_key_mutex);
 -        if(expired || g_windows.empty()){
 +        if (expired || getGTKWindows().empty())
 +        {
              return -1;
          }
          return my_last_key;
          if( delay > 0 )
              timer = g_timeout_add( delay, icvAlarm, &expired );
          last_key = -1;
 -        while( gtk_main_iteration_do(TRUE) && last_key < 0 && !expired && (delay > 0 || !g_windows.empty()))
 +        while( gtk_main_iteration_do(TRUE) && last_key < 0 && !expired && (delay > 0 || !getGTKWindows().empty()))
              ;
  
          if( delay > 0 && !expired )
      return last_key;
  }
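
With the getGTKWindows() checks above, a blocking cv::waitKey() issued while no HighGUI window exists no longer hangs: the backend logs the "missing active windows" warning and the call yields -1. A minimal illustration, assuming the standard cv::waitKey wrapper (exact timing depends on whether the GThread window loop is in use):

    #include <opencv2/highgui.hpp>
    #include <iostream>

    int main()
    {
        cv::destroyAllWindows();          // make sure no GTK windows are registered
        int key = cv::waitKey(0);         // warns instead of blocking forever
        std::cout << key << std::endl;    // expected: -1
        return 0;
    }
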
  
 +namespace cv { namespace impl {
  
 -#endif  // HAVE_GTK
 -#endif  // _WIN32
 +using namespace cv::highgui_backend;
 +
 +class GTKTrackbar;
 +
 +class GTKWindow
 +        : public UIWindow
 +        , public std::enable_shared_from_this<GTKWindow>
 +{
 +protected:
 +    const std::string name_;
 +    std::weak_ptr<CvWindow> window_;
 +    std::map<std::string, std::shared_ptr<GTKTrackbar> > trackbars_;
 +public:
 +    GTKWindow(const std::string& name, const std::shared_ptr<CvWindow>& window)
 +        : name_(name)
 +        , window_(window)
 +    {
 +        // nothing
 +    }
 +
 +    ~GTKWindow() CV_OVERRIDE
 +    {
 +        if (!window_.expired())
 +            destroy();
 +        CV_LOG_DEBUG(NULL, "OpenCV/UI/GTK: GTKWindow(" << name_ << ") is disposed");
 +    }
 +
 +    const std::string& getID() const CV_OVERRIDE { return name_; }
 +
 +    bool isActive() const CV_OVERRIDE { return !window_.expired(); }
 +
 +    void destroy() CV_OVERRIDE
 +    {
 +        cv::AutoLock lock(getWindowMutex());
 +        if (!window_.expired())
 +        {
 +            auto window = window_.lock();
 +            if (window)
 +                window->destroy();
 +            window_.reset();
 +        }
 +    }
 +
 +    void imshow(InputArray image) CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        CvImageWidget* image_widget = CV_IMAGE_WIDGET(window->widget);
 +        CV_Assert(image_widget);
 +        Mat img = image.getMat();
 +        CvMat c_img = cvMat(img);  // TODO Drop C-API
 +        cvImageWidgetSetImage(image_widget, &c_img);
 +    }
  
 -/* End of file. */
 +    double getProperty(int prop) const CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        // see cvGetWindowProperty
 +        switch (prop)
 +        {
 +        case CV_WND_PROP_FULLSCREEN:
 +            return (double)window->status;
 +
 +        case CV_WND_PROP_AUTOSIZE:
 +            return (window->flags & CV_WINDOW_AUTOSIZE) ? 1.0 : 0.0;
 +
 +        case CV_WND_PROP_ASPECTRATIO:
 +            return getRatioWindow_(window);
 +
 +#ifdef HAVE_OPENGL
 +        case CV_WND_PROP_OPENGL:
 +            return window->useGl ? 1.0 : 0.0;
 +#endif
 +
 +        default:
 +            break;
 +        }
 +        return std::numeric_limits<double>::quiet_NaN();
 +    }
 +
 +    bool setProperty(int prop, double value) CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        // see cvSetWindowProperty
 +        switch (prop)
 +        {
 +        case CV_WND_PROP_FULLSCREEN:
 +            if (value != CV_WINDOW_NORMAL && value != CV_WINDOW_FULLSCREEN)  // bad arg
 +                break;
 +            setModeWindow_(window, value);
 +            return true;
 +
 +        default:
 +            break;
 +        }
 +        return false;
 +    }
 +
 +    void resize(int width, int height) CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        resizeWindow_(window, width, height);
 +    }
 +
 +    void move(int x, int y) CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        gtk_window_move(GTK_WINDOW(window->frame), x, y);
 +    }
 +
 +    Rect getImageRect() const CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        return getImageRect_(window);
 +    }
 +
 +    void setTitle(const std::string& title) CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        gtk_window_set_title(GTK_WINDOW(window->frame), title.c_str());
 +    }
 +
 +    void setMouseCallback(MouseCallback onMouse, void* userdata /*= 0*/) CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        window->on_mouse = onMouse;
 +        window->on_mouse_param = userdata;
 +    }
 +
 +    std::shared_ptr<UITrackbar> createTrackbar(
 +        const std::string& name,
 +        int count,
 +        TrackbarCallback onChange /*= 0*/,
 +        void* userdata /*= 0*/
 +    ) CV_OVERRIDE
 +    {
 +        auto window = window_.lock();
 +        CV_Assert(window);
 +        CV_LOG_INFO(NULL, "OpenCV/UI: Creating GTK trackbar at '" << name_ << "': '" << name << "'");
 +        auto trackbar = createTrackbar_(window, name, count, onChange, userdata);
 +        auto ui_trackbar = std::make_shared<GTKTrackbar>(name, trackbar, shared_from_this());
 +        {
 +            cv::AutoLock lock(getWindowMutex());
 +            trackbars_.emplace(name, ui_trackbar);
 +        }
 +        return std::static_pointer_cast<UITrackbar>(ui_trackbar);
 +    }
 +
 +    std::shared_ptr<UITrackbar> findTrackbar(const std::string& name) CV_OVERRIDE
 +    {
 +        cv::AutoLock lock(getWindowMutex());
 +        auto i = trackbars_.find(name);
 +        if (i != trackbars_.end())
 +        {
 +            return std::static_pointer_cast<UITrackbar>(i->second);
 +        }
 +        return std::shared_ptr<UITrackbar>();
 +    }
 +};  // GTKWindow
 +
 +
 +class GTKTrackbar : public UITrackbar
 +{
 +protected:
 +    /*const*/ std::string name_;
 +    std::weak_ptr<CvTrackbar> trackbar_;
 +    std::weak_ptr<GTKWindow> parent_;
 +    std::map<std::string, std::shared_ptr<GTKTrackbar> > trackbars_;
 +public:
 +    GTKTrackbar(const std::string& name, const std::shared_ptr<CvTrackbar>& trackbar, const std::shared_ptr<GTKWindow>& parent)
 +        : trackbar_(trackbar)
 +        , parent_(parent)
 +    {
 +        name_ = std::string("<") + name + ">@" + parent->getID();
 +    }
 +
 +    ~GTKTrackbar() CV_OVERRIDE
 +    {
 +        if (!trackbar_.expired())
 +            destroy();
 +        CV_LOG_DEBUG(NULL, "OpenCV/UI/GTK: GTKTrackbar(" << name_ << ") is disposed");
 +    }
 +
 +    const std::string& getID() const CV_OVERRIDE { return name_; }
 +
 +    bool isActive() const CV_OVERRIDE { return !trackbar_.expired(); }
 +
 +    void destroy() CV_OVERRIDE
 +    {
 +        // nothing (destroyed with parent window, dedicated trackbar removal is not supported)
 +    }
 +
 +    int getPos() const CV_OVERRIDE
 +    {
 +        auto trackbar = trackbar_.lock();
 +        CV_Assert(trackbar);
 +        return trackbar->pos;
 +    }
 +    void setPos(int pos) CV_OVERRIDE
 +    {
 +        auto trackbar = trackbar_.lock();
 +        CV_Assert(trackbar);
 +        return setTrackbarPos_(trackbar, pos);
 +    }
 +
 +    cv::Range getRange() const CV_OVERRIDE
 +    {
 +        auto trackbar = trackbar_.lock();
 +        CV_Assert(trackbar);
 +        return cv::Range(trackbar->minval, trackbar->maxval);
 +    }
 +
 +    void setRange(const cv::Range& range) CV_OVERRIDE
 +    {
 +        auto trackbar = trackbar_.lock();
 +        CV_Assert(trackbar);
 +        CV_CheckLE(range.start, range.end, "Invalid trackbar range");
 +        gtk_range_set_range(GTK_RANGE(trackbar->widget), range.start, range.end);
 +    }
 +};  // GTKTrackbar
 +
 +
 +class GTKBackendUI : public UIBackend
 +{
 +public:
 +    GTKBackendUI()
 +    {
 +        // NB: avoid static initialization order fiasco
 +        (void)getGTKWindows();
 +    }
 +    ~GTKBackendUI() CV_OVERRIDE
 +    {
 +        destroyAllWindows();
 +    }
 +
 +    void destroyAllWindows() CV_OVERRIDE
 +    {
 +        cvDestroyAllWindows();
 +    }
 +
 +    // namedWindow
 +    virtual std::shared_ptr<UIWindow> createWindow(
 +        const std::string& winname,
 +        int flags
 +    ) CV_OVERRIDE
 +    {
 +        CV_LOG_INFO(NULL, "OpenCV/UI: Creating GTK window: " << winname << " (" << flags << ")");
 +        auto window = namedWindow_(winname, flags);
 +        auto ui_window = std::make_shared<GTKWindow>(winname, window);
 +        return ui_window;
 +    }
 +
 +    int waitKeyEx(int delay) CV_OVERRIDE
 +    {
 +        return cvWaitKey(delay);
 +    }
 +    int pollKey() CV_OVERRIDE
 +    {
 +        return cvWaitKey(1);  // TODO
 +    }
 +};  // GTKBackendUI
 +
 +static
 +std::shared_ptr<GTKBackendUI>& getInstance()
 +{
 +    static std::shared_ptr<GTKBackendUI> g_instance = std::make_shared<GTKBackendUI>();
 +    return g_instance;
 +}
 +
 +} // namespace impl
 +
 +#ifndef BUILD_PLUGIN
 +namespace highgui_backend {
 +
 +std::shared_ptr<UIBackend> createUIBackendGTK()
 +{
 +    return impl::getInstance();
 +}
 +
 +}  // namespace highgui_backend
 +#endif
 +
 +}  // namespace
 +
 +#ifdef BUILD_PLUGIN
 +
 +#define ABI_VERSION 0
 +#define API_VERSION 0
 +#include "plugin_api.hpp"
 +
 +static
 +CvResult cv_getInstance(CV_OUT CvPluginUIBackend* handle) CV_NOEXCEPT
 +{
 +    try
 +    {
 +        if (!handle)
 +            return CV_ERROR_FAIL;
 +        *handle = cv::impl::getInstance().get();
 +        return CV_ERROR_OK;
 +    }
 +    catch (...)
 +    {
 +        return CV_ERROR_FAIL;
 +    }
 +}
 +
 +static const OpenCV_UI_Plugin_API plugin_api =
 +{
 +    {
 +        sizeof(OpenCV_UI_Plugin_API), ABI_VERSION, API_VERSION,
 +        CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
 +        "GTK" CVAUX_STR(GTK_MAJOR_VERSION) " OpenCV UI plugin"
 +    },
 +    {
 +        /*  1*/cv_getInstance
 +    }
 +};
 +
 +const OpenCV_UI_Plugin_API* CV_API_CALL opencv_ui_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
 +{
 +    if (requested_abi_version == ABI_VERSION && requested_api_version <= API_VERSION)
 +        return &plugin_api;
 +    return NULL;
 +}
 +
 +#endif  // BUILD_PLUGIN
 +
 +#endif  // HAVE_GTK
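
The exported opencv_ui_plugin_init_v0() above is what a plugin host resolves at runtime. A rough sketch of that handshake (this is not OpenCV's actual loader; the shared-object name is hypothetical and the returned table's layout lives in the internal plugin_api.hpp, so it is treated as an opaque pointer here):

    #include <dlfcn.h>
    #include <cstdio>

    int main()
    {
        void* handle = dlopen("libopencv_highgui_gtk3.so", RTLD_LAZY);  // hypothetical file name
        if (!handle)
            return 1;
        typedef const void* (*plugin_init_t)(int abi, int api, void* reserved);
        plugin_init_t init = (plugin_init_t)dlsym(handle, "opencv_ui_plugin_init_v0");
        const void* api = init ? init(0, 0, NULL) : NULL;  // ABI_VERSION/API_VERSION 0, as above
        std::printf("plugin API table: %p\n", api);        // NULL if the versions do not match
        dlclose(handle);
        return 0;
    }
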