// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

#ifdef HAVE_PROTOBUF

#include <iostream>
#include <fstream>
#include <string>
#include <limits>
#include <algorithm>


#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include "opencv-onnx.pb.h"
#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic pop
#endif

#include "onnx_graph_simplifier.hpp"

namespace cv {
namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN

class ONNXImporter
{
    opencv_onnx::ModelProto model_proto;
    struct LayerInfo {
        int layerId;
        int outputId;
        LayerInfo(int _layerId = 0, int _outputId = 0) : layerId(_layerId), outputId(_outputId) {}
    };

    std::map<std::string, Mat> getGraphTensors(
                                    const opencv_onnx::GraphProto& graph_proto);
    Mat getBlob(const opencv_onnx::NodeProto& node_proto, const std::map<std::string, Mat>& constBlobs, int index);

    LayerParams getLayerParams(const opencv_onnx::NodeProto& node_proto);
    bool isCeilMode(const LayerParams& layerParams);

    void addLayer(Net& dstNet, LayerParams& layerParams,
                  const opencv_onnx::NodeProto& node_proto,
                  std::map<std::string, LayerInfo>& layer_id,
                  std::map<std::string, MatShape>& outShapes);

public:

    ONNXImporter(const char *onnxFile)
    {
        std::fstream input(onnxFile, std::ios::in | std::ios::binary);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model");
    }

    ONNXImporter(const char* buffer, size_t sizeBuffer)
    {
        struct _Buf : public std::streambuf
        {
            _Buf(const char* buffer, size_t sizeBuffer)
            {
                char* p = const_cast<char*>(buffer);
                setg(p, p, p + sizeBuffer);
            }
        };

        _Buf buf(buffer, sizeBuffer);
        std::istream input(&buf);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model from in-memory byte array.");
    }

    void populateNet(Net dstNet);
};
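
// A sketch of typical use (this mirrors how cv::dnn::readNetFromONNX is
// expected to drive the importer; the wrapper itself lives outside this file):
//
//     ONNXImporter importer("model.onnx");   // or ONNXImporter(buffer, size)
//     Net net;
//     importer.populateNet(net);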

inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
{
    if (layerParams.has(oldKey)) {
        layerParams.set(newKey, layerParams.get(oldKey));
        layerParams.erase(oldKey);
    }
}

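// release_raw_data() hands ownership of the protobuf payload back to the
// caller; deleting it right after getMatFromTensor() has copied the data frees
// the raw bytes early, keeping peak memory lower for large models.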
void releaseONNXTensor(opencv_onnx::TensorProto& tensor_proto)
{
    if (!tensor_proto.raw_data().empty()) {
        delete tensor_proto.release_raw_data();
    }
}

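// Runs a layer once at import time. This is how the importer folds constant
// subexpressions: when every input of a node is a known constant, the node is
// executed here and its result is stored as a new constant instead of adding a
// runtime layer. A minimal sketch with hypothetical values:
//
//     LayerParams lp;
//     lp.type = "Permute";                      // e.g. a Transpose node
//     std::vector<Mat> inps(1, constInput), outs;
//     runLayer(lp, inps, outs);                 // outs[0] becomes a constant blob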
void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
              std::vector<Mat>& outputs)
{
    Ptr<Layer> layer = LayerFactory::createLayerInstance(params.type, params);
    CV_Assert((bool)layer);

    std::vector<MatShape> inpShapes(inputs.size());
    int ddepth = CV_32F;
    for (size_t i = 0; i < inputs.size(); ++i)
    {
        inpShapes[i] = shape(inputs[i]);
        if (i > 0 && ddepth != inputs[i].depth())
            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
        ddepth = inputs[i].depth();
    }

    std::vector<MatShape> outShapes, internalShapes;
    layer->getMemoryShapes(inpShapes, 0, outShapes, internalShapes);

    std::vector<Mat> internals(internalShapes.size());
    outputs.resize(outShapes.size());
    for (size_t i = 0; i < outShapes.size(); ++i)
        outputs[i].create(outShapes[i], ddepth);
    for (size_t i = 0; i < internalShapes.size(); ++i)
        internals[i].create(internalShapes[i], ddepth);

    layer->finalize(inputs, outputs);
    layer->forward(inputs, outputs, internals);
}

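// Collects the graph initializers (weights, biases, shape tensors, ...) into a
// name-keyed table of constant blobs.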
std::map<std::string, Mat> ONNXImporter::getGraphTensors(
                                        const opencv_onnx::GraphProto& graph_proto)
{
    opencv_onnx::TensorProto tensor_proto;
    std::map<std::string, Mat> layers_weights;

    for (int i = 0; i < graph_proto.initializer_size(); i++)
    {
        tensor_proto = graph_proto.initializer(i);
        Mat mat = getMatFromTensor(tensor_proto);
        releaseONNXTensor(tensor_proto);
        layers_weights.insert(std::make_pair(tensor_proto.name(), mat));
    }
    return layers_weights;
}

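// ONNX stores integer attributes as int64; DictValue arrays are built from
// int32 here, with convertInt64ToInt32 expected to reject values outside the
// 32-bit range rather than truncate them silently.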
static DictValue parse(const ::google::protobuf::RepeatedField< ::google::protobuf::int64>& src) {
    std::vector<int32_t> dst(src.size());
    convertInt64ToInt32(src, dst, src.size());
    return DictValue::arrayInt(&dst[0], src.size());
}

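// Translates ONNX node attributes into OpenCV LayerParams. For example, a Conv
// node with kernel_shape=[3,3], strides=[1,1], pads=[1,1,1,1] comes out as
// LayerParams with "kernel_size", "stride" and "pad" set, ready for the
// "Convolution" layer created in populateNet() below.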
LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_proto)
{
    LayerParams lp;
    for(int i = 0; i < node_proto.attribute_size(); i++)
    {
        opencv_onnx::AttributeProto attribute_proto = node_proto.attribute(i);
        std::string attribute_name = attribute_proto.name();

        if(attribute_name == "kernel_shape")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("kernel_size", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "strides")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("stride", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "pads")
        {
            if (node_proto.op_type() == "Pad")
            {
                // Padding layer.
                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
                // We need to shuffle it to begin0, end0, begin1, end1, ...
                CV_Assert(attribute_proto.ints_size() % 2 == 0);
                const int dims = attribute_proto.ints_size() / 2;
                std::vector<int32_t> paddings;
                paddings.reserve(attribute_proto.ints_size());
                for (int i = 0; i < dims; ++i)
                {
                    paddings.push_back(attribute_proto.ints(i));
                    paddings.push_back(attribute_proto.ints(dims + i));
                }
                lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
            }
            else
            {
                // Convolution or pooling.
                CV_Assert(attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
                lp.set("pad", parse(attribute_proto.ints()));
            }
        }
        else if(attribute_name == "auto_pad")
        {
            if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
                lp.set("pad_mode", "SAME");
            }
            else if (attribute_proto.s() == "VALID") {
                lp.set("pad_mode", "VALID");
            }
        }
        else if(attribute_name == "dilations")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("dilation", parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_i())
        {
            ::google::protobuf::int64 src = attribute_proto.i();
            if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
                CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
            else
                lp.set(attribute_name, saturate_cast<int32_t>(src));
        }
        else if (attribute_proto.has_f())
        {
            lp.set(attribute_name, attribute_proto.f());
        }
        else if (attribute_proto.has_s())
        {
            lp.set(attribute_name, attribute_proto.s());
        }
        else if (attribute_proto.floats_size() > 0)
        {
            lp.set(attribute_name, DictValue::arrayReal(
                attribute_proto.floats().data(), attribute_proto.floats_size()));
        }
        else if (attribute_proto.ints_size() > 0)
        {
            lp.set(attribute_proto.name(), parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_t())
        {
            opencv_onnx::TensorProto tensor = attribute_proto.t();
            Mat blob = getMatFromTensor(tensor);
            lp.blobs.push_back(blob);
        }
        else if (attribute_proto.has_g() || attribute_proto.strings_size() > 0 ||
                 attribute_proto.tensors_size() > 0 || attribute_proto.graphs_size() > 0)
        {
            CV_Error(Error::StsNotImplemented, "Unexpected attribute type");
        }
        else
            CV_Error(Error::StsNotImplemented, "Unsupported attribute type");
    }
    return lp;
}

Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto,
                    const std::map<std::string, Mat>& constBlobs, int index)
{
    CV_Assert(index < node_proto.input_size());
    std::map<std::string, Mat>::const_iterator constBlob;
    constBlob = constBlobs.find(node_proto.input(index));
    if (constBlob == constBlobs.end()) {
        CV_Error(Error::StsObjectNotFound,
             "Blob " + node_proto.input(index) + " not found in const blobs");
    }
    return constBlob->second;
}

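// Registers a new layer in the network, connects every non-constant input to
// its producer, and propagates output shapes through getMemoryShapes() so that
// later nodes can consult outShapes. Auxiliary layers synthesized during import
// (e.g. the "/reshape" and "/avg" helpers below) reuse this same path.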
void ONNXImporter::addLayer(Net& dstNet, LayerParams& layerParams,
                            const opencv_onnx::NodeProto& node_proto,
                            std::map<std::string, LayerInfo>& layer_id,
                            std::map<std::string, MatShape>& outShapes)
{
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;

    int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
    for (int i = 0; i < node_proto.output_size(); ++i)
    {
        layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(id, i)));
    }

    std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
    int inpNum = 0;
    for (int j = 0; j < node_proto.input_size(); j++) {
        layerId = layer_id.find(node_proto.input(j));
        if (layerId != layer_id.end()) {
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
            ++inpNum;
            // Collect input shapes.
            shapeIt = outShapes.find(node_proto.input(j));
            CV_Assert(shapeIt != outShapes.end());
            layerInpShapes.push_back(shapeIt->second);
        }
    }
    // Compute shape of output blob for this layer.
    Ptr<Layer> layer = dstNet.getLayer(id);
    layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
    for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
    {
        outShapes[node_proto.output(i)] = layerOutShapes[i];
    }
}

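// Constants never become layers: a folded result is recorded here and looked
// up from constBlobs whenever a consumer node needs it.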
static void addConstant(const std::string& name,
                        const Mat& blob,
                        std::map<std::string, Mat>& constBlobs,
                        std::map<std::string, MatShape>& outShapes)
{
    constBlobs.insert(std::make_pair(name, blob));
    outShapes.insert(std::make_pair(name, shape(blob)));
}

void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();

    simplifySubgraphs(graph_proto);

    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // Shapes of internal blobs.
    std::map<std::string, MatShape> outShapes;
    // Add all input shapes: both the constant blobs and the network's inputs.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        if (!inpShape.empty())
        {
            inpShape[0] = std::max(inpShape[0], 1); // It's OK to have undetermined batch size
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // Create a map of the network inputs (constant blobs excluded).
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // Fill the map: layer name -> (layer id, output id).
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

    for(int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;

        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool" ||
                 layer_type == "ReduceMean" || layer_type == "ReduceSum" || layer_type == "ReduceMax")
        {
            CV_Assert(node_proto.input_size() == 1);
            layerParams.type = "Pooling";
            String pool;
            if (layer_type == "GlobalMaxPool" || layer_type == "ReduceMax")
                pool = "MAX";
            else if (layer_type == "ReduceSum")
                pool = "SUM";
            else
                pool = "AVE";
            layerParams.set("pool", pool);
            layerParams.set("global_pooling", !layerParams.has("axes"));
            if (layerParams.has("axes") && (layer_type == "ReduceMean" || layer_type == "ReduceSum" || layer_type == "ReduceMax"))
            {
                MatShape inpShape = outShapes[node_proto.input(0)];
                DictValue axes = layerParams.get("axes");
                bool keepdims = layerParams.get<int>("keepdims");
                MatShape targetShape = inpShape;
                for (int i = 0; i < axes.size(); i++) {
                    int axis = clamp(axes.get<int>(i), inpShape.size());
                    if (keepdims) {
                        targetShape[axis] = 1;
                    } else {
                        targetShape.erase(targetShape.begin() + axis);
                    }
                }

                if (inpShape.size() == 3 && axes.size() <= 2)
                {
                    int axis = clamp(axes.get<int>(0), inpShape.size());
                    CV_CheckNE(axis, 0, "");

                    LayerParams reshapeLp;
                    reshapeLp.name = layerParams.name + "/reshape";
                    reshapeLp.type = "Reshape";
                    CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end());
                    reshapeLp.set("axis", 0);
                    reshapeLp.set("num_axes", 1);
                    int newShape[] = {1, -1};
                    reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], 2));

                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(0));
                    proto.add_output(reshapeLp.name);
                    addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);

                    LayerParams avgLp;
                    avgLp.name = layerParams.name + "/avg";
                    avgLp.type = "Pooling";
                    CV_Assert(layer_id.find(avgLp.name) == layer_id.end());
                    avgLp.set("pool", pool);
                    if (axes.size() == 2)
                    {
                        CV_CheckEQ(clamp(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
                        CV_CheckEQ(clamp(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
                        avgLp.set("global_pooling", true);
                    }
                    else
                    {
                        avgLp.set(axis == 2 ? "global_pooling_w" : "global_pooling_h", true);
                        avgLp.set(axis == 2 ? "kernel_h" : "kernel_w", 1);
                    }

                    node_proto.set_input(0, reshapeLp.name);
                    node_proto.set_output(0, avgLp.name);
                    addLayer(dstNet, avgLp, node_proto, layer_id, outShapes);
                }
                else
                {
                    if (inpShape.size() != 4 && inpShape.size() != 5)
                        CV_Error(Error::StsNotImplemented, "Unsupported input shape of " + layer_type + " operation.");

                    CV_Assert(axes.size() <= inpShape.size() - 2);
                    std::vector<int> kernel_size(inpShape.size() - 2, 1);
                    for (int i = 0; i < axes.size(); i++) {
                        int axis = clamp(axes.get<int>(i), inpShape.size());
                        CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
                        kernel_size[axis - 2] = inpShape[axis];
                    }
                    LayerParams poolLp = layerParams;
                    poolLp.name = layerParams.name + "/avg";
                    CV_Assert(layer_id.find(poolLp.name) == layer_id.end());
                    poolLp.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));

                    node_proto.set_output(0, poolLp.name);
                    addLayer(dstNet, poolLp, node_proto, layer_id, outShapes);
                }

                layerParams.type = "Reshape";
                layerParams.set("dim", DictValue::arrayInt(&targetShape[0], targetShape.size()));

                node_proto.set_input(0, node_proto.output(0));
                node_proto.set_output(0, layerParams.name);
            }
            else if (!layerParams.has("axes") && (layer_type == "ReduceMean" || layer_type == "ReduceSum" || layer_type == "ReduceMax"))
            {
                CV_CheckEQ(layerParams.get<int>("keepdims"), 0, "layer only supports keepdims = false");
                LayerParams reshapeLp;
                reshapeLp.name = layerParams.name + "/reshape";
                reshapeLp.type = "Reshape";
                CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end());
                int newShape[] = {1, 1, 1, -1};
                reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], 4));

                opencv_onnx::NodeProto proto;
                proto.add_input(node_proto.input(0));
                proto.add_output(reshapeLp.name);
                addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);

                LayerParams poolLp = layerParams;
                poolLp.name = layerParams.name + "/pool";
                CV_Assert(layer_id.find(poolLp.name) == layer_id.end());

                node_proto.set_input(0, reshapeLp.name);
                node_proto.set_output(0, poolLp.name);
                addLayer(dstNet, poolLp, node_proto, layer_id, outShapes);

                layerParams.type = "Reshape";
                int targetShape[] = {1};
                layerParams.set("dim", DictValue::arrayInt(&targetShape[0], 1));

                node_proto.set_input(0, node_proto.output(0));
                node_proto.set_output(0, layerParams.name);
            }
        }
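        // ONNX Slice comes in two flavours: opset 1 keeps starts/ends/axes as
        // node attributes (single input), while opset 10 and later pass them as
        // extra constant inputs; both paths below produce the same
        // "begin"/"end" parameters for OpenCV's Slice layer.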
        else if (layer_type == "Slice")
        {
            int axis = 0;
            std::vector<int> begin;
            std::vector<int> end;
            int inp_size = node_proto.input_size();

            if (inp_size == 1)
            {
                if (layerParams.has("steps"))
                {
                    DictValue steps = layerParams.get("steps");
                    for (int i = 0; i < steps.size(); ++i)
                    {
                        if (steps.get<int>(i) != 1)
                            CV_Error(Error::StsNotImplemented,
                                "Slice layer only supports steps = 1");
                    }
                }
                if (layerParams.has("axes")) {
                    DictValue axes = layerParams.get("axes");
                    for (int i = 1; i < axes.size(); ++i) {
                        CV_Assert(axes.get<int>(i - 1) == axes.get<int>(i) - 1);
                    }
                    axis = axes.get<int>(0);
                }

                DictValue starts = layerParams.get("starts");
                DictValue ends = layerParams.get("ends");
                CV_Assert(starts.size() == ends.size());

                if (axis > 0) {
                    begin.resize(axis, 0);
                    end.resize(axis, -1);
                }
                for (int i = 0; i < starts.size(); ++i)
                {
                    begin.push_back(starts.get<int>(i));
                    int finish = ends.get<int>(i);
                    end.push_back((finish < 0) ? finish - 1 : finish); // ONNX "ends" is exclusive; make negative ends inclusive
                }
            } else {
                CV_Assert(inp_size >= 3);
                for (int i = 1; i < inp_size; i++) {
                    CV_Assert(constBlobs.find(node_proto.input(i)) != constBlobs.end());
                }
                Mat start_blob = getBlob(node_proto, constBlobs, 1);
                Mat end_blob   = getBlob(node_proto, constBlobs, 2);
                CV_Assert(start_blob.total() == end_blob.total());

                if (inp_size > 3) {
                    Mat axes_blob = getBlob(node_proto, constBlobs, 3);
                    const int* axes = (int*)axes_blob.data;
                    for (int i = 1; i < axes_blob.total(); ++i) {
                        CV_Assert(axes[i - 1] == axes[i] - 1);
                    }
                    axis = axes[0];
                }

                const int* starts = start_blob.ptr<int>();
                const int* ends   = end_blob.ptr<int>();
                if (axis > 0) {
                    begin.resize(axis, 0);
                    end.resize(axis, -1);
                }
                std::copy(starts, starts + start_blob.total(), std::back_inserter(begin));
                for (int i = 0; i < end_blob.total(); ++i)
                {
                    int finish = ends[i];
                    end.push_back((finish < 0) ? finish - 1 : finish); // ONNX "ends" is exclusive; make negative ends inclusive
                }

                if (inp_size == 5) {
                    CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
                    Mat step_blob = getBlob(node_proto, constBlobs, 4);

                    // An unusual use of the Slice op: reversing a tensor with step = -1.
                    // Only 2D constants are handled, as a workaround.
                    if (constBlobs.find(node_proto.input(0)) != constBlobs.end() &&
                        axis == 0 &&
                        start_blob.at<int>(0) == -1 && step_blob.at<int>(0) == -1 &&
                        end_blob.at<int>(0) == std::numeric_limits<int32_t>::min())
                    {
                        Mat inp = getBlob(node_proto, constBlobs, 0);
                        if (inp.dims == 2)
                        {
                            Mat flipped;
                            flip(inp, flipped, 0);
                            addConstant(layerParams.name, flipped, constBlobs, outShapes);
                            continue;
                        }
                    }
                    CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1");
                }
            }
            layerParams.set("begin", DictValue::arrayInt(&begin[0], begin.size()));
            layerParams.set("end", DictValue::arrayInt(&end[0], end.size()));
            layerParams.set("axis", axis);

            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat inp = getBlob(node_proto, constBlobs, 0);
                std::vector<Mat> inputs, sliced;
                inputs.push_back(inp);
                runLayer(layerParams, inputs, sliced);
                CV_Assert(sliced.size() == 1);
                addConstant(layerParams.name, sliced[0], constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Split")
        {
            if (layerParams.has("split"))
            {
                DictValue splits = layerParams.get("split");
                const int numSplits = splits.size();
                CV_Assert(numSplits > 1);

                std::vector<int> slicePoints(numSplits - 1, splits.get<int>(0));
                for (int i = 1; i < splits.size() - 1; ++i)
                {
                    slicePoints[i] = slicePoints[i - 1] + splits.get<int>(i - 1);
                }
                layerParams.set("slice_point", DictValue::arrayInt(&slicePoints[0], slicePoints.size()));
            }
            else
            {
                layerParams.set("num_split", node_proto.output_size());
            }
            layerParams.type = "Slice";
        }
        else if (layer_type == "Add" || layer_type == "Sum" || layer_type == "Sub")
        {
            bool isSub = layer_type == "Sub";
            CV_CheckEQ(node_proto.input_size(), 2, "");
            bool is_const_0 = layer_id.find(node_proto.input(0)) == layer_id.end();
            bool is_const_1 = layer_id.find(node_proto.input(1)) == layer_id.end();
            if (is_const_0 && is_const_1)
            {
                Mat blob_0 = getBlob(node_proto, constBlobs, 0);
                Mat blob_1 = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob_0.size == blob_1.size);
                Mat output = isSub ? (blob_0 - blob_1) : (blob_0 + blob_1);
                addConstant(layerParams.name, output, constBlobs, outShapes);
                continue;
            }
            else if (is_const_0 || is_const_1)
            {
                int const_blob_id = is_const_0 ? 0 : 1;
                Mat blob = getBlob(node_proto, constBlobs, const_blob_id);
                int blob_total = blob.total();
                if (blob_total == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", (isSub ? -1 : 1) * blob.at<float>(0));
                }
                else {
                    MatShape inpShape = outShapes[node_proto.input(1 - const_blob_id)];
                    if (shape(blob) == inpShape)
                    {
                        LayerParams constParams;
                        constParams.name = layerParams.name + "/const";
                        constParams.type = "Const";
                        constParams.blobs.push_back((isSub ? -1 : 1) * blob);
                        int id = dstNet.addLayer(constParams.name, constParams.type, constParams);
                        layer_id.insert(std::make_pair(constParams.name, LayerInfo(id, 0)));
                        outShapes[constParams.name] = shape(blob);

                        layerParams.type = "Eltwise";
                        node_proto.set_input(const_blob_id, constParams.name);
                    }
                    else
                    {
                        layerParams.type = "Scale";
                        layerParams.set("bias_term", true);
                        int axis = 1;
                        for (int i = 0; i < graph_proto.initializer_size(); i++)
                        {
                            opencv_onnx::TensorProto tensor_proto = graph_proto.initializer(i);
                            if (tensor_proto.name() == node_proto.input(const_blob_id))
                            {
                                axis = inpShape.size() - tensor_proto.dims_size();
                                break;
                            }
                        }
                        layerParams.set("axis", axis);
                        blob = blob.reshape(1, 1);
                        layerParams.blobs.push_back((isSub ? -1 : 1) * blob);
                    }
                }
            }
            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
            {
                layerParams.type = "Eltwise";
                if (isSub)
                {
                    static float subCoeffs[] = {1.f, -1.f};
                    layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
                }
            }
            else
            {
                if (isSub)
                {
                    LayerParams powerParams;
                    powerParams.name = layerParams.name + "/neg";
                    powerParams.type = "Power";
                    powerParams.set("scale", -1);

                    // Create Power layer
                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                    // Connect to input
                    layerId = layer_id.find(node_proto.input(1));
                    CV_Assert(layerId != layer_id.end());
                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                    // Add shape
                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];

                    // Replace input with Power
                    node_proto.set_input(1, powerParams.name);
                }
                layerParams.type = "Scale";
                layerParams.set("bias_term", true);
            }
        }
        else if (layer_type == "Pow")
        {
            if (layer_id.find(node_proto.input(1)) != layer_id.end())
                CV_Error(Error::StsNotImplemented, "Unsupported Pow op with variable power");

            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() != 1)
                CV_Error(Error::StsNotImplemented, "Pow op supports only scalar power");

            blob.convertTo(blob, CV_32F);
            layerParams.type = "Power";
            layerParams.set("power", blob.at<float>(0));
        }
        else if (layer_type == "Max")
        {
            layerParams.type = "Eltwise";
            layerParams.set("operation", "max");
        }
        else if (layer_type == "Neg")
        {
            layerParams.type = "Power";
            layerParams.set("scale", -1);
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            addConstant(layerParams.name, layerParams.blobs[0], constBlobs, outShapes);
            continue;
        }
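        // ONNX LSTM packs its gates in IFGO order while OpenCV's LSTM layer
        // expects IGFO, so the weight and bias blobs are reordered in place
        // below; the trailing Reshape re-inserts the extra axis that ONNX LSTM
        // outputs carry.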
        else if (layer_type == "LSTM")
        {
            LayerParams lstmParams = layerParams;
            lstmParams.name += "/lstm";

            // https://pytorch.org/docs/stable/nn.html#lstm
            CV_Assert(node_proto.input_size() == 7);
            Mat Wx = getBlob(node_proto, constBlobs, 1);
            Mat Wh = getBlob(node_proto, constBlobs, 2);
            Mat b = getBlob(node_proto, constBlobs, 3);
            CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 5)), 0, "Unsupported non-zero initial_h");
            CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 6)), 0, "Unsupported non-zero initial_c");
            b = b.reshape(1, b.size[0]);

            const int numHidden = lstmParams.get<int>("hidden_size");
            const int numDirs = Wx.size[0];  // Is 1 for forward only and 2 for bidirectional LSTM.
            const int numFeatures = Wx.size[2];
            Mat bx = b.colRange(0, b.cols / 2);
            Mat bh = b.colRange(b.cols / 2, b.cols);
            b = bx + bh;

            // IFGO->IGFO
            for (int k = 0; k < numDirs; ++k)
            {
                float* WxData = Wx.ptr<float>(k);
                float* WhData = Wh.ptr<float>(k);
                float* biasData = b.ptr<float>(k);
                for (int j = 0; j < numHidden; ++j)
                {
                    for (int i = 0; i < numFeatures; ++i)
                    {
                        std::swap(WxData[(numHidden + j) * numFeatures + i],
                                  WxData[(numHidden * 2 + j) * numFeatures + i]);
                    }
                    for (int i = 0; i < numHidden; ++i)
                    {
                        std::swap(WhData[(numHidden + j) * numHidden + i],
                                  WhData[(numHidden * 2 + j) * numHidden + i]);
                    }
                    std::swap(biasData[numHidden + j], biasData[numHidden * 2 + j]);
                }
            }
            Wx = Wx.reshape(1, Wx.size[0] * Wx.size[1]);
            Wh = Wh.reshape(1, Wh.size[0] * Wh.size[1]);

            lstmParams.blobs.resize(3);
            lstmParams.blobs[0] = Wh;
            lstmParams.blobs[1] = Wx;
            lstmParams.blobs[2] = b;
            lstmParams.set("bidirectional", lstmParams.get<String>("direction", "") == "bidirectional");

            node_proto.set_output(0, lstmParams.name);  // set a different name so output shapes are registered on it
            addLayer(dstNet, lstmParams, node_proto, layer_id, outShapes);

            MatShape lstmShape = outShapes[node_proto.output(0)];

            // Insert the dummy dimension of size 1 that ONNX LSTM outputs carry.
            lstmShape.insert(lstmShape.begin() + 1, 1);

            layerParams.type = "Reshape";
            layerParams.set("dim", DictValue::arrayInt(&lstmShape[0], lstmShape.size()));
            node_proto.set_input(0, lstmParams.name);  // redirect input to LSTM
            node_proto.set_output(0, layerParams.name);  // keep the original LSTM name
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1, layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "Clip")
        {
            layerParams.type = "ReLU6";
            replaceLayerParam(layerParams, "min", "min_value");
            replaceLayerParam(layerParams, "max", "max_value");
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "Relu")
        {
            layerParams.type = "ReLU";
        }
        else if (layer_type == "Elu")
        {
            layerParams.type = "ELU";
        }
        else if (layer_type == "Tanh")
        {
            layerParams.type = "TanH";
        }
        else if (layer_type == "PRelu")
        {
            layerParams.type = "PReLU";
            layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "InstanceNormalization")
        {
            if (node_proto.input_size() != 3)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias");

            layerParams.blobs.resize(4);
            layerParams.blobs[2] = getBlob(node_proto, constBlobs, 1);  // weightData
            layerParams.blobs[3] = getBlob(node_proto, constBlobs, 2);  // biasData
            layerParams.set("has_bias", true);
            layerParams.set("has_weight", true);

            // Get number of channels in input
            int size = layerParams.blobs[2].total();
            layerParams.blobs[0] = Mat::zeros(size, 1, CV_32F); // mean
            layerParams.blobs[1] = Mat::ones(size, 1, CV_32F); // std

            LayerParams mvnParams;
            mvnParams.name = layerParams.name + "/MVN";
            mvnParams.type = "MVN";
            mvnParams.set("eps", layerParams.get<float>("epsilon"));
            layerParams.erase("epsilon");

            // Create MVN layer
            int id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
            // Connect to input
            layerId = layer_id.find(node_proto.input(0));
            CV_Assert(layerId != layer_id.end());
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
            // Add shape
            layer_id.insert(std::make_pair(mvnParams.name, LayerInfo(id, 0)));
            outShapes[mvnParams.name] = outShapes[node_proto.input(0)];

            // Replace BatchNorm's input with MVN
            node_proto.set_input(0, mvnParams.name);
            layerParams.type = "BatchNorm";
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData = getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
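        // Gemm maps onto InnerProduct: B becomes the weight matrix (transposed
        // when transB == 0 so that rows index output channels), and the
        // optional C input becomes the bias blob.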
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat inputBuf = getBlob(node_proto, constBlobs, 0);

                LayerParams constParams;
                constParams.name = node_proto.input(0);
                constParams.type = "Const";
                constParams.blobs.push_back(inputBuf);

                opencv_onnx::NodeProto proto;
                proto.add_output(constParams.name);
                addLayer(dstNet, constParams, proto, layer_id, outShapes);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            layerParams.set("bias_term", false);
            CV_Assert(constBlobs.find(node_proto.input(0)) == constBlobs.end());
            int firstInpDims = outShapes[node_proto.input(0)].size();
            int secondInpDims;

            if (constBlobs.find(node_proto.input(1)) != constBlobs.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                secondInpDims = blob.dims;
                layerParams.blobs.push_back(blob.t());
                layerParams.set("num_output", layerParams.blobs[0].size[0]);
            } else {
                secondInpDims = outShapes[node_proto.input(1)].size();
            }
            layerParams.set("axis", firstInpDims - secondInpDims + 1);
        }
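        // Mul/Div with one constant operand becomes Power (scalar constant) or
        // Scale (blob); two variable inputs of equal shape become an Eltwise
        // layer, and division by a variable is expressed as Scale after a
        // pointwise reciprocal (Power with power = -1).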
        else if (layer_type == "Mul" || layer_type == "Div")
        {
            CV_Assert(node_proto.input_size() == 2);

            bool isDiv = layer_type == "Div";
            int constId = -1;
            bool haveVariables = false;
            for (int i = 0; i < 2; ++i)
            {
                if (constBlobs.find(node_proto.input(i)) != constBlobs.end())
                    constId = i;
                else
                    haveVariables = true;
            }
            if (constId != -1 && haveVariables)
            {
                Mat blob = getBlob(node_proto, constBlobs, constId);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    float coeff = isDiv ? 1.0 / blob.at<float>(0) : blob.at<float>(0);
                    layerParams.set("scale", coeff);
                    layerParams.type = "Power";
                }
                else {
                    if (isDiv)
                        divide(1.0, blob, blob);
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
            {
                layerParams.type = "Eltwise";
                layerParams.set("operation", isDiv ? "div" : "prod");
            }
            else
            {
                // The Scale layer allocates its output with the first input's shape,
                // so put the larger input first.
                if (total(outShapes[node_proto.input(0)]) < total(outShapes[node_proto.input(1)]))
                {
                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(1));
                    proto.add_input(node_proto.input(0));
                    proto.add_output(layerParams.name);
                    node_proto = proto;
                }

                if (isDiv)
                {
                    LayerParams powerParams;
                    powerParams.name = layerParams.name + "/inv";
                    powerParams.type = "Power";
                    powerParams.set("power", -1);

                    // Create Power layer
                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                    // Connect to input
                    layerId = layer_id.find(node_proto.input(1));
                    CV_Assert(layerId != layer_id.end());
                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                    // Add shape
                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];

                    // Replace input with Power
                    node_proto.set_input(1, powerParams.name);
                }
                layerParams.type = "Scale";
            }

            if (!haveVariables)
            {
                Mat inp0 = getBlob(node_proto, constBlobs, 0);
                Mat inp1 = getBlob(node_proto, constBlobs, 1);
                if (inp0.size != inp1.size && inp1.total() != 1)
                    CV_Error(Error::StsNotImplemented, "Constant multiply with different shapes");

                Mat out = isDiv ? inp0 / inp1 : inp0.mul(inp1);
                out = out.reshape(1, inp0.dims, inp0.size);
                out.dims = inp0.dims;  // to work around dims == 1
                addConstant(layerParams.name, out, constBlobs, outShapes);
                continue;
            }
        }
1101         else if (layer_type == "Conv")
1102         {
1103             CV_Assert(node_proto.input_size() >= 2);
1104             layerParams.type = "Convolution";
1105             for (int j = 1; j < node_proto.input_size(); j++) {
1106                 if (constBlobs.find(node_proto.input(j)) != constBlobs.end())
1107                 {
1108                     layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
1109                 }
1110             }
1111             int outCn = layerParams.blobs.empty() ? outShapes[node_proto.input(1)][0] : layerParams.blobs[0].size[0];
1112             layerParams.set("num_output", outCn);
1113         }
1114         else if (layer_type == "ConvTranspose")
1115         {
1116             CV_Assert(node_proto.input_size() >= 2);
1117             layerParams.type = "Deconvolution";
1118             for (int j = 1; j < node_proto.input_size(); j++) {
1119                 layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
1120             }
1121             layerParams.set("num_output", layerParams.blobs[0].size[1] * layerParams.get<int>("group", 1));
1122             layerParams.set("bias_term", node_proto.input_size() == 3);
1123
1124             if (!layerParams.has("kernel_size"))
1125                 CV_Error(Error::StsNotImplemented,
1126                          "Required attribute 'kernel_size' is not present.");
1127
1128             if (layerParams.has("output_shape"))
1129             {
1130                 const DictValue& outShape = layerParams.get("output_shape");
1131                 DictValue strides = layerParams.get("stride");
1132                 DictValue kernel = layerParams.get("kernel_size");
1133
1134                 String padMode;
1135                 std::vector<int> adjust_pads;
1136                 if (layerParams.has("pad_mode"))
1137                 {
1138                     padMode = toUpperCase(layerParams.get<String>("pad_mode"));
1139                     if (padMode != "SAME" && padMode != "VALID")
1140                         CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
1141
1142                     for (int i = 0; i < strides.size(); i++)
1143                     {
1144                         int sz = outShape.get<int>(2 + i);
1145                         int stride = strides.get<int>(i);
1146                         adjust_pads.push_back(padMode == "SAME"? (sz - 1) % stride :
1147                                                                  (sz - kernel.get<int>(i)) % stride);
1148                     }
1149                     layerParams.set("adj", DictValue::arrayInt(&adjust_pads[0], adjust_pads.size()));
1150                 }
1151             }
1152             else if (layerParams.has("output_padding"))
1153             {
1154                 replaceLayerParam(layerParams, "output_padding", "adj");
1155             }
1156         }
1157         else if (layer_type == "Transpose")
1158         {
1159             layerParams.type = "Permute";
1160             replaceLayerParam(layerParams, "perm", "order");
1161
1162             CV_Assert(node_proto.input_size() == 1);
1163             if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
1164             {
1165                 std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), transposed;
1166                 runLayer(layerParams, inputs, transposed);
1167                 CV_Assert(transposed.size() == 1);
1168                 addConstant(layerParams.name, transposed[0], constBlobs, outShapes);
1169                 continue;
1170             }
1171         }
1172         else if (layer_type == "Squeeze")
1173         {
1174             CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
1175             DictValue axes_dict = layerParams.get("axes");
1176             MatShape inpShape = outShapes[node_proto.input(0)];
1177
1178             std::vector<bool> maskedAxes(inpShape.size(), false);
1179             for (int i = 0; i < axes_dict.size(); ++i)
1180             {
1181                 int axis = axes_dict.getIntValue(i);
1182                 CV_CheckLE(axis, static_cast<int>(inpShape.size()), "Squeeze axis");
1183                 maskedAxes[axis] = inpShape[axis] == 1;
1184             }
1185             MatShape outShape;
1186             for (int i = 0; i < inpShape.size(); ++i)
1187             {
1188                 if (!maskedAxes[i])
1189                     outShape.push_back(inpShape[i]);
1190             }
1191             if (outShape.size() != inpShape.size())
1192             {
1193                 layerParams.type = "Reshape";
1194                 layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
1195             }
1196             else
1197                 layerParams.type = "Identity";
1198
1199             if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
1200             {
1201                 Mat inp = getBlob(node_proto, constBlobs, 0);
1202                 Mat out = inp.reshape(1, outShape);
1203                 out.dims = outShape.size();  // to workaround dims == 1
1204                 addConstant(layerParams.name, out, constBlobs, outShapes);
1205                 continue;
1206             }
1207         }
1208         else if (layer_type == "Flatten")
1209         {
1210             CV_CheckEQ(node_proto.input_size(), 1, "");
1211             if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
1212             {
1213                 Mat input = getBlob(node_proto, constBlobs, 0);
1214                 int axis = clamp(layerParams.get<int>("axis", 1), input.dims);
1215
1216                 std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
1217                 out_size.push_back(input.total(axis));
1218                 Mat output = input.reshape(1, out_size);
1219                 addConstant(layerParams.name, output, constBlobs, outShapes);
1220                 continue;
1221             }
1222         }
1223         else if (layer_type == "Unsqueeze")
1224         {
            CV_Assert(node_proto.input_size() == 1);
            DictValue axes = layerParams.get("axes");
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                // Constant input.
                Mat input = getBlob(node_proto, constBlobs, 0);

                std::vector<int> dims;
                for (int j = 0; j < input.dims; j++) {
                    dims.push_back(input.size[j]);
                }
                CV_Assert(axes.getIntValue(axes.size()-1) <= static_cast<int>(dims.size()));
                for (int j = 0; j < axes.size(); j++) {
                    dims.insert(dims.begin() + axes.getIntValue(j), 1);
                }

                Mat out = input.reshape(0, dims);
                addConstant(layerParams.name, out, constBlobs, outShapes);
                continue;
            }

            // Variable input.
            if (axes.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");

            MatShape inpShape = outShapes[node_proto.input(0)];
            int axis = axes.getIntValue(0);
            CV_Assert(0 <= axis && axis <= static_cast<int>(inpShape.size()));
            std::vector<int> outShape = inpShape;
            outShape.insert(outShape.begin() + axis, 1);
            layerParams.type = "Reshape";
            layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
        }
        else if (layer_type == "Expand")
        {
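            // Expand broadcasts the input to the shape given by the (constant) second
            // input. An extra unit axis is handled by inserting a Reshape layer, and
            // the actual replication is lowered to Scale or to Identity copies + Concat.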
            CV_CheckEQ(node_proto.input_size(), 2, "");
            CV_Assert(constBlobs.find(node_proto.input(1)) != constBlobs.end());
            Mat newShapeMat = getBlob(node_proto, constBlobs, 1);
            MatShape targetShape(newShapeMat.ptr<int>(), newShapeMat.ptr<int>() + newShapeMat.total());

            MatShape inpShape;
            bool haveVariables = constBlobs.find(node_proto.input(0)) == constBlobs.end();
            if (haveVariables)
            {
                shapeIt = outShapes.find(node_proto.input(0));
                CV_Assert(shapeIt != outShapes.end());
                inpShape = shapeIt->second;
            }
            else
            {
                inpShape = shape(getBlob(node_proto, constBlobs, 0));
            }

            String srcName = node_proto.input(0);
            // Unsqueeze and repeat along new axis
            if (targetShape.size() == inpShape.size() + 1)
            {
                for (int i = 0; i < targetShape.size(); i++)
                {
                    if (targetShape[i] == -1 && i < inpShape.size())
                        targetShape[i] = inpShape[i];
                    else if (i < inpShape.size() && targetShape[i] != inpShape[i])
                        inpShape.insert(inpShape.begin() + i, 1);
                }
                if (haveVariables)
                {
                    LayerParams reshapeLp;
                    reshapeLp.name = layerParams.name + "/reshape";
                    reshapeLp.type = "Reshape";
                    CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end());
                    reshapeLp.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size()));

                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(0));
                    proto.add_output(reshapeLp.name);
                    addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);
                    srcName = reshapeLp.name;
                }
            }
            CV_CheckEQ(inpShape.size(), targetShape.size(), "Unsupported Expand op with different dims");

            std::vector<int> broadcast_axes;
            for (int i = 0; i < targetShape.size(); i++)
            {
                if (targetShape[i] != inpShape[i])
                {
                    if (inpShape[i] == 1)
                        broadcast_axes.push_back(i);
                    else
                        CV_Error(Error::StsError, format("Cannot broadcast along axis: %d", i));
                }
            }

            if (!haveVariables)
            {
                if (broadcast_axes.size() != 1)
                    CV_Error(Error::StsNotImplemented, "Expand op doesn't support multiple axes for constant input");

                Mat input = getBlob(node_proto, constBlobs, 0);
                input = input.reshape(0, total(inpShape, 0, broadcast_axes[0]));
                Mat output = cv::repeat(input, 1, targetShape[broadcast_axes[0]]);
                output = output.reshape(0, targetShape);
                addConstant(layerParams.name, output, constBlobs, outShapes);
                continue;
            }

            if (broadcast_axes.size() == 2 &&
                broadcast_axes[0] == broadcast_axes[1] - 1 && broadcast_axes[1] == inpShape.size() - 1)
            {
                LayerParams constParams;
                constParams.name = layerParams.name + "/const";
                CV_Assert(layer_id.find(constParams.name) == layer_id.end());
                constParams.type = "Const";

                Mat inp = Mat::ones((int)newShapeMat.total(), newShapeMat.ptr<int>(), CV_32F);
                constParams.blobs.push_back(inp);

                opencv_onnx::NodeProto proto;
                proto.add_output(constParams.name);
                addLayer(dstNet, constParams, proto, layer_id, outShapes);

                layerParams.type = "Scale";
                layerParams.set("bias_term", false);
                node_proto.set_input(0, constParams.name);
                node_proto.set_input(1, shapeIt->first);
            }
            else if (broadcast_axes.size() == 1 && broadcast_axes[0] <= 1)
            {
                String base_name = layerParams.name + "/copy_";
                std::vector<std::string> input_names;
                for (int j = 0; j < targetShape[broadcast_axes[0]]; j++)
                {
                    std::ostringstream ss;
                    ss << j;
                    LayerParams copyLP;
                    copyLP.name = base_name + ss.str();
                    copyLP.type = "Identity";
                    CV_Assert(layer_id.find(copyLP.name) == layer_id.end());
                    input_names.push_back(copyLP.name);

                    node_proto.set_input(0, srcName);
                    node_proto.set_output(0, copyLP.name);
                    addLayer(dstNet, copyLP, node_proto, layer_id, outShapes);
                }
                node_proto.clear_input();
                for (size_t i = 0; i < input_names.size(); i++)
                {
                    node_proto.add_input(input_names[i]);
                }
                layerParams.set("axis", broadcast_axes[0]);
                layerParams.type = "Concat";
                node_proto.set_output(0, layerParams.name);
            }
            else
                CV_Error(Error::StsNotImplemented, "Unsupported Expand op");
        }
        else if (layer_type == "Reshape")
        {
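            // The target shape comes either from a constant second input (opset 5+)
            // or from the legacy "shape" attribute; fully-constant reshapes are folded.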
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
                    runLayer(layerParams, inputs, outputs);
                    addConstant(layerParams.name, outputs[0], constBlobs, outShapes);
                    continue;
                }
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    addConstant(layerParams.name, out, constBlobs, outShapes);
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
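            // Maps ONNX Pad to OpenCV's Padding layer. With opset 11+ the paddings
            // (and the optional fill value) arrive as extra constant inputs rather
            // than attributes, hence the input_size check below.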
            layerParams.type = "Padding";
            replaceLayerParam(layerParams, "mode", "type");
            if (node_proto.input_size() == 3 || node_proto.input_size() == 2)
            {
                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
                // We need to shuffle them to begin0, end0, begin1, end1, ...
                Mat paddings = getBlob(node_proto, constBlobs, 1).reshape(1, 2);
                paddings = paddings.t();
                layerParams.set("paddings", DictValue::arrayInt(paddings.ptr<int>(), paddings.total()));

                if (node_proto.input_size() == 3)
                {
                    Mat value = getBlob(node_proto, constBlobs, 2);
                    layerParams.set("value", value.at<float>(0));
                }
            }
        }
        else if (layer_type == "Shape")
        {
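            // Shape is resolved at import time: the (already known) input shape is
            // materialized as a 1-D CV_32S constant and folded into constBlobs.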
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat((int)inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;

            addConstant(layerParams.name, shapeMat, constBlobs, outShapes);
            continue;
        }
        else if (layer_type == "Cast")
        {
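            // Constant inputs are converted eagerly; for variable inputs Cast becomes
            // Identity, since inference in this backend runs in a single numeric type.
            // FLOAT16 maps to CV_16S below because OpenCV stores raw fp16 bits there.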
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 0);
                int type;
                switch (layerParams.get<int>("to"))
                {
                    case opencv_onnx::TensorProto_DataType_FLOAT:   type = CV_32F; break;
                    case opencv_onnx::TensorProto_DataType_UINT8:   type = CV_8U; break;
                    case opencv_onnx::TensorProto_DataType_UINT16:  type = CV_16U; break;
                    case opencv_onnx::TensorProto_DataType_FLOAT16: type = CV_16S; break;
                    case opencv_onnx::TensorProto_DataType_INT8:
                    case opencv_onnx::TensorProto_DataType_INT16:
                    case opencv_onnx::TensorProto_DataType_INT32:
                    case opencv_onnx::TensorProto_DataType_INT64:   type = CV_32S; break;
                    default: type = blob.type();
                }
                Mat dst;
                blob.convertTo(dst, type);
                dst.dims = blob.dims;
                addConstant(layerParams.name, dst, constBlobs, outShapes);
                continue;
            }
            else
                layerParams.type = "Identity";
        }
        else if (layer_type == "ConstantOfShape" || layer_type == "ConstantFill")
        {
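            // The output is a constant tensor of the requested shape, filled with
            // either the "value" attribute/blob or zero; the shape input must itself
            // be constant so the tensor can be built right away.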
            int depth = CV_32F;
            float fill_value;
            if (!layerParams.blobs.empty())
            {
                CV_Assert(!layerParams.has("value"));
                depth = layerParams.blobs[0].depth();
                Mat floats;
                layerParams.blobs[0].convertTo(floats, CV_32F);
                fill_value = floats.at<float>(0, 0);
            }
            else
                fill_value = layerParams.get("value", 0);

            MatShape inpShape = getBlob(node_proto, constBlobs, 0);
            for (int i = 0; i < inpShape.size(); i++)
                CV_CheckGT(inpShape[i], 0, "");
            Mat tensor((int)inpShape.size(), &inpShape[0], depth, Scalar(fill_value));
            addConstant(layerParams.name, tensor, constBlobs, outShapes);
            continue;
        }
        else if (layer_type == "Gather")
        {
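            // Only a single (scalar) index is supported. A constant input is sliced
            // directly; a variable input is lowered to a Slice layer, followed by a
            // Reshape that drops the gathered axis when the input has more than one
            // dimension.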
            CV_Assert(node_proto.input_size() == 2);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis", 0);

            if ((constBlobs.find(node_proto.input(0)) != constBlobs.end()))
            {
                Mat input = getBlob(node_proto, constBlobs, 0);
                Mat out;
                std::vector<cv::Range> ranges(input.dims, Range::all());
                ranges[axis] = Range(index, index + 1);

                out = input(ranges);
                MatShape outShape = shape(out);
                if (outShape.size() > 1)
                {
                    outShape.erase(outShape.begin() + axis);
                    out = out.reshape(0, outShape);
                } else {
                    out.dims = 1;
                }
                addConstant(layerParams.name, out, constBlobs, outShapes);
                continue;
            }
            else
            {
                shapeIt = outShapes.find(node_proto.input(0));
                CV_Assert(shapeIt != outShapes.end());
                MatShape inpShape = shapeIt->second;

                LayerParams sliceLp;
                sliceLp.type = "Slice";
                sliceLp.name = inpShape.size() > 1 ? layerParams.name + "/slice" : layerParams.name;
                std::vector<int> begin(inpShape.size(), 0);
                std::vector<int> end(inpShape.size(), -1);
                begin[axis] = index;
                end[axis] = index + 1;

                cv::dnn::DictValue paramBegin = cv::dnn::DictValue::arrayInt(begin.data(), begin.size());
                cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end.data(), end.size());
                sliceLp.set("begin", paramBegin);
                sliceLp.set("end", paramEnd);

                if (inpShape.size() > 1)
                {
                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(0));
                    proto.add_output(sliceLp.name);
                    addLayer(dstNet, sliceLp, proto, layer_id, outShapes);

                    inpShape.erase(inpShape.begin() + axis);
                    layerParams.type = "Reshape";
                    layerParams.set("axis", 0);
                    layerParams.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size()));
                    node_proto.set_input(0, sliceLp.name);
                }
                else
                {
                    layerParams = sliceLp;
                }
            }
        }
        else if (layer_type == "Concat")
        {
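            // If every input is constant, run the Concat layer at import time and
            // store the result as a constant instead of adding a layer to the net.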
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, (int)i);
                }
                runLayer(layerParams, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                addConstant(layerParams.name, concatenated[0], constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Resize")
        {
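            // Only constant "scales"/"sizes" inputs are supported. The blob read
            // below holds the last input: absolute output sizes when there are 4
            // inputs, or per-axis scale factors (then multiplied by the input shape)
            // when there are 3. Despite its name, the local "scales" variable further
            // down holds the input shape.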
            for (int i = 1; i < node_proto.input_size(); i++)
                CV_Assert(layer_id.find(node_proto.input(i)) == layer_id.end());

            String interp_mode = layerParams.get<String>("coordinate_transformation_mode");
            CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");

            layerParams.set("align_corners", interp_mode == "align_corners");
            Mat shapes = getBlob(node_proto, constBlobs, node_proto.input_size() - 1);
            CV_CheckEQ(shapes.size[0], 4, "");
            CV_CheckEQ(shapes.size[1], 1, "");
            CV_CheckDepth(shapes.depth(), shapes.depth() == CV_32S || shapes.depth() == CV_32F, "");
            if (shapes.depth() == CV_32F)
                shapes.convertTo(shapes, CV_32S);
            int height = shapes.at<int>(2);
            int width  = shapes.at<int>(3);
            if (node_proto.input_size() == 3)
            {
                shapeIt = outShapes.find(node_proto.input(0));
                CV_Assert(shapeIt != outShapes.end());
                MatShape scales = shapeIt->second;
                height *= scales[2];
                width  *= scales[3];
            }
            layerParams.set("width", width);
            layerParams.set("height", height);

            if (layerParams.get<String>("mode") == "linear") {
                layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
                                        "opencv_linear" : "bilinear");
            }
            replaceLayerParam(layerParams, "mode", "interpolation");
        }
        else if (layer_type == "Upsample")
        {
            // Fused from a Resize subgraph.
            if (layerParams.has("coordinate_transformation_mode"))
            {
                String interp_mode = layerParams.get<String>("coordinate_transformation_mode");
                CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");

                layerParams.set("align_corners", interp_mode == "align_corners");
                if (layerParams.get<String>("mode") == "linear")
                {
                    layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
                                            "opencv_linear" : "bilinear");
                }
            }
            if (layerParams.get<String>("mode") == "linear" && framework_name == "pytorch")
                layerParams.set("mode", "opencv_linear");

            layerParams.type = "Resize";
            if (layerParams.has("scales"))
            {
                // PyTorch layer
                DictValue scales = layerParams.get("scales");
                CV_Assert(scales.size() == 4);
                layerParams.set("zoom_factor_y", scales.getIntValue(2));
                layerParams.set("zoom_factor_x", scales.getIntValue(3));
            }
            else if (layerParams.has("height_scale") && layerParams.has("width_scale"))
            {
                // Caffe2 layer
                replaceLayerParam(layerParams, "height_scale", "zoom_factor_y");
                replaceLayerParam(layerParams, "width_scale", "zoom_factor_x");
            }
            else
            {
                // Scales as a constant input blob.
                Mat scales = getBlob(node_proto, constBlobs, 1);
                CV_Assert(scales.total() == 4);
                layerParams.set("zoom_factor_y", scales.at<float>(2));
                layerParams.set("zoom_factor_x", scales.at<float>(3));
            }
            replaceLayerParam(layerParams, "mode", "interpolation");
        }
        else if (layer_type == "SoftMax" || layer_type == "LogSoftmax")
        {
            layerParams.type = "Softmax";
            layerParams.set("log_softmax", layer_type == "LogSoftmax");
        }
        else if (layer_type == "DetectionOutput")
        {
            CV_CheckEQ(node_proto.input_size(), 3, "");
            if (constBlobs.find(node_proto.input(2)) != constBlobs.end())
            {
                Mat priors = getBlob(node_proto, constBlobs, 2);

                LayerParams constParams;
                constParams.name = layerParams.name + "/priors";
                constParams.type = "Const";
                constParams.blobs.push_back(priors);

                opencv_onnx::NodeProto priorsProto;
                priorsProto.add_output(constParams.name);
                addLayer(dstNet, constParams, priorsProto, layer_id, outShapes);

                node_proto.set_input(2, constParams.name);
            }
        }
        else
        {
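            // Fallback: the ONNX op name is used as the OpenCV layer type verbatim,
            // and any constant inputs are attached to the layer as weight blobs.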
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
        }
        addLayer(dstNet, layerParams, node_proto, layer_id, outShapes);
    }
}

Net readNetFromONNX(const String& onnxFile)
{
    ONNXImporter onnxImporter(onnxFile.c_str());
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const char* buffer, size_t sizeBuffer)
{
    ONNXImporter onnxImporter(buffer, sizeBuffer);
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const std::vector<uchar>& buffer)
{
    return readNetFromONNX(reinterpret_cast<const char*>(buffer.data()), buffer.size());
}

Mat readTensorFromONNX(const String& path)
{
    opencv_onnx::TensorProto tensor_proto;
    std::fstream input(path.c_str(), std::ios::in | std::ios::binary);
    if (!tensor_proto.ParseFromIstream(&input)) {
        CV_Error(Error::StsUnsupportedFormat, "Failed to parse ONNX tensor data");
    }
    Mat mat = getMatFromTensor(tensor_proto);
    releaseONNXTensor(tensor_proto);
    return mat;
}
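
// A minimal usage sketch of the entry points above (the model path and input
// blob are hypothetical):
//
//     Net net = readNetFromONNX("model.onnx");
//     net.setInput(blobFromImage(img, 1.0, Size(224, 224)));
//     Mat prediction = net.forward();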

CV__DNN_EXPERIMENTAL_NS_END
}} // namespace

#endif