// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

#ifdef HAVE_PROTOBUF

#include <iostream>
#include <fstream>
#include <string>
#include <limits>
#include <algorithm>

#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include "opencv-onnx.pb.h"
#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic pop
#endif

namespace cv {
namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN

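// Parses a serialized ONNX ModelProto and converts its graph into an
// equivalent cv::dnn::Net. Constant tensors (initializers) are kept aside
// and folded into layer blobs where possible.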
class ONNXImporter
{
    opencv_onnx::ModelProto model_proto;
    struct LayerInfo {
        int layerId;
        int outputId;
        LayerInfo(int _layerId, int _outputId) : layerId(_layerId), outputId(_outputId) {}
    };

    std::map<std::string, Mat> getGraphTensors(
                                    const opencv_onnx::GraphProto& graph_proto);
    Mat getBlob(const opencv_onnx::NodeProto& node_proto, const std::map<std::string, Mat>& constBlobs, int index);

    LayerParams getLayerParams(const opencv_onnx::NodeProto& node_proto);
    bool isCeilMode(const LayerParams& layerParams);

public:

    ONNXImporter(const char *onnxFile)
    {
        std::fstream input(onnxFile, std::ios::in | std::ios::binary);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model");
    }

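    // Parse a model from an in-memory byte array by wrapping it in a
    // read-only std::streambuf so that protobuf can consume it as a stream.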
    ONNXImporter(const char* buffer, size_t sizeBuffer)
    {
        struct _Buf : public std::streambuf
        {
            _Buf(const char* buffer, size_t sizeBuffer)
            {
                char* p = const_cast<char*>(buffer);
                setg(p, p, p + sizeBuffer);
            }
        };

        _Buf buf(buffer, sizeBuffer);
        std::istream input(&buf);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model from in-memory byte array.");
    }

    void populateNet(Net dstNet);
};

inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
{
    if (layerParams.has(oldKey)) {
        layerParams.set(newKey, layerParams.get(oldKey));
        layerParams.erase(oldKey);
    }
}

void releaseONNXTensor(opencv_onnx::TensorProto& tensor_proto)
{
    if (!tensor_proto.raw_data().empty()) {
        delete tensor_proto.release_raw_data();
    }
}

template<typename T1, typename T2>
void convertInt64ToInt32(const T1& src, T2& dst, int size)
{
    for (int i = 0; i < size; i++) {
        if (src[i] < std::numeric_limits<int32_t>::min() || src[i] > std::numeric_limits<int32_t>::max()) {
            CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
        }
        dst[i] = saturate_cast<int32_t>(src[i]);
    }
}

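// Converts an ONNX TensorProto into a cv::Mat: FLOAT data is copied as-is,
// DOUBLE is converted to CV_32F, and INT64 is saturated to CV_32S.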
Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto)
{
    CV_Assert(!tensor_proto.raw_data().empty() || !tensor_proto.float_data().empty()
                    || !tensor_proto.double_data().empty() || !tensor_proto.int64_data().empty());

    opencv_onnx::TensorProto_DataType datatype = tensor_proto.data_type();
    Mat blob;
    std::vector<int> sizes;
    for (int i = 0; i < tensor_proto.dims_size(); i++) {
        sizes.push_back(tensor_proto.dims(i));
    }
    if (sizes.empty())
        sizes.assign(1, 1);
    if (datatype == opencv_onnx::TensorProto_DataType_FLOAT) {
        if (!tensor_proto.float_data().empty()) {
            const ::google::protobuf::RepeatedField<float> field = tensor_proto.float_data();
            Mat(sizes, CV_32FC1, (void*)field.data()).copyTo(blob);
        }
        else {
            char* val = const_cast<char*>(tensor_proto.raw_data().c_str());
            Mat(sizes, CV_32FC1, val).copyTo(blob);
        }
    }
    else if (datatype == opencv_onnx::TensorProto_DataType_DOUBLE)
    {
        const ::google::protobuf::RepeatedField<double> field = tensor_proto.double_data();
        CV_Assert(!field.empty());
        Mat(sizes, CV_64FC1, (void*)field.data()).convertTo(blob, CV_32FC1);
    }
    else if (datatype == opencv_onnx::TensorProto_DataType_INT64)
    {
        blob.create(sizes, CV_32SC1);
        int32_t* dst = reinterpret_cast<int32_t*>(blob.data);

        if (!tensor_proto.int64_data().empty()) {
            ::google::protobuf::RepeatedField< ::google::protobuf::int64> src = tensor_proto.int64_data();
            convertInt64ToInt32(src, dst, blob.total());
        }
        else
        {
            char* val = const_cast<char*>(tensor_proto.raw_data().c_str());
            int64_t* src = reinterpret_cast<int64_t*>(val);
            convertInt64ToInt32(src, dst, blob.total());
        }
    }
    else
        CV_Error(Error::StsUnsupportedFormat, "Unsupported data type: " +
                        opencv_onnx::TensorProto_DataType_Name(datatype));
    if (tensor_proto.dims_size() == 0)
        blob.dims = 1;  // To force 1-dimensional cv::Mat for scalars.
    return blob;
}

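// Instantiates a layer and runs it immediately on constant inputs.
// Used to fold nodes whose inputs are all known at import time.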
void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
              std::vector<Mat>& outputs)
{
    Ptr<Layer> layer = LayerFactory::createLayerInstance(params.type, params);
    CV_Assert((bool)layer);

    std::vector<MatShape> inpShapes(inputs.size());
    int ddepth = CV_32F;
    for (size_t i = 0; i < inputs.size(); ++i)
    {
        inpShapes[i] = shape(inputs[i]);
        if (i > 0 && ddepth != inputs[i].depth())
            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
        ddepth = inputs[i].depth();
    }

    std::vector<MatShape> outShapes, internalShapes;
    layer->getMemoryShapes(inpShapes, 0, outShapes, internalShapes);

    std::vector<Mat> internals(internalShapes.size());
    outputs.resize(outShapes.size());
    for (size_t i = 0; i < outShapes.size(); ++i)
        outputs[i].create(outShapes[i], ddepth);
    for (size_t i = 0; i < internalShapes.size(); ++i)
        internals[i].create(internalShapes[i], ddepth);

    layer->finalize(inputs, outputs);
    layer->forward(inputs, outputs, internals);
}

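// Collects the graph initializers into a name -> Mat map of constant blobs.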
std::map<std::string, Mat> ONNXImporter::getGraphTensors(
                                        const opencv_onnx::GraphProto& graph_proto)
{
    opencv_onnx::TensorProto tensor_proto;
    std::map<std::string, Mat> layers_weights;

    for (int i = 0; i < graph_proto.initializer_size(); i++)
    {
        tensor_proto = graph_proto.initializer(i);
        Mat mat = getMatFromTensor(tensor_proto);
        releaseONNXTensor(tensor_proto);
        layers_weights.insert(std::make_pair(tensor_proto.name(), mat));
    }
    return layers_weights;
}

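// Converts a repeated int64 attribute into a 32-bit DictValue array,
// failing if any value does not fit into int32.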
static DictValue parse(const ::google::protobuf::RepeatedField< ::google::protobuf::int64>& src) {
    std::vector<int32_t> dst(src.size());
    convertInt64ToInt32(src, dst, src.size());
    return DictValue::arrayInt(&dst[0], src.size());
}

LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_proto)
{
    LayerParams lp;
    for(int i = 0; i < node_proto.attribute_size(); i++)
    {
        opencv_onnx::AttributeProto attribute_proto = node_proto.attribute(i);
        std::string attribute_name = attribute_proto.name();

        if(attribute_name == "kernel_shape")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("kernel_size", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "strides")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("stride", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "pads")
        {
            if (node_proto.op_type() == "Pad")
            {
                // Padding layer.
                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
                // We need to shuffle it to begin0, end0, begin1, end1, ...
                CV_Assert(attribute_proto.ints_size() % 2 == 0);
                const int dims = attribute_proto.ints_size() / 2;
                std::vector<int32_t> paddings;
                paddings.reserve(attribute_proto.ints_size());
                for (int i = 0; i < dims; ++i)
                {
                    paddings.push_back(attribute_proto.ints(i));
                    paddings.push_back(attribute_proto.ints(dims + i));
                }
                lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
            }
            else
            {
                // Convolution or pooling.
                CV_Assert(attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
                lp.set("pad", parse(attribute_proto.ints()));
            }
        }
        else if(attribute_name == "auto_pad")
        {
            if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
                lp.set("pad_mode", "SAME");
            }
            else if (attribute_proto.s() == "VALID") {
                lp.set("pad_mode", "VALID");
            }
        }
        else if(attribute_name == "dilations")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("dilation", parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_i())
        {
            ::google::protobuf::int64 src = attribute_proto.i();
            if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
                CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
            else
                lp.set(attribute_name, saturate_cast<int32_t>(src));
        }
        else if (attribute_proto.has_f())
        {
            lp.set(attribute_name, attribute_proto.f());
        }
        else if (attribute_proto.has_s())
        {
            lp.set(attribute_name, attribute_proto.s());
        }
        else if (attribute_proto.floats_size() > 0)
        {
            lp.set(attribute_name, DictValue::arrayReal(
                attribute_proto.floats().data(), attribute_proto.floats_size()));
        }
        else if (attribute_proto.ints_size() > 0)
        {
            lp.set(attribute_proto.name(), parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_t())
        {
            opencv_onnx::TensorProto tensor = attribute_proto.t();
            Mat blob = getMatFromTensor(tensor);
            lp.blobs.push_back(blob);
        }
        else if (attribute_proto.has_g() || attribute_proto.strings_size() > 0 ||
                    attribute_proto.tensors_size() > 0 || attribute_proto.graphs_size() > 0)
        {
            CV_Error(Error::StsNotImplemented, "Unexpected attribute type");
        }
        else
            CV_Error(Error::StsNotImplemented, "Unsupported attribute type");
    }
    return lp;
}

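// Returns the constant blob bound to the node's index-th input, or raises
// an error if that input is not a known constant.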
Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto,
                    const std::map<std::string, Mat>& constBlobs, int index)
{
    CV_Assert(index < node_proto.input_size());
    std::map<std::string, Mat>::const_iterator constBlob;
    constBlob = constBlobs.find(node_proto.input(index));
    if (constBlob == constBlobs.end()) {
        CV_Error(Error::StsObjectNotFound,
             "Blob " + node_proto.input(index) + " not found in const blobs");
    }
    return constBlob->second;
}

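// Translates the ONNX graph into dstNet: registers the network inputs,
// converts each node into a dnn layer (or folds it into a constant blob)
// and tracks output shapes so that nodes such as Shape can be resolved
// at import time.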
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();
    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // Shapes of all known blobs: graph inputs and layer outputs.
    std::map<std::string, MatShape> outShapes;
    // Add the shapes of all graph inputs: constant blobs as well as the network's real inputs.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

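    // Convert nodes one by one. Nodes that can be evaluated at import time
    // (Constant, Shape, Gather and reshapes/concats of constant inputs)
    // are stored in constBlobs instead of being added to the network.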
    for(int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;

        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", layer_type == "GlobalAveragePool" ? "AVE" : "MAX");
            layerParams.set("global_pooling", true);
        }
        else if (layer_type == "Slice")
        {
            if (layerParams.has("steps")) {
                DictValue steps = layerParams.get("steps");
                for (int i = 0; i < steps.size(); ++i) {
                    if (steps.get<int>(i) != 1)
                        CV_Error(Error::StsNotImplemented,
                                 "Slice layer only supports steps = 1");
                }
            }

            int axis = 0;
            if (layerParams.has("axes")) {
                DictValue axes = layerParams.get("axes");
                for (int i = 1; i < axes.size(); ++i) {
                    CV_Assert(axes.get<int>(i - 1) == axes.get<int>(i) - 1);
                }
                axis = axes.get<int>(0);
            }
            layerParams.set("axis", axis);

            DictValue starts = layerParams.get("starts");
            DictValue ends = layerParams.get("ends");
            CV_Assert(starts.size() == ends.size());

            std::vector<int> begin;
            std::vector<int> end;
            if (axis > 0) {
                begin.resize(axis, 0);
                end.resize(axis, -1);
            }

            for (int i = 0; i < starts.size(); ++i)
            {
                begin.push_back(starts.get<int>(i));
                int finish = ends.get<int>(i);
                end.push_back((finish < 0) ? --finish : finish); // ONNX 'ends' are exclusive (numpy-style), so shift negative ends by one
            }
            layerParams.set("begin", DictValue::arrayInt(&begin[0], begin.size()));
            layerParams.set("end", DictValue::arrayInt(&end[0], end.size()));
        }
        else if (layer_type == "Add" || layer_type == "Sum")
        {
            if (layer_id.find(node_proto.input(1)) == layer_id.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", blob.at<float>(0));
                }
                else {
                    layerParams.type = "Scale";
                    layerParams.set("bias_term", true);
                    layerParams.blobs.push_back(blob);
                }
            }
            else {
                layerParams.type = "Eltwise";
            }
        }
        else if (layer_type == "Sub")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() == 1) {
                layerParams.type = "Power";
                layerParams.set("shift", -blob.at<float>(0));
            }
            else {
                layerParams.type = "Scale";
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
            }
        }
        else if (layer_type == "Div")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(blob.type() == CV_32F, blob.total());
            if (blob.total() == 1)
            {
                layerParams.set("scale", 1.0f / blob.at<float>(0));
                layerParams.type = "Power";
            }
            else
            {
                layerParams.type = "Scale";
                divide(1.0, blob, blob);
                layerParams.blobs.push_back(blob);
                layerParams.set("bias_term", false);
            }
        }
        else if (layer_type == "Neg")
        {
            layerParams.type = "Power";
            layerParams.set("scale", -1);
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
            continue;
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1, layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "Clip")
        {
            layerParams.type = "ReLU6";
            replaceLayerParam(layerParams, "min", "min_value");
            replaceLayerParam(layerParams, "max", "max_value");
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "InstanceNormalization")
        {
            if (node_proto.input_size() != 3)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias");

            layerParams.blobs.resize(4);
            layerParams.blobs[2] = getBlob(node_proto, constBlobs, 1);  // weightData
            layerParams.blobs[3] = getBlob(node_proto, constBlobs, 2);  // biasData
            layerParams.set("has_bias", true);
            layerParams.set("has_weight", true);

            // Get number of channels in input
            int size = layerParams.blobs[2].total();
            layerParams.blobs[0] = Mat::zeros(size, 1, CV_32F); // mean
            layerParams.blobs[1] = Mat::ones(size, 1, CV_32F); // std

            LayerParams mvnParams;
            mvnParams.name = layerParams.name + "/MVN";
            mvnParams.type = "MVN";
            mvnParams.set("eps", layerParams.get<float>("epsilon"));
            layerParams.erase("epsilon");

            // Create MVN layer
            int id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
            // Connect to input
            layerId = layer_id.find(node_proto.input(0));
            CV_Assert(layerId != layer_id.end());
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
            // Add shape
            layer_id.insert(std::make_pair(mvnParams.name, LayerInfo(id, 0)));
            outShapes[mvnParams.name] = outShapes[node_proto.input(0)];

            // Redirect the BatchNorm node's input to the MVN output
            node_proto.set_input(0, mvnParams.name);
            layerParams.type = "BatchNorm";
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData = getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            Mat blob = getBlob(node_proto, constBlobs, 1);
            layerParams.blobs.push_back(blob.t());
            layerParams.set("bias_term", false);
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
        }
        else if (layer_type == "Mul")
        {
            CV_Assert(node_proto.input_size() == 2);
            if (layer_id.find(node_proto.input(1)) == layer_id.end()) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.set("scale", blob.at<float>(0));
                    layerParams.type = "Power";
                }
                else {
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else {
                layerParams.type = "Eltwise";
                layerParams.set("operation", "prod");
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1] * layerParams.get<int>("group", 1));
            layerParams.set("bias_term", node_proto.input_size() == 3);

            if (!layerParams.has("kernel_size"))
                CV_Error(Error::StsNotImplemented,
                         "Required attribute 'kernel_size' is not present.");

            if (layerParams.has("output_shape"))
            {
                const DictValue& outShape = layerParams.get("output_shape");
                DictValue strides = layerParams.get("stride");
                DictValue kernel = layerParams.get("kernel_size");

                String padMode;
                std::vector<int> adjust_pads;
                if (layerParams.has("pad_mode"))
                {
                    padMode = toUpperCase(layerParams.get<String>("pad_mode"));
                    if (padMode != "SAME" && padMode != "VALID")
                        CV_Error(Error::StsError, "Unsupported padding mode " + padMode);

                    for (int i = 0; i < strides.size(); i++)
                    {
                        int sz = outShape.get<int>(2 + i);
                        int stride = strides.get<int>(i);
                        adjust_pads.push_back(padMode == "SAME"? (sz - 1) % stride :
                                                                 (sz - kernel.get<int>(i)) % stride);
                    }
                    layerParams.set("adj", DictValue::arrayInt(&adjust_pads[0], adjust_pads.size()));
                }
            }
            else if (layerParams.has("output_padding"))
            {
                replaceLayerParam(layerParams, "output_padding", "adj");
            }
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            DictValue axes = layerParams.get("axes");
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                // Constant input.
                Mat input = getBlob(node_proto, constBlobs, 0);

                std::vector<int> dims;
                for (int j = 0; j < input.dims; j++) {
                    dims.push_back(input.size[j]);
                }
                CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
                for (int j = 0; j < axes.size(); j++) {
                    dims.insert(dims.begin() + axes.getIntValue(j), 1);
                }

                Mat out = input.reshape(0, dims);
                constBlobs.insert(std::make_pair(layerParams.name, out));
                continue;
            }

            // Variable input.
            if (axes.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");

            int dims[] = {1, -1};
            layerParams.type = "Reshape";
            layerParams.set("axis", axes.getIntValue(0));
            layerParams.set("num_axes", 1);
            layerParams.set("dim", DictValue::arrayInt(&dims[0], 2));
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
                    runLayer(layerParams, inputs, outputs);
                    constBlobs.insert(std::make_pair(layerParams.name, outputs[0]));
                    continue;
                }
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;

            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
            continue;
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            CV_Assert(layerParams.has("axis"));
            Mat input = getBlob(node_proto, constBlobs, 0);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis");

            std::vector<cv::Range> ranges(input.dims, Range::all());
            ranges[axis] = Range(index, index + 1);

            Mat out = input(ranges);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                runLayer(layerParams, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
                continue;
            }
        }
        else if (layer_type == "Upsample")
        {
            layerParams.type = "Resize";
            if (layerParams.has("scales"))
            {
                // Pytorch layer
                DictValue scales = layerParams.get("scales");
                CV_Assert(scales.size() == 4);
                layerParams.set("zoom_factor_y", scales.getIntValue(2));
                layerParams.set("zoom_factor_x", scales.getIntValue(3));
            }
            else
            {
                // Caffe2 layer
                replaceLayerParam(layerParams, "height_scale", "zoom_factor_y");
                replaceLayerParam(layerParams, "width_scale", "zoom_factor_x");
            }
            replaceLayerParam(layerParams, "mode", "interpolation");
        }
        else if (layer_type == "LogSoftmax")
        {
            layerParams.type = "Softmax";
            layerParams.set("log_softmax", true);
        }
        else
        {
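            // Unknown operation: keep its ONNX type name and attach any
            // constant inputs as layer blobs.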
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
        }

        int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
        layer_id.insert(std::make_pair(layerParams.name, LayerInfo(id, 0)));

        std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
        for (int j = 0; j < node_proto.input_size(); j++) {
            layerId = layer_id.find(node_proto.input(j));
            if (layerId != layer_id.end()) {
                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                // Collect input shapes.
                shapeIt = outShapes.find(node_proto.input(j));
                CV_Assert(shapeIt != outShapes.end());
                layerInpShapes.push_back(shapeIt->second);
            }
        }

        // Compute shape of output blob for this layer.
        Ptr<Layer> layer = dstNet.getLayer(id);
        layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
        CV_Assert(!layerOutShapes.empty());
        outShapes[layerParams.name] = layerOutShapes[0];
    }
}

Net readNetFromONNX(const String& onnxFile)
{
    ONNXImporter onnxImporter(onnxFile.c_str());
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const char* buffer, size_t sizeBuffer)
{
    ONNXImporter onnxImporter(buffer, sizeBuffer);
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const std::vector<uchar>& buffer)
{
    return readNetFromONNX(reinterpret_cast<const char*>(buffer.data()), buffer.size());
}

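// Reads a single serialized ONNX TensorProto (e.g. a .pb file) into a Mat.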
Mat readTensorFromONNX(const String& path)
{
    opencv_onnx::TensorProto tensor_proto = opencv_onnx::TensorProto();
    std::fstream input(path.c_str(), std::ios::in | std::ios::binary);
    if (!tensor_proto.ParseFromIstream(&input)) {
        CV_Error(Error::StsUnsupportedFormat, "Failed to parse data");
    }
    Mat mat = getMatFromTensor(tensor_proto);
    releaseONNXTensor(tensor_proto);
    return mat;
}

CV__DNN_EXPERIMENTAL_NS_END
}} // namespace

#endif