// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

#ifdef HAVE_PROTOBUF

#include <iostream>
#include <fstream>
#include <string>
#include <limits>
#include <algorithm>


#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include "opencv-onnx.pb.h"
#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic pop
#endif

namespace cv {
namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN

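// Importer that converts a serialized ONNX ModelProto into a cv::dnn::Net.
// Graph initializers are collected as constant blobs, and each graph node is
// translated into an OpenCV dnn layer in populateNet().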
class ONNXImporter
{
    opencv_onnx::ModelProto model_proto;
    struct LayerInfo {
        int layerId;
        int outputId;
        LayerInfo(int _layerId, int _outputId) : layerId(_layerId), outputId(_outputId) {}
    };

    std::map<std::string, Mat> getGraphTensors(
                                    const opencv_onnx::GraphProto& graph_proto);
    Mat getBlob(const opencv_onnx::NodeProto& node_proto, const std::map<std::string, Mat>& constBlobs, int index);

    LayerParams getLayerParams(const opencv_onnx::NodeProto& node_proto);
    bool isCeilMode(const LayerParams& layerParams);

public:

    ONNXImporter(const char *onnxFile)
    {
        std::fstream input(onnxFile, std::ios::in | std::ios::binary);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model");
    }

    ONNXImporter(const char* buffer, size_t sizeBuffer)
    {
        struct _Buf : public std::streambuf
        {
            _Buf(const char* buffer, size_t sizeBuffer)
            {
                char* p = const_cast<char*>(buffer);
                setg(p, p, p + sizeBuffer);
            }
        };

        _Buf buf(buffer, sizeBuffer);
        std::istream input(&buf);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model from in-memory byte array.");
    }

    void populateNet(Net dstNet);
};

inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
{
    if (layerParams.has(oldKey)) {
        layerParams.set(newKey, layerParams.get(oldKey));
        layerParams.erase(oldKey);
    }
}

void releaseONNXTensor(opencv_onnx::TensorProto& tensor_proto)
{
    if (!tensor_proto.raw_data().empty()) {
        delete tensor_proto.release_raw_data();
    }
}

template<typename T1, typename T2>
void convertInt64ToInt32(const T1& src, T2& dst, int size)
{
    for (int i = 0; i < size; i++) {
        if (src[i] < std::numeric_limits<int32_t>::min() || src[i] > std::numeric_limits<int32_t>::max()) {
            CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
        }
        dst[i] = saturate_cast<int32_t>(src[i]);
    }
}

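// Converts a TensorProto into a cv::Mat: FLOAT data is copied as CV_32F,
// DOUBLE is converted to CV_32F, and INT64 is saturated down to CV_32S.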
Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto)
{
    CV_Assert(!tensor_proto.raw_data().empty() || !tensor_proto.float_data().empty()
                    || !tensor_proto.double_data().empty() || !tensor_proto.int64_data().empty());

    opencv_onnx::TensorProto_DataType datatype = tensor_proto.data_type();
    Mat blob;
    std::vector<int> sizes;
    for (int i = 0; i < tensor_proto.dims_size(); i++) {
        sizes.push_back(tensor_proto.dims(i));
    }
    if (sizes.empty())
        sizes.assign(1, 1);
    if (datatype == opencv_onnx::TensorProto_DataType_FLOAT) {

        if (!tensor_proto.float_data().empty()) {
            const ::google::protobuf::RepeatedField<float> field = tensor_proto.float_data();
            Mat(sizes, CV_32FC1, (void*)field.data()).copyTo(blob);
        }
        else {
            char* val = const_cast<char*>(tensor_proto.raw_data().c_str());
            Mat(sizes, CV_32FC1, val).copyTo(blob);
        }
    }
    else if (datatype == opencv_onnx::TensorProto_DataType_DOUBLE)
    {
        const ::google::protobuf::RepeatedField<double> field = tensor_proto.double_data();
        CV_Assert(!field.empty());
        Mat(sizes, CV_64FC1, (void*)field.data()).convertTo(blob, CV_32FC1);
    }
    else if (datatype == opencv_onnx::TensorProto_DataType_INT64)
    {
        blob.create(sizes, CV_32SC1);
        int32_t* dst = reinterpret_cast<int32_t*>(blob.data);

        if (!tensor_proto.int64_data().empty()) {
            ::google::protobuf::RepeatedField< ::google::protobuf::int64> src = tensor_proto.int64_data();
            convertInt64ToInt32(src, dst, blob.total());
        }
        else
        {
            char* val = const_cast<char*>(tensor_proto.raw_data().c_str());
            int64_t* src = reinterpret_cast<int64_t*>(val);
            convertInt64ToInt32(src, dst, blob.total());
        }
    }
    else
        CV_Error(Error::StsUnsupportedFormat, "Unsupported data type: " +
                        opencv_onnx::TensorProto_DataType_Name(datatype));
    if (tensor_proto.dims_size() == 0)
        blob.dims = 1;  // To force 1-dimensional cv::Mat for scalars.
    return blob;
}

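// Runs a single layer eagerly on fully constant inputs so that the node can be
// folded at import time (used for Transpose, Reshape and Concat below).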
void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
              std::vector<Mat>& outputs)
{
    Ptr<Layer> layer = LayerFactory::createLayerInstance(params.type, params);
    CV_Assert((bool)layer);

    std::vector<MatShape> inpShapes(inputs.size());
    int ddepth = CV_32F;
    for (size_t i = 0; i < inputs.size(); ++i)
    {
        inpShapes[i] = shape(inputs[i]);
        if (i > 0 && ddepth != inputs[i].depth())
            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
        ddepth = inputs[i].depth();
    }

    std::vector<MatShape> outShapes, internalShapes;
    layer->getMemoryShapes(inpShapes, 0, outShapes, internalShapes);

    std::vector<Mat> internals(internalShapes.size());
    outputs.resize(outShapes.size());
    for (size_t i = 0; i < outShapes.size(); ++i)
        outputs[i].create(outShapes[i], ddepth);
    for (size_t i = 0; i < internalShapes.size(); ++i)
        internals[i].create(internalShapes[i], ddepth);

    layer->finalize(inputs, outputs);
    layer->forward(inputs, outputs, internals);
}

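// Collects the graph initializers into a name -> Mat map of constant blobs.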
std::map<std::string, Mat> ONNXImporter::getGraphTensors(
                                        const opencv_onnx::GraphProto& graph_proto)
{
    opencv_onnx::TensorProto tensor_proto;
    std::map<std::string, Mat> layers_weights;

    for (int i = 0; i < graph_proto.initializer_size(); i++)
    {
        tensor_proto = graph_proto.initializer(i);
        Mat mat = getMatFromTensor(tensor_proto);
        releaseONNXTensor(tensor_proto);
        layers_weights.insert(std::make_pair(tensor_proto.name(), mat));
    }
    return layers_weights;
}

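// Converts a repeated int64 attribute into a 32-bit DictValue array.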
static DictValue parse(const ::google::protobuf::RepeatedField< ::google::protobuf::int64>& src) {
    std::vector<int32_t> dst(src.size());
    convertInt64ToInt32(src, dst, src.size());
    return DictValue::arrayInt(&dst[0], src.size());
}

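// Translates ONNX node attributes into LayerParams, renaming the common
// attributes (kernel_shape, strides, pads, dilations, auto_pad) to their
// OpenCV dnn equivalents.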
LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_proto)
{
    LayerParams lp;
    for(int i = 0; i < node_proto.attribute_size(); i++)
    {
        opencv_onnx::AttributeProto attribute_proto = node_proto.attribute(i);
        std::string attribute_name = attribute_proto.name();

        if(attribute_name == "kernel_shape")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("kernel_size", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "strides")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("stride", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "pads")
        {
            if (node_proto.op_type() == "Pad")
            {
                // Padding layer.
                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
                // We need to shuffle it to begin0, end0, begin1, end1, ...
                CV_Assert(attribute_proto.ints_size() % 2 == 0);
                const int dims = attribute_proto.ints_size() / 2;
                std::vector<int32_t> paddings;
                paddings.reserve(attribute_proto.ints_size());
                for (int i = 0; i < dims; ++i)
                {
                    paddings.push_back(attribute_proto.ints(i));
                    paddings.push_back(attribute_proto.ints(dims + i));
                }
                lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
            }
            else
            {
                // Convolution or pooling.
                CV_Assert(attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
                lp.set("pad", parse(attribute_proto.ints()));
            }
        }
        else if(attribute_name == "auto_pad")
        {
            if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
                lp.set("pad_mode", "SAME");
            }
            else if (attribute_proto.s() == "VALID") {
                lp.set("pad_mode", "VALID");
            }
        }
        else if(attribute_name == "dilations")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("dilation", parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_i())
        {
            ::google::protobuf::int64 src = attribute_proto.i();
            if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
                CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
            else
                lp.set(attribute_name, saturate_cast<int32_t>(src));
        }
        else if (attribute_proto.has_f())
        {
            lp.set(attribute_name, attribute_proto.f());
        }
        else if (attribute_proto.has_s())
        {
            lp.set(attribute_name, attribute_proto.s());
        }
        else if (attribute_proto.floats_size() > 0)
        {
            lp.set(attribute_name, DictValue::arrayReal(
                attribute_proto.floats().data(), attribute_proto.floats_size()));
        }
        else if (attribute_proto.ints_size() > 0)
        {
            lp.set(attribute_proto.name(), parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_t())
        {
            opencv_onnx::TensorProto tensor = attribute_proto.t();
            Mat blob = getMatFromTensor(tensor);
            lp.blobs.push_back(blob);
        }
        else if (attribute_proto.has_g() || attribute_proto.strings_size() > 0 ||
                    attribute_proto.tensors_size() > 0 || attribute_proto.graphs_size() > 0)
        {
            CV_Error(Error::StsNotImplemented, "Unexpected attribute type");
        }
        else
            CV_Error(Error::StsNotImplemented, "Unsupported attribute type");
    }
    return lp;
}

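// Looks up a constant blob by the name of the node input at the given index.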
Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto,
                    const std::map<std::string, Mat>& constBlobs, int index)
{
    CV_Assert(index < node_proto.input_size());
    std::map<std::string, Mat>::const_iterator constBlob;
    constBlob = constBlobs.find(node_proto.input(index));
    if (constBlob == constBlobs.end()) {
        CV_Error(Error::StsObjectNotFound,
             "Blob " + node_proto.input(index) + " not found in const blobs");
    }
    return constBlob->second;
}

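// Builds the destination Net: registers network inputs, folds nodes whose
// inputs are fully constant, converts every remaining node into an OpenCV dnn
// layer, and propagates output shapes for use by later conversions.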
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();
    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // Shapes of the internal blobs.
    std::map<std::string, MatShape> outShapes;
    // Add all the input shapes: both the constant blobs and the network's real inputs.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

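    // Convert each graph node into a dnn layer. Nodes whose inputs are all
    // constant are evaluated here and stored as constant blobs instead.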
    for(int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;


        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool" || layer_type == "ReduceMean")
        {
            CV_Assert(node_proto.input_size() == 1);
            layerParams.type = "Pooling";
            layerParams.set("pool", layer_type == "GlobalMaxPool"? "MAX" : "AVE");
            layerParams.set("global_pooling", layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool");

            if (layer_type == "ReduceMean")
            {
                if (layerParams.get<int>("keepdims") == 0 || !layerParams.has("axes"))
                    CV_Error(Error::StsNotImplemented, "Unsupported mode of ReduceMean operation.");

                MatShape inpShape = outShapes[node_proto.input(0)];
                if (inpShape.size() != 4 && inpShape.size() != 5)
                    CV_Error(Error::StsNotImplemented, "Unsupported input shape of reduce_mean operation.");

                DictValue axes = layerParams.get("axes");
                CV_Assert(axes.size() <= inpShape.size() - 2);
                std::vector<int> kernel_size(inpShape.size() - 2, 1);
                for (int i = 0; i < axes.size(); i++) {
                    int axis = axes.get<int>(i);
                    CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
                    kernel_size[axis - 2] = inpShape[axis];
                }

                layerParams.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));
            }
        }
        else if (layer_type == "Slice")
        {
            if (layerParams.has("steps")) {
                DictValue steps = layerParams.get("steps");
                for (int i = 0; i < steps.size(); ++i) {
                    if (steps.get<int>(i) != 1)
                        CV_Error(Error::StsNotImplemented,
                                 "Slice layer only supports steps = 1");
                }
            }

            int axis = 0;
            if (layerParams.has("axes")) {
                DictValue axes = layerParams.get("axes");
                for (int i = 1; i < axes.size(); ++i) {
                    CV_Assert(axes.get<int>(i - 1) == axes.get<int>(i) - 1);
                }
                axis = axes.get<int>(0);
            }
            layerParams.set("axis", axis);

            DictValue starts = layerParams.get("starts");
            DictValue ends = layerParams.get("ends");
            CV_Assert(starts.size() == ends.size());

            std::vector<int> begin;
            std::vector<int> end;
            if (axis > 0) {
                begin.resize(axis, 0);
                end.resize(axis, -1);
            }

            for (int i = 0; i < starts.size(); ++i)
            {
                begin.push_back(starts.get<int>(i));
                int finish = ends.get<int>(i);
                end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
            }
            layerParams.set("begin", DictValue::arrayInt(&begin[0], begin.size()));
            layerParams.set("end", DictValue::arrayInt(&end[0], end.size()));
        }
        else if (layer_type == "Split")
        {
            DictValue splits = layerParams.get("split");
            const int numSplits = splits.size();
            CV_Assert(numSplits > 1);

            std::vector<int> slicePoints(numSplits - 1, splits.get<int>(0));
            for (int i = 1; i < splits.size() - 1; ++i)
            {
                slicePoints[i] = slicePoints[i - 1] + splits.get<int>(i - 1);
            }
            layerParams.set("slice_point", DictValue::arrayInt(&slicePoints[0], slicePoints.size()));
            layerParams.type = "Slice";
        }
        else if (layer_type == "Add" || layer_type == "Sum")
        {
            if (layer_id.find(node_proto.input(1)) == layer_id.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", blob.at<float>(0));
                }
                else {
                    layerParams.type = "Scale";
                    layerParams.set("bias_term", true);
                    layerParams.blobs.push_back(blob);
                }
            }
            else {
                layerParams.type = "Eltwise";
            }
        }
        else if (layer_type == "Max")
        {
            layerParams.type = "Eltwise";
            layerParams.set("operation", "max");
        }
        else if (layer_type == "Sub")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() == 1) {
                layerParams.type = "Power";
                layerParams.set("shift", -blob.at<float>(0));
            }
            else {
                layerParams.type = "Scale";
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
            }
        }
        else if (layer_type == "Div")
        {
            if (constBlobs.find(node_proto.input(1)) == constBlobs.end())
            {
                layerParams.type = "Eltwise";
                layerParams.set("operation", "div");
            }
            else
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert_N(blob.type() == CV_32F, blob.total());
                if (blob.total() == 1)
                {
                    layerParams.set("scale", 1.0f / blob.at<float>(0));
                    layerParams.type = "Power";
                }
                else
                {
                    layerParams.type = "Scale";
                    divide(1.0, blob, blob);
                    layerParams.blobs.push_back(blob);
                    layerParams.set("bias_term", false);
                }
            }
        }
        else if (layer_type == "Neg")
        {
            layerParams.type = "Power";
            layerParams.set("scale", -1);
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
            continue;
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1, layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "Clip")
        {
            layerParams.type = "ReLU6";
            replaceLayerParam(layerParams, "min", "min_value");
            replaceLayerParam(layerParams, "max", "max_value");
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "InstanceNormalization")
        {
            if (node_proto.input_size() != 3)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias");

            layerParams.blobs.resize(4);
            layerParams.blobs[2] = getBlob(node_proto, constBlobs, 1);  // weightData
            layerParams.blobs[3] = getBlob(node_proto, constBlobs, 2);  // biasData
            layerParams.set("has_bias", true);
            layerParams.set("has_weight", true);

            // Get number of channels in input
            int size = layerParams.blobs[2].total();
            layerParams.blobs[0] = Mat::zeros(size, 1, CV_32F); // mean
            layerParams.blobs[1] = Mat::ones(size, 1, CV_32F); // std

            LayerParams mvnParams;
            mvnParams.name = layerParams.name + "/MVN";
            mvnParams.type = "MVN";
            mvnParams.set("eps", layerParams.get<float>("epsilon"));
            layerParams.erase("epsilon");

            // Create MVN layer
            int id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
            // Connect to input
            layerId = layer_id.find(node_proto.input(0));
            CV_Assert(layerId != layer_id.end());
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
            // Add shape
            layer_id.insert(std::make_pair(mvnParams.name, LayerInfo(id, 0)));
            outShapes[mvnParams.name] = outShapes[node_proto.input(0)];

            // Redirect the BatchNorm node's input to the output of the MVN layer.
            node_proto.set_input(0, mvnParams.name);
            layerParams.type = "BatchNorm";
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData = getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            Mat blob = getBlob(node_proto, constBlobs, 1);
            layerParams.blobs.push_back(blob.t());
            layerParams.set("bias_term", false);
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
        }
        else if (layer_type == "Mul")
        {
            CV_Assert(node_proto.input_size() == 2);
            if (layer_id.find(node_proto.input(1)) == layer_id.end()) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.set("scale", blob.at<float>(0));
                    layerParams.type = "Power";
                }
                else {
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else {
                layerParams.type = "Eltwise";
                layerParams.set("operation", "prod");
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1] * layerParams.get<int>("group", 1));
            layerParams.set("bias_term", node_proto.input_size() == 3);

            if (!layerParams.has("kernel_size"))
                CV_Error(Error::StsNotImplemented,
                         "Required attribute 'kernel_size' is not present.");

            if (layerParams.has("output_shape"))
            {
                const DictValue& outShape = layerParams.get("output_shape");
                DictValue strides = layerParams.get("stride");
                DictValue kernel = layerParams.get("kernel_size");

                String padMode;
                std::vector<int> adjust_pads;
                if (layerParams.has("pad_mode"))
                {
                    padMode = toUpperCase(layerParams.get<String>("pad_mode"));
                    if (padMode != "SAME" && padMode != "VALID")
                        CV_Error(Error::StsError, "Unsupported padding mode " + padMode);

                    for (int i = 0; i < strides.size(); i++)
                    {
                        int sz = outShape.get<int>(2 + i);
                        int stride = strides.get<int>(i);
                        adjust_pads.push_back(padMode == "SAME"? (sz - 1) % stride :
                                                                 (sz - kernel.get<int>(i)) % stride);
                    }
                    layerParams.set("adj", DictValue::arrayInt(&adjust_pads[0], adjust_pads.size()));
                }
            }
            else if (layerParams.has("output_padding"))
            {
                replaceLayerParam(layerParams, "output_padding", "adj");
            }
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");

            CV_Assert(node_proto.input_size() == 1);
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), transposed;
                runLayer(layerParams, inputs, transposed);
                CV_Assert(transposed.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, transposed[0]));
                continue;
            }
        }
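        // ReduceL2 over one axis followed by an elementwise Div is fused into a
        // single Normalize (L2 normalization) layer; the Div node is consumed here.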
        else if (layer_type == "ReduceL2")
        {
            CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
            CV_Assert(graph_proto.node_size() > li + 1 && graph_proto.node(li + 1).op_type() == "Div");
            ++li;
            node_proto = graph_proto.node(li);
            layerParams.name = node_proto.output(0);
            layerParams.type = "Normalize";

            DictValue axes_dict = layerParams.get("axes");
            if (axes_dict.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional reduceL2");
            int axis = axes_dict.getIntValue(0);
            layerParams.set("axis", axis);
            layerParams.set("end_axis", axis);
        }
        else if (layer_type == "Squeeze")
        {
            CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
            DictValue axes_dict = layerParams.get("axes");
            if (axes_dict.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional squeeze");

            int axis = axes_dict.getIntValue(0);
            layerParams.set("axis", axis - 1);
            layerParams.set("end_axis", axis);
            layerParams.type = "Flatten";
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            DictValue axes = layerParams.get("axes");
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                // Constant input.
                Mat input = getBlob(node_proto, constBlobs, 0);

                std::vector<int> dims;
                for (int j = 0; j < input.dims; j++) {
                    dims.push_back(input.size[j]);
                }
                CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
                for (int j = 0; j < axes.size(); j++) {
                    dims.insert(dims.begin() + axes.getIntValue(j), 1);
                }

                Mat out = input.reshape(0, dims);
                constBlobs.insert(std::make_pair(layerParams.name, out));
                continue;
            }

            // Variable input.
            if (axes.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");

            MatShape inpShape = outShapes[node_proto.input(0)];
            int axis = axes.getIntValue(0);
            CV_Assert(0 <= axis && axis <= inpShape.size());
            std::vector<int> outShape = inpShape;
            outShape.insert(outShape.begin() + axis, 1);
            layerParams.type = "Reshape";
            layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
                    runLayer(layerParams, inputs, outputs);
                    constBlobs.insert(std::make_pair(layerParams.name, outputs[0]));
                    continue;
                }
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;

            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
            continue;
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            CV_Assert(layerParams.has("axis"));
            Mat input = getBlob(node_proto, constBlobs, 0);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis");

            std::vector<cv::Range> ranges(input.dims, Range::all());
            ranges[axis] = Range(index, index + 1);

            Mat out = input(ranges);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                runLayer(layerParams, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
                continue;
            }
        }
        else if (layer_type == "Upsample")
        {
            layerParams.type = "Resize";
            if (layerParams.has("scales"))
            {
                // Pytorch layer
                DictValue scales = layerParams.get("scales");
                CV_Assert(scales.size() == 4);
                layerParams.set("zoom_factor_y", scales.getIntValue(2));
                layerParams.set("zoom_factor_x", scales.getIntValue(3));
            }
            else
            {
                // Caffe2 layer
                replaceLayerParam(layerParams, "height_scale", "zoom_factor_y");
                replaceLayerParam(layerParams, "width_scale", "zoom_factor_x");
            }
            replaceLayerParam(layerParams, "mode", "interpolation");
        }
        else if (layer_type == "LogSoftmax")
        {
            layerParams.type = "Softmax";
            layerParams.set("log_softmax", true);
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
        }

        int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
        for (int i = 0; i < node_proto.output_size(); ++i)
        {
            layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(id, i)));
        }

        std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
        for (int j = 0; j < node_proto.input_size(); j++) {
            layerId = layer_id.find(node_proto.input(j));
            if (layerId != layer_id.end()) {
                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                // Collect input shapes.
                shapeIt = outShapes.find(node_proto.input(j));
                CV_Assert(shapeIt != outShapes.end());
                layerInpShapes.push_back(shapeIt->second);
            }
        }

        // Compute shape of output blob for this layer.
        Ptr<Layer> layer = dstNet.getLayer(id);
        layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
        for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
        {
            outShapes[node_proto.output(i)] = layerOutShapes[i];
        }
    }
}

Net readNetFromONNX(const String& onnxFile)
{
    ONNXImporter onnxImporter(onnxFile.c_str());
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const char* buffer, size_t sizeBuffer)
{
    ONNXImporter onnxImporter(buffer, sizeBuffer);
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const std::vector<uchar>& buffer)
{
    return readNetFromONNX(reinterpret_cast<const char*>(buffer.data()), buffer.size());
}
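
// Minimal usage sketch (illustrative only; assumes a valid ONNX model file and
// a preprocessed input blob, e.g. from cv::dnn::blobFromImage):
//   Net net = readNetFromONNX("model.onnx");
//   net.setInput(blob);
//   Mat prob = net.forward();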

Mat readTensorFromONNX(const String& path)
{
    opencv_onnx::TensorProto tensor_proto = opencv_onnx::TensorProto();
    std::fstream input(path.c_str(), std::ios::in | std::ios::binary);
    if (!tensor_proto.ParseFromIstream(&input)) {
        CV_Error(Error::StsUnsupportedFormat, "Failed to parse data");
    }
    Mat mat = getMatFromTensor(tensor_proto);
    releaseONNXTensor(tensor_proto);
    return mat;
}

CV__DNN_EXPERIMENTAL_NS_END
}} // namespace

#endif