// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

#ifdef HAVE_PROTOBUF

#include <fstream>
#include <limits>
#include <algorithm>

#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include "opencv-onnx.pb.h"
#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic pop
#endif

#include "onnx_graph_simplifier.hpp"
namespace cv {
namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN

class ONNXImporter
{
    opencv_onnx::ModelProto model_proto;

    struct LayerInfo {
        int layerId;
        int outputId;
        LayerInfo(int _layerId = 0, int _outputId = 0) : layerId(_layerId), outputId(_outputId) {}
    };

    std::map<std::string, Mat> getGraphTensors(
                                    const opencv_onnx::GraphProto& graph_proto);
    Mat getBlob(const opencv_onnx::NodeProto& node_proto, const std::map<std::string, Mat>& constBlobs, int index);

    LayerParams getLayerParams(const opencv_onnx::NodeProto& node_proto);
    bool isCeilMode(const LayerParams& layerParams);

    void addLayer(Net& dstNet, LayerParams& layerParams,
                  const opencv_onnx::NodeProto& node_proto,
                  std::map<std::string, LayerInfo>& layer_id,
                  std::map<std::string, MatShape>& outShapes);

public:
    ONNXImporter(const char *onnxFile)
    {
        std::fstream input(onnxFile, std::ios::in | std::ios::binary);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse ONNX model");
    }

    ONNXImporter(const char* buffer, size_t sizeBuffer)
    {
        // Wrap the in-memory byte array into an std::istream without copying it.
        struct _Buf : public std::streambuf
        {
            _Buf(const char* buffer, size_t sizeBuffer)
            {
                char* p = const_cast<char*>(buffer);
                setg(p, p, p + sizeBuffer);
            }
        };

        _Buf buf(buffer, sizeBuffer);
        std::istream input(&buf);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse ONNX model from in-memory byte array.");
    }

    void populateNet(Net dstNet);
};
inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
{
    if (layerParams.has(oldKey)) {
        layerParams.set(newKey, layerParams.get(oldKey));
        layerParams.erase(oldKey);
    }
}

void releaseONNXTensor(opencv_onnx::TensorProto& tensor_proto)
{
    if (!tensor_proto.raw_data().empty()) {
        delete tensor_proto.release_raw_data();
    }
}
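// Creates a single dnn layer from 'params' and evaluates it immediately on
// 'inputs'. The importer uses this to constant-fold nodes (e.g. Slice,
// Transpose, Reshape, Concat) whose inputs are all known at import time.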
void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
              std::vector<Mat>& outputs)
{
    Ptr<Layer> layer = LayerFactory::createLayerInstance(params.type, params);
    CV_Assert((bool)layer);

    std::vector<MatShape> inpShapes(inputs.size());
    int ddepth = CV_32F;
    for (size_t i = 0; i < inputs.size(); ++i)
    {
        inpShapes[i] = shape(inputs[i]);
        if (i > 0 && ddepth != inputs[i].depth())
            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
        ddepth = inputs[i].depth();
    }

    std::vector<MatShape> outShapes, internalShapes;
    layer->getMemoryShapes(inpShapes, 0, outShapes, internalShapes);

    std::vector<Mat> internals(internalShapes.size());
    outputs.resize(outShapes.size());
    for (size_t i = 0; i < outShapes.size(); ++i)
        outputs[i].create(outShapes[i], ddepth);
    for (size_t i = 0; i < internalShapes.size(); ++i)
        internals[i].create(internalShapes[i], ddepth);

    layer->finalize(inputs, outputs);
    layer->forward(inputs, outputs, internals);
}
std::map<std::string, Mat> ONNXImporter::getGraphTensors(
                                        const opencv_onnx::GraphProto& graph_proto)
{
    opencv_onnx::TensorProto tensor_proto;
    std::map<std::string, Mat> layers_weights;

    for (int i = 0; i < graph_proto.initializer_size(); i++)
    {
        tensor_proto = graph_proto.initializer(i);
        Mat mat = getMatFromTensor(tensor_proto);
        releaseONNXTensor(tensor_proto);
        layers_weights.insert(std::make_pair(tensor_proto.name(), mat));
    }
    return layers_weights;
}
// Converts an int64 protobuf field into a 32-bit integer DictValue.
static DictValue parse(const ::google::protobuf::RepeatedField< ::google::protobuf::int64>& src) {
    std::vector<int32_t> dst(src.size());
    convertInt64ToInt32(src, dst, src.size());
    return DictValue::arrayInt(&dst[0], src.size());
}
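// Translates the attributes of one ONNX node into dnn LayerParams. Attributes
// with framework-specific meaning (kernel_shape, strides, pads, dilations,
// auto_pad) are renamed to their dnn counterparts; all remaining attributes
// are stored under their original names.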
LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_proto)
{
    LayerParams lp;
    for(int i = 0; i < node_proto.attribute_size(); i++)
    {
        opencv_onnx::AttributeProto attribute_proto = node_proto.attribute(i);
        std::string attribute_name = attribute_proto.name();

        if(attribute_name == "kernel_shape")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("kernel_size", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "strides")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("stride", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "pads")
        {
            if (node_proto.op_type() == "Pad")
            {
                // Padding layer.
                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
                // We need to shuffle it to begin0, end0, begin1, end1, ...
                CV_Assert(attribute_proto.ints_size() % 2 == 0);
                const int dims = attribute_proto.ints_size() / 2;
                std::vector<int32_t> paddings;
                paddings.reserve(attribute_proto.ints_size());
                for (int i = 0; i < dims; ++i)
                {
                    paddings.push_back(attribute_proto.ints(i));
                    paddings.push_back(attribute_proto.ints(dims + i));
                }
                lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
            }
            else
            {
                // Convolution or pooling.
                CV_Assert(attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
                lp.set("pad", parse(attribute_proto.ints()));
            }
        }
        else if(attribute_name == "auto_pad")
        {
            if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
                lp.set("pad_mode", "SAME");
            }
            else if (attribute_proto.s() == "VALID") {
                lp.set("pad_mode", "VALID");
            }
        }
        else if(attribute_name == "dilations")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("dilation", parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_i())
        {
            ::google::protobuf::int64 src = attribute_proto.i();
            if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
                CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
            else
                lp.set(attribute_name, saturate_cast<int32_t>(src));
        }
        else if (attribute_proto.has_f())
        {
            lp.set(attribute_name, attribute_proto.f());
        }
        else if (attribute_proto.has_s())
        {
            lp.set(attribute_name, attribute_proto.s());
        }
        else if (attribute_proto.floats_size() > 0)
        {
            lp.set(attribute_name, DictValue::arrayReal(
                attribute_proto.floats().data(), attribute_proto.floats_size()));
        }
        else if (attribute_proto.ints_size() > 0)
        {
            lp.set(attribute_name, parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_t())
        {
            opencv_onnx::TensorProto tensor = attribute_proto.t();
            Mat blob = getMatFromTensor(tensor);
            lp.blobs.push_back(blob);
        }
        else if (attribute_proto.has_g() || attribute_proto.strings_size() > 0 ||
                 attribute_proto.tensors_size() > 0 || attribute_proto.graphs_size() > 0)
        {
            CV_Error(Error::StsNotImplemented, "Unexpected attribute type");
        }
        else
            CV_Error(Error::StsNotImplemented, "Unsupported attribute type");
    }
    return lp;
}
Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto,
                          const std::map<std::string, Mat>& constBlobs, int index)
{
    CV_Assert(index < node_proto.input_size());
    std::map<std::string, Mat>::const_iterator constBlob;
    constBlob = constBlobs.find(node_proto.input(index));
    if (constBlob == constBlobs.end()) {
        CV_Error(Error::StsObjectNotFound,
                 "Blob " + node_proto.input(index) + " not found in const blobs");
    }
    return constBlob->second;
}
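// Adds a layer to the target network: registers every ONNX output name of the
// node, connects all non-constant inputs, and runs the layer's shape inference
// so that the shapes of its outputs are recorded in 'outShapes'.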
void ONNXImporter::addLayer(Net& dstNet, LayerParams& layerParams,
                            const opencv_onnx::NodeProto& node_proto,
                            std::map<std::string, LayerInfo>& layer_id,
                            std::map<std::string, MatShape>& outShapes)
{
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;

    int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
    for (int i = 0; i < node_proto.output_size(); ++i)
    {
        layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(id, i)));
    }

    std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
    int inpNum = 0;
    for (int j = 0; j < node_proto.input_size(); j++) {
        layerId = layer_id.find(node_proto.input(j));
        if (layerId != layer_id.end()) {
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
            ++inpNum;
            // Collect input shapes.
            shapeIt = outShapes.find(node_proto.input(j));
            CV_Assert(shapeIt != outShapes.end());
            layerInpShapes.push_back(shapeIt->second);
        }
    }

    // Compute shape of output blob for this layer.
    Ptr<Layer> layer = dstNet.getLayer(id);
    layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
    for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
        outShapes[node_proto.output(i)] = layerOutShapes[i];
}
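// Registers a constant blob together with its shape so that subsequent nodes
// can look it up by name and, where possible, be folded at import time.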
static void addConstant(const std::string& name,
                        const Mat& blob,
                        std::map<std::string, Mat>& constBlobs,
                        std::map<std::string, MatShape>& outShapes)
{
    constBlobs.insert(std::make_pair(name, blob));
    outShapes.insert(std::make_pair(name, shape(blob)));
}
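// Main import entry point: folds the graph initializers into constant blobs,
// registers the declared input shapes, then translates the nodes one by one,
// constant-folding each node whose inputs are fully known.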
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();

    simplifySubgraphs(graph_proto);

    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // List of internal blob shapes.
    std::map<std::string, MatShape> outShapes;
    // Add all input shapes: both constant blobs and the network's inputs.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        if (!inpShape.empty())
        {
            inpShape[0] = std::max(inpShape[0], 1);  // It's OK to have an undetermined batch size.
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }
    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

    for(int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;

        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool" ||
                 layer_type == "ReduceMean" || layer_type == "ReduceSum" || layer_type == "ReduceMax")
        {
            CV_Assert(node_proto.input_size() == 1);
            layerParams.type = "Pooling";
            String pool;
            if (layer_type == "GlobalMaxPool" || layer_type == "ReduceMax")
                pool = "MAX";
            else if (layer_type == "ReduceSum")
                pool = "SUM";
            else
                pool = "AVE";
            layerParams.set("pool", pool);
            layerParams.set("global_pooling", !layerParams.has("axes"));
            if (layerParams.has("axes") && (layer_type == "ReduceMean" || layer_type == "ReduceSum" || layer_type == "ReduceMax"))
            {
                MatShape inpShape = outShapes[node_proto.input(0)];
                DictValue axes = layerParams.get("axes");
                bool keepdims = layerParams.get<int>("keepdims");
                MatShape targetShape = inpShape;
                for (int i = 0; i < axes.size(); i++) {
                    int axis = clamp(axes.get<int>(i), inpShape.size());
                    if (keepdims) {
                        targetShape[axis] = 1;
                    } else {
                        targetShape.erase(targetShape.begin() + axis);
                    }
                }

                if (inpShape.size() == 3 && axes.size() <= 2)
                {
                    int axis = clamp(axes.get<int>(0), inpShape.size());
                    CV_CheckNE(axis, 0, "");

                    LayerParams reshapeLp;
                    reshapeLp.name = layerParams.name + "/reshape";
                    reshapeLp.type = "Reshape";
                    CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end());
                    reshapeLp.set("axis", 0);
                    reshapeLp.set("num_axes", 1);
                    int newShape[] = {1, -1};
                    reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], 2));

                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(0));
                    proto.add_output(reshapeLp.name);
                    addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);

                    LayerParams avgLp;
                    avgLp.name = layerParams.name + "/avg";
                    avgLp.type = "Pooling";
                    CV_Assert(layer_id.find(avgLp.name) == layer_id.end());
                    avgLp.set("pool", pool);
                    if (axes.size() == 2)
                    {
                        CV_CheckEQ(clamp(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
                        CV_CheckEQ(clamp(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
                        avgLp.set("global_pooling", true);
                    }
                    else
                    {
                        avgLp.set(axis == 2 ? "global_pooling_w" : "global_pooling_h", true);
                        avgLp.set(axis == 2 ? "kernel_h" : "kernel_w", 1);
                    }

                    node_proto.set_input(0, reshapeLp.name);
                    node_proto.set_output(0, avgLp.name);
                    addLayer(dstNet, avgLp, node_proto, layer_id, outShapes);
                }
                else
                {
                    if (inpShape.size() != 4 && inpShape.size() != 5)
                        CV_Error(Error::StsNotImplemented, "Unsupported input shape of " + layer_type + " operation.");

                    CV_Assert(axes.size() <= inpShape.size() - 2);
                    std::vector<int> kernel_size(inpShape.size() - 2, 1);
                    for (int i = 0; i < axes.size(); i++) {
                        int axis = clamp(axes.get<int>(i), inpShape.size());
                        CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
                        kernel_size[axis - 2] = inpShape[axis];
                    }
                    LayerParams poolLp = layerParams;
                    poolLp.name = layerParams.name + "/avg";
                    CV_Assert(layer_id.find(poolLp.name) == layer_id.end());
                    poolLp.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));

                    node_proto.set_output(0, poolLp.name);
                    addLayer(dstNet, poolLp, node_proto, layer_id, outShapes);
                }

                layerParams.type = "Reshape";
                layerParams.set("dim", DictValue::arrayInt(&targetShape[0], targetShape.size()));

                node_proto.set_input(0, node_proto.output(0));
                node_proto.set_output(0, layerParams.name);
            }
            else if (!layerParams.has("axes") && (layer_type == "ReduceMean" || layer_type == "ReduceSum" || layer_type == "ReduceMax"))
            {
                CV_CheckEQ(layerParams.get<int>("keepdims"), 0, "layer only supports keepdims = false");

                LayerParams reshapeLp;
                reshapeLp.name = layerParams.name + "/reshape";
                reshapeLp.type = "Reshape";
                CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end());
                int newShape[] = {1, 1, 1, -1};
                reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], 4));

                opencv_onnx::NodeProto proto;
                proto.add_input(node_proto.input(0));
                proto.add_output(reshapeLp.name);
                addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);

                LayerParams poolLp = layerParams;
                poolLp.name = layerParams.name + "/pool";
                CV_Assert(layer_id.find(poolLp.name) == layer_id.end());

                node_proto.set_input(0, reshapeLp.name);
                node_proto.set_output(0, poolLp.name);
                addLayer(dstNet, poolLp, node_proto, layer_id, outShapes);

                layerParams.type = "Reshape";
                int targetShape[] = {1};
                layerParams.set("dim", DictValue::arrayInt(&targetShape[0], 1));

                node_proto.set_input(0, node_proto.output(0));
                node_proto.set_output(0, layerParams.name);
            }
        }
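        // ONNX Slice: opset 1 passes starts/ends/axes as node attributes, while
        // opset 10+ passes them as extra constant inputs. Both paths below
        // produce the begin/end arrays expected by the dnn Slice layer.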
        else if (layer_type == "Slice")
        {
            int axis = 0;
            std::vector<int> begin;
            std::vector<int> end;
            int inp_size = node_proto.input_size();

            if (inp_size == 1)
            {
                if (layerParams.has("steps"))
                {
                    DictValue steps = layerParams.get("steps");
                    for (int i = 0; i < steps.size(); ++i)
                    {
                        if (steps.get<int>(i) != 1)
                            CV_Error(Error::StsNotImplemented,
                                     "Slice layer only supports steps = 1");
                    }
                }
                if (layerParams.has("axes")) {
                    DictValue axes = layerParams.get("axes");
                    for (int i = 1; i < axes.size(); ++i) {
                        CV_Assert(axes.get<int>(i - 1) == axes.get<int>(i) - 1);
                    }
                    axis = axes.get<int>(0);
                }

                DictValue starts = layerParams.get("starts");
                DictValue ends = layerParams.get("ends");
                CV_Assert(starts.size() == ends.size());

                if (axis > 0) {
                    begin.resize(axis, 0);
                    end.resize(axis, -1);
                }
                for (int i = 0; i < starts.size(); ++i)
                {
                    begin.push_back(starts.get<int>(i));
                    int finish = ends.get<int>(i);
                    end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
                }
            } else {
                CV_Assert(inp_size >= 3);
                for (int i = 1; i < inp_size; i++) {
                    CV_Assert(constBlobs.find(node_proto.input(i)) != constBlobs.end());
                }
                Mat start_blob = getBlob(node_proto, constBlobs, 1);
                Mat end_blob = getBlob(node_proto, constBlobs, 2);
                CV_Assert(start_blob.total() == end_blob.total());

                if (inp_size > 3)
                {
                    Mat axes_blob = getBlob(node_proto, constBlobs, 3);
                    const int* axes = (int*)axes_blob.data;
                    for (int i = 1; i < axes_blob.total(); ++i) {
                        CV_Assert(axes[i - 1] == axes[i] - 1);
                    }
                    axis = axes[0];
                }

                const int* starts = start_blob.ptr<int>();
                const int* ends = end_blob.ptr<int>();
                if (axis > 0) {
                    begin.resize(axis, 0);
                    end.resize(axis, -1);
                }
                std::copy(starts, starts + start_blob.total(), std::back_inserter(begin));
                for (int i = 0; i < end_blob.total(); ++i)
                {
                    int finish = ends[i];
                    end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
                }
                if (inp_size == 5) {
                    CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
                    Mat step_blob = getBlob(node_proto, constBlobs, 4);

                    // Unusual use of the Slice op: tensor reversing.
                    // We just work around it for 2D constants.
                    if (constBlobs.find(node_proto.input(0)) != constBlobs.end() &&
                        axis == 0 &&
                        start_blob.at<int>(0) == -1 && step_blob.at<int>(0) == -1 &&
                        end_blob.at<int>(0) == std::numeric_limits<int32_t>::min())
                    {
                        Mat inp = getBlob(node_proto, constBlobs, 0);
                        if (inp.dims == 2)
                        {
                            Mat flipped;
                            flip(inp, flipped, 0);
                            addConstant(layerParams.name, flipped, constBlobs, outShapes);
                            continue;
                        }
                    }
                    CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1");
                }
            }
            layerParams.set("begin", DictValue::arrayInt(&begin[0], begin.size()));
            layerParams.set("end", DictValue::arrayInt(&end[0], end.size()));
            layerParams.set("axis", axis);

            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat inp = getBlob(node_proto, constBlobs, 0);
                std::vector<Mat> inputs, sliced;
                inputs.push_back(inp);
                runLayer(layerParams, inputs, sliced);
                CV_Assert(sliced.size() == 1);
                addConstant(layerParams.name, sliced[0], constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Split")
        {
            if (layerParams.has("split"))
            {
                DictValue splits = layerParams.get("split");
                const int numSplits = splits.size();
                CV_Assert(numSplits > 1);

                std::vector<int> slicePoints(numSplits - 1, splits.get<int>(0));
                for (int i = 1; i < splits.size() - 1; ++i)
                {
                    slicePoints[i] = slicePoints[i - 1] + splits.get<int>(i - 1);
                }
                layerParams.set("slice_point", DictValue::arrayInt(&slicePoints[0], slicePoints.size()));
            }
            else
            {
                layerParams.set("num_split", node_proto.output_size());
            }
            layerParams.type = "Slice";
        }
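        // Add/Sum/Sub is lowered depending on which inputs are constant: two
        // constants are folded directly; a scalar constant becomes a Power
        // shift; a full-shape constant becomes an extra Const input to Eltwise
        // (or a Scale bias); two same-shape variables become a plain Eltwise.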
        else if (layer_type == "Add" || layer_type == "Sum" || layer_type == "Sub")
        {
            bool isSub = layer_type == "Sub";
            CV_CheckEQ(node_proto.input_size(), 2, "");
            bool is_const_0 = layer_id.find(node_proto.input(0)) == layer_id.end();
            bool is_const_1 = layer_id.find(node_proto.input(1)) == layer_id.end();
            if (is_const_0 && is_const_1)
            {
                Mat blob_0 = getBlob(node_proto, constBlobs, 0);
                Mat blob_1 = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob_0.size == blob_1.size);
                Mat output = isSub ? (blob_0 - blob_1) : (blob_0 + blob_1);
                addConstant(layerParams.name, output, constBlobs, outShapes);
                continue;
            }
            else if (is_const_0 || is_const_1)
            {
                int const_blob_id = is_const_0 ? 0 : 1;
                Mat blob = getBlob(node_proto, constBlobs, const_blob_id);
                int blob_total = blob.total();
                if (blob_total == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", (isSub ? -1 : 1) * blob.at<float>(0));
                }
                else {
                    MatShape inpShape = outShapes[node_proto.input(1 - const_blob_id)];
                    if (shape(blob) == inpShape)
                    {
                        LayerParams constParams;
                        constParams.name = layerParams.name + "/const";
                        constParams.type = "Const";
                        constParams.blobs.push_back((isSub ? -1 : 1) * blob);
                        int id = dstNet.addLayer(constParams.name, constParams.type, constParams);
                        layer_id.insert(std::make_pair(constParams.name, LayerInfo(id, 0)));
                        outShapes[constParams.name] = shape(blob);

                        layerParams.type = "Eltwise";
                        node_proto.set_input(const_blob_id, constParams.name);
                    }
                    else
                    {
                        layerParams.type = "Scale";
                        layerParams.set("bias_term", true);
                        int axis = 1;
                        for (int i = 0; i < graph_proto.initializer_size(); i++)
                        {
                            opencv_onnx::TensorProto tensor_proto = graph_proto.initializer(i);
                            if (tensor_proto.name() == node_proto.input(const_blob_id))
                            {
                                axis = inpShape.size() - tensor_proto.dims_size();
                                break;
                            }
                        }
                        layerParams.set("axis", axis);
                        blob = blob.reshape(1, 1);
                        layerParams.blobs.push_back((isSub ? -1 : 1) * blob);
                    }
                }
            }
            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
            {
                layerParams.type = "Eltwise";
                if (isSub)
                {
                    static float subCoeffs[] = {1.f, -1.f};
                    layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
                }
            }
            else
            {
                if (isSub)
                {
                    LayerParams powerParams;
                    powerParams.name = layerParams.name + "/neg";
                    powerParams.type = "Power";
                    powerParams.set("scale", -1);

                    // Create Power layer
                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                    // Connect to input
                    layerId = layer_id.find(node_proto.input(1));
                    CV_Assert(layerId != layer_id.end());
                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                    // Add shape
                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];

                    // Replace input to Power
                    node_proto.set_input(1, powerParams.name);
                }
                layerParams.type = "Scale";
                layerParams.set("bias_term", true);
            }
        }
        else if (layer_type == "Pow")
        {
            if (layer_id.find(node_proto.input(1)) != layer_id.end())
                CV_Error(Error::StsNotImplemented, "Unsupported Pow op with variable power");

            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() != 1)
                CV_Error(Error::StsNotImplemented, "Pow op supports only scalar power");

            blob.convertTo(blob, CV_32F);
            layerParams.type = "Power";
            layerParams.set("power", blob.at<float>(0));
        }
        else if (layer_type == "Max")
        {
            layerParams.type = "Eltwise";
            layerParams.set("operation", "max");
        }
        else if (layer_type == "Neg")
        {
            layerParams.type = "Power";
            layerParams.set("scale", -1);
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            addConstant(layerParams.name, layerParams.blobs[0], constBlobs, outShapes);
            continue;
        }
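        // ONNX packs the input weights, recurrent weights and biases of all
        // LSTM gates into single tensors. The block below swaps two gate
        // blocks to match the gate order expected by the dnn LSTM layer (see
        // the IFGO->IGFO comment) and sums the two halves of the bias tensor.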
        else if (layer_type == "LSTM")
        {
            LayerParams lstmParams = layerParams;
            lstmParams.name += "/lstm";

            // https://pytorch.org/docs/stable/nn.html#lstm
            CV_Assert(node_proto.input_size() == 7);
            Mat Wx = getBlob(node_proto, constBlobs, 1);
            Mat Wh = getBlob(node_proto, constBlobs, 2);
            Mat b = getBlob(node_proto, constBlobs, 3);
            CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 5)), 0, "Unsupported non-zero initial_h");
            CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 6)), 0, "Unsupported non-zero initial_c");
            b = b.reshape(1, b.size[0]);

            const int numHidden = lstmParams.get<int>("hidden_size");
            const int numDirs = Wx.size[0];  // Is 1 for forward only and 2 for bidirectional LSTM.
            const int numFeatures = Wx.size[2];
            Mat bx = b.colRange(0, b.cols / 2);
            Mat bh = b.colRange(b.cols / 2, b.cols);
            b = bx + bh;

            // IFGO->IGFO
            for (int k = 0; k < numDirs; ++k)
            {
                float* WxData = Wx.ptr<float>(k);
                float* WhData = Wh.ptr<float>(k);
                float* biasData = b.ptr<float>(k);
                for (int j = 0; j < numHidden; ++j)
                {
                    for (int i = 0; i < numFeatures; ++i)
                    {
                        std::swap(WxData[(numHidden + j) * numFeatures + i],
                                  WxData[(numHidden * 2 + j) * numFeatures + i]);
                    }
                    for (int i = 0; i < numHidden; ++i)
                    {
                        std::swap(WhData[(numHidden + j) * numHidden + i],
                                  WhData[(numHidden * 2 + j) * numHidden + i]);
                    }
                    std::swap(biasData[numHidden + j], biasData[numHidden * 2 + j]);
                }
            }
            Wx = Wx.reshape(1, Wx.size[0] * Wx.size[1]);
            Wh = Wh.reshape(1, Wh.size[0] * Wh.size[1]);

            lstmParams.blobs.resize(3);
            lstmParams.blobs[0] = Wh;
            lstmParams.blobs[1] = Wx;
            lstmParams.blobs[2] = b;
            lstmParams.set("bidirectional", lstmParams.get<String>("direction", "") == "bidirectional");

            node_proto.set_output(0, lstmParams.name);  // Set a different name so output shapes are registered on it.
            addLayer(dstNet, lstmParams, node_proto, layer_id, outShapes);

            MatShape lstmShape = outShapes[node_proto.output(0)];

            // Add fake dimension of 1, as ONNX does.
            lstmShape.insert(lstmShape.begin() + 1, 1);

            layerParams.type = "Reshape";
            layerParams.set("dim", DictValue::arrayInt(&lstmShape[0], lstmShape.size()));
            node_proto.set_input(0, lstmParams.name);   // redirect input to LSTM
            node_proto.set_output(0, layerParams.name); // keep the original LSTM's name
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1, layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "Clip")
        {
            layerParams.type = "ReLU6";
            replaceLayerParam(layerParams, "min", "min_value");
            replaceLayerParam(layerParams, "max", "max_value");
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "Relu")
        {
            layerParams.type = "ReLU";
        }
        else if (layer_type == "Elu")
        {
            layerParams.type = "ELU";
        }
        else if (layer_type == "Tanh")
        {
            layerParams.type = "TanH";
        }
        else if (layer_type == "PRelu")
        {
            layerParams.type = "PReLU";
            layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "InstanceNormalization")
        {
            if (node_proto.input_size() != 3)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias");

            layerParams.blobs.resize(4);
            layerParams.blobs[2] = getBlob(node_proto, constBlobs, 1);  // weightData
            layerParams.blobs[3] = getBlob(node_proto, constBlobs, 2);  // biasData
            layerParams.set("has_bias", true);
            layerParams.set("has_weight", true);

            // Get number of channels in input
            int size = layerParams.blobs[2].total();
            layerParams.blobs[0] = Mat::zeros(size, 1, CV_32F); // mean
            layerParams.blobs[1] = Mat::ones(size, 1, CV_32F);  // std

            LayerParams mvnParams;
            mvnParams.name = layerParams.name + "/MVN";
            mvnParams.type = "MVN";
            mvnParams.set("eps", layerParams.get<float>("epsilon"));
            layerParams.erase("epsilon");

            // Create MVN layer
            int id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
            // Connect to input
            layerId = layer_id.find(node_proto.input(0));
            CV_Assert(layerId != layer_id.end());
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
            // Add shape
            layer_id.insert(std::make_pair(mvnParams.name, LayerInfo(id, 0)));
            outShapes[mvnParams.name] = outShapes[node_proto.input(0)];

            // Replace BatchNorm's input with MVN
            node_proto.set_input(0, mvnParams.name);
            layerParams.type = "BatchNorm";
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData = getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1)); // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat inputBuf = getBlob(node_proto, constBlobs, 0);

                LayerParams constParams;
                constParams.name = node_proto.input(0);
                constParams.type = "Const";
                constParams.blobs.push_back(inputBuf);

                opencv_onnx::NodeProto proto;
                proto.add_output(constParams.name);
                addLayer(dstNet, constParams, proto, layer_id, outShapes);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            layerParams.set("bias_term", false);
            CV_Assert(constBlobs.find(node_proto.input(0)) == constBlobs.end());
            int firstInpDims = outShapes[node_proto.input(0)].size();
            int secondInpDims;

            if (constBlobs.find(node_proto.input(1)) != constBlobs.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                secondInpDims = blob.dims;
                layerParams.blobs.push_back(blob.t());
                layerParams.set("num_output", layerParams.blobs[0].size[0]);
            } else {
                secondInpDims = outShapes[node_proto.input(1)].size();
            }
            layerParams.set("axis", firstInpDims - secondInpDims + 1);
        }
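        // Mul/Div with one constant operand is lowered to Power (scalar
        // constant) or Scale; two same-shape variable operands become an
        // element-wise Eltwise; division by a variable is rewritten as a
        // multiplication by its reciprocal through an auxiliary Power layer.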
        else if (layer_type == "Mul" || layer_type == "Div")
        {
            CV_Assert(node_proto.input_size() == 2);

            bool isDiv = layer_type == "Div";
            int constId = -1;
            bool haveVariables = false;
            for (int i = 0; i < 2; ++i)
            {
                if (constBlobs.find(node_proto.input(i)) != constBlobs.end())
                    constId = i;
                else
                    haveVariables = true;
            }
            if (constId != -1 && haveVariables)
            {
                Mat blob = getBlob(node_proto, constBlobs, constId);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    float coeff = isDiv ? 1.0 / blob.at<float>(0) : blob.at<float>(0);
                    layerParams.set("scale", coeff);
                    layerParams.type = "Power";
                }
                else {
                    if (isDiv)
                        divide(1.0, blob, blob);
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
            {
                layerParams.type = "Eltwise";
                layerParams.set("operation", isDiv ? "div" : "prod");
            }
            else
            {
                // The Scale layer allocates its output with the first input's shape.
                if (total(outShapes[node_proto.input(0)]) < total(outShapes[node_proto.input(1)]))
                {
                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(1));
                    proto.add_input(node_proto.input(0));
                    proto.add_output(layerParams.name);
                    node_proto = proto;
                }

                if (isDiv)
                {
                    LayerParams powerParams;
                    powerParams.name = layerParams.name + "/inv";
                    powerParams.type = "Power";
                    powerParams.set("power", -1);

                    // Create Power layer
                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                    // Connect to input
                    layerId = layer_id.find(node_proto.input(1));
                    CV_Assert(layerId != layer_id.end());
                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                    // Add shape
                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];

                    // Replace input to Power
                    node_proto.set_input(1, powerParams.name);
                }
                layerParams.type = "Scale";
            }

            if (!haveVariables)
            {
                Mat inp0 = getBlob(node_proto, constBlobs, 0);
                Mat inp1 = getBlob(node_proto, constBlobs, 1);
                if (inp0.size != inp1.size && inp1.total() != 1)
                    CV_Error(Error::StsNotImplemented, "Constant multiply with different shapes");

                Mat out = isDiv ? inp0 / inp1 : inp0.mul(inp1);
                out = out.reshape(1, inp0.dims, inp0.size);
                out.dims = inp0.dims;  // to work around dims == 1
                addConstant(layerParams.name, out, constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                if (constBlobs.find(node_proto.input(j)) != constBlobs.end())
                {
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
                }
            }
            int outCn = layerParams.blobs.empty() ? outShapes[node_proto.input(1)][0] : layerParams.blobs[0].size[0];
            layerParams.set("num_output", outCn);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1] * layerParams.get<int>("group", 1));
            layerParams.set("bias_term", node_proto.input_size() == 3);

            if (!layerParams.has("kernel_size"))
                CV_Error(Error::StsNotImplemented,
                         "Required attribute 'kernel_size' is not present.");

            if (layerParams.has("output_shape"))
            {
                const DictValue& outShape = layerParams.get("output_shape");
                DictValue strides = layerParams.get("stride");
                DictValue kernel = layerParams.get("kernel_size");

                String padMode;
                std::vector<int> adjust_pads;
                if (layerParams.has("pad_mode"))
                {
                    padMode = toUpperCase(layerParams.get<String>("pad_mode"));
                    if (padMode != "SAME" && padMode != "VALID")
                        CV_Error(Error::StsError, "Unsupported padding mode " + padMode);

                    for (int i = 0; i < strides.size(); i++)
                    {
                        int sz = outShape.get<int>(2 + i);
                        int stride = strides.get<int>(i);
                        adjust_pads.push_back(padMode == "SAME"? (sz - 1) % stride :
                                                                 (sz - kernel.get<int>(i)) % stride);
                    }
                    layerParams.set("adj", DictValue::arrayInt(&adjust_pads[0], adjust_pads.size()));
                }
            }
            else if (layerParams.has("output_padding"))
            {
                replaceLayerParam(layerParams, "output_padding", "adj");
            }
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");

            CV_Assert(node_proto.input_size() == 1);
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), transposed;
                runLayer(layerParams, inputs, transposed);
                CV_Assert(transposed.size() == 1);
                addConstant(layerParams.name, transposed[0], constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Squeeze")
        {
            CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
            DictValue axes_dict = layerParams.get("axes");
            MatShape inpShape = outShapes[node_proto.input(0)];

            std::vector<bool> maskedAxes(inpShape.size(), false);
            for (int i = 0; i < axes_dict.size(); ++i)
            {
                int axis = axes_dict.getIntValue(i);
                CV_CheckLE(axis, static_cast<int>(inpShape.size()), "Squeeze axis");
                maskedAxes[axis] = inpShape[axis] == 1;
            }
            MatShape outShape;
            for (int i = 0; i < inpShape.size(); ++i)
            {
                if (!maskedAxes[i])
                    outShape.push_back(inpShape[i]);
            }
            if (outShape.size() != inpShape.size())
            {
                layerParams.type = "Reshape";
                layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
            }
            else
                layerParams.type = "Identity";

            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat inp = getBlob(node_proto, constBlobs, 0);
                Mat out = inp.reshape(1, outShape);
                out.dims = outShape.size();  // to work around dims == 1
                addConstant(layerParams.name, out, constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Flatten")
        {
            CV_CheckEQ(node_proto.input_size(), 1, "");
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat input = getBlob(node_proto, constBlobs, 0);
                int axis = clamp(layerParams.get<int>("axis", 1), input.dims);

                std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
                out_size.push_back(input.total(axis));
                Mat output = input.reshape(1, out_size);
                addConstant(layerParams.name, output, constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            DictValue axes = layerParams.get("axes");
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                // Constant input: fold immediately.
                Mat input = getBlob(node_proto, constBlobs, 0);

                std::vector<int> dims;
                for (int j = 0; j < input.dims; j++) {
                    dims.push_back(input.size[j]);
                }
                CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
                for (int j = 0; j < axes.size(); j++) {
                    dims.insert(dims.begin() + axes.getIntValue(j), 1);
                }

                Mat out = input.reshape(0, dims);
                addConstant(layerParams.name, out, constBlobs, outShapes);
                continue;
            }

            // Variable input.
            if (axes.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");

            MatShape inpShape = outShapes[node_proto.input(0)];
            int axis = axes.getIntValue(0);
            CV_Assert(0 <= axis && axis <= inpShape.size());
            std::vector<int> outShape = inpShape;
            outShape.insert(outShape.begin() + axis, 1);
            layerParams.type = "Reshape";
            layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
        }
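        // Expand broadcasts the input to a target shape. A constant input is
        // folded with cv::repeat; a variable input is emulated either with a
        // Scale layer against a Const tensor of ones or by concatenating
        // Identity copies along the broadcast axis.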
        else if (layer_type == "Expand")
        {
            CV_CheckEQ(node_proto.input_size(), 2, "");
            CV_Assert(constBlobs.find(node_proto.input(1)) != constBlobs.end());
            Mat newShapeMat = getBlob(node_proto, constBlobs, 1);
            MatShape targetShape(newShapeMat.ptr<int>(), newShapeMat.ptr<int>() + newShapeMat.total());

            MatShape inpShape;
            bool haveVariables = constBlobs.find(node_proto.input(0)) == constBlobs.end();
            if (haveVariables)
            {
                shapeIt = outShapes.find(node_proto.input(0));
                CV_Assert(shapeIt != outShapes.end());
                inpShape = shapeIt->second;
            }
            else
            {
                inpShape = shape(getBlob(node_proto, constBlobs, 0));
            }

            String srcName = node_proto.input(0);
            // Unsqueeze and repeat along new axis
            if (targetShape.size() == inpShape.size() + 1)
            {
                for (int i = 0; i < targetShape.size(); i++)
                {
                    if (targetShape[i] == -1 && i < inpShape.size())
                        targetShape[i] = inpShape[i];
                    else if (i < inpShape.size() && targetShape[i] != inpShape[i])
                        inpShape.insert(inpShape.begin() + i, 1);
                }
                if (haveVariables)
                {
                    LayerParams reshapeLp;
                    reshapeLp.name = layerParams.name + "/reshape";
                    reshapeLp.type = "Reshape";
                    CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end());
                    reshapeLp.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size()));

                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(0));
                    proto.add_output(reshapeLp.name);
                    addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);
                    srcName = reshapeLp.name;
                }
            }
            CV_CheckEQ(inpShape.size(), targetShape.size(), "Unsupported Expand op with different dims");

            std::vector<int> broadcast_axes;
            for (int i = 0; i < targetShape.size(); i++)
            {
                if (targetShape[i] != inpShape[i])
                {
                    if (inpShape[i] == 1)
                        broadcast_axes.push_back(i);
                    else
                        CV_Error(Error::StsError, format("Could not be broadcast by axis: %d", i));
                }
            }

            if (!haveVariables)
            {
                if (broadcast_axes.size() != 1)
                    CV_Error(Error::StsNotImplemented, "Expand op doesn't support multiple axes for constant input");

                Mat input = getBlob(node_proto, constBlobs, 0);
                input = input.reshape(0, total(inpShape, 0, broadcast_axes[0]));
                Mat output = cv::repeat(input, 1, targetShape[broadcast_axes[0]]);
                output = output.reshape(0, targetShape);
                addConstant(layerParams.name, output, constBlobs, outShapes);
                continue;
            }

            if (broadcast_axes.size() == 2 &&
                broadcast_axes[0] == broadcast_axes[1] - 1 && broadcast_axes[1] == inpShape.size() - 1)
            {
                LayerParams constParams;
                constParams.name = layerParams.name + "/const";
                CV_Assert(layer_id.find(constParams.name) == layer_id.end());
                constParams.type = "Const";

                Mat inp = Mat::ones(newShapeMat.total(), newShapeMat.ptr<int>(), CV_32F);
                constParams.blobs.push_back(inp);

                opencv_onnx::NodeProto proto;
                proto.add_output(constParams.name);
                addLayer(dstNet, constParams, proto, layer_id, outShapes);

                layerParams.type = "Scale";
                layerParams.set("bias_term", false);
                node_proto.set_input(0, constParams.name);
                node_proto.set_input(1, shapeIt->first);
            }
            else if (broadcast_axes.size() == 1 && broadcast_axes[0] <= 1)
            {
                String base_name = layerParams.name + "/copy_";
                std::vector<std::string> input_names;
                for (int j = 0; j < targetShape[broadcast_axes[0]]; j++)
                {
                    std::ostringstream ss;
                    ss << j;
                    LayerParams copyLP;
                    copyLP.name = base_name + ss.str();
                    copyLP.type = "Identity";
                    CV_Assert(layer_id.find(copyLP.name) == layer_id.end());
                    input_names.push_back(copyLP.name);

                    node_proto.set_input(0, srcName);
                    node_proto.set_output(0, copyLP.name);
                    addLayer(dstNet, copyLP, node_proto, layer_id, outShapes);
                }
                node_proto.clear_input();
                for (int i = 0; i < input_names.size(); i++)
                {
                    node_proto.add_input(input_names[i]);
                }
                layerParams.set("axis", broadcast_axes[0]);
                layerParams.type = "Concat";
                node_proto.set_output(0, layerParams.name);
            }
            else
                CV_Error(Error::StsNotImplemented, "Unsupported Expand op");
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
                    runLayer(layerParams, inputs, outputs);
                    addConstant(layerParams.name, outputs[0], constBlobs, outShapes);
                    continue;
                }
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    addConstant(layerParams.name, out, constBlobs, outShapes);
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
            replaceLayerParam(layerParams, "mode", "type");
            if (node_proto.input_size() == 3 || node_proto.input_size() == 2)
            {
                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
                // We need to shuffle it to begin0, end0, begin1, end1, ...
                Mat paddings = getBlob(node_proto, constBlobs, 1).reshape(1, 2);
                paddings = paddings.t();
                layerParams.set("paddings", DictValue::arrayInt(paddings.ptr<int>(), paddings.total()));

                if (node_proto.input_size() == 3)
                {
                    Mat value = getBlob(node_proto, constBlobs, 2);
                    layerParams.set("value", value.at<float>(0));
                }
            }
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;

            addConstant(layerParams.name, shapeMat, constBlobs, outShapes);
            continue;
        }
        else if (layer_type == "Cast")
        {
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 0);
                int type;
                switch (layerParams.get<int>("to"))
                {
                    case opencv_onnx::TensorProto_DataType_FLOAT:   type = CV_32F; break;
                    case opencv_onnx::TensorProto_DataType_UINT8:   type = CV_8U; break;
                    case opencv_onnx::TensorProto_DataType_UINT16:  type = CV_16U; break;
                    case opencv_onnx::TensorProto_DataType_FLOAT16: type = CV_16S; break;
                    case opencv_onnx::TensorProto_DataType_INT8:
                    case opencv_onnx::TensorProto_DataType_INT16:
                    case opencv_onnx::TensorProto_DataType_INT32:
                    case opencv_onnx::TensorProto_DataType_INT64:   type = CV_32S; break;
                    default: type = blob.type();
                }
                Mat dst;
                blob.convertTo(dst, type);
                dst.dims = blob.dims;
                addConstant(layerParams.name, dst, constBlobs, outShapes);
                continue;
            }
            else
                layerParams.type = "Identity";
        }
        else if (layer_type == "ConstantOfShape" || layer_type == "ConstantFill")
        {
            int depth = CV_32F;
            float fill_value;
            if (!layerParams.blobs.empty())
            {
                CV_Assert(!layerParams.has("value"));
                depth = layerParams.blobs[0].depth();
                Mat floats;
                layerParams.blobs[0].convertTo(floats, CV_32F);
                fill_value = floats.at<float>(0, 0);
            }
            else
                fill_value = layerParams.get("value", 0);

            MatShape inpShape = getBlob(node_proto, constBlobs, 0);
            for (int i = 0; i < inpShape.size(); i++)
                CV_CheckGT(inpShape[i], 0, "");
            Mat tensor(inpShape.size(), &inpShape[0], depth, Scalar(fill_value));
            addConstant(layerParams.name, tensor, constBlobs, outShapes);
            continue;
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis", 0);

            if ((constBlobs.find(node_proto.input(0)) != constBlobs.end()))
            {
                Mat input = getBlob(node_proto, constBlobs, 0);
                Mat out;
                std::vector<cv::Range> ranges(input.dims, Range::all());
                ranges[axis] = Range(index, index + 1);

                out = input(ranges);
                MatShape outShape = shape(out);
                if (outShape.size() > 1)
                {
                    outShape.erase(outShape.begin() + axis);
                    out.reshape(0, outShape);
                }
                else
                    out.dims = 1;
                addConstant(layerParams.name, out, constBlobs, outShapes);
                continue;
            }
            else
            {
                shapeIt = outShapes.find(node_proto.input(0));
                CV_Assert(shapeIt != outShapes.end());
                MatShape inpShape = shapeIt->second;

                LayerParams sliceLp;
                sliceLp.type = "Slice";
                sliceLp.name = inpShape.size() > 1 ? layerParams.name + "/slice" : layerParams.name;
                std::vector<int> begin(inpShape.size(), 0);
                std::vector<int> end(inpShape.size(), -1);
                begin[axis] = index;
                end[axis] = index + 1;

                cv::dnn::DictValue paramBegin = cv::dnn::DictValue::arrayInt(begin.data(), begin.size());
                cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end.data(), end.size());
                sliceLp.set("begin", paramBegin);
                sliceLp.set("end", paramEnd);

                if (inpShape.size() > 1)
                {
                    opencv_onnx::NodeProto proto;
                    proto.add_input(node_proto.input(0));
                    proto.add_output(sliceLp.name);
                    addLayer(dstNet, sliceLp, proto, layer_id, outShapes);

                    inpShape.erase(inpShape.begin() + axis);
                    layerParams.type = "Reshape";
                    layerParams.set("axis", 0);
                    layerParams.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size()));
                    node_proto.set_input(0, sliceLp.name);
                }
                else
                {
                    layerParams = sliceLp;
                }
            }
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                runLayer(layerParams, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                addConstant(layerParams.name, concatenated[0], constBlobs, outShapes);
                continue;
            }
        }
        else if (layer_type == "Resize")
        {
            for (int i = 1; i < node_proto.input_size(); i++)
                CV_Assert(layer_id.find(node_proto.input(i)) == layer_id.end());

            String interp_mode = layerParams.get<String>("coordinate_transformation_mode");
            CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");

            layerParams.set("align_corners", interp_mode == "align_corners");
            Mat shapes = getBlob(node_proto, constBlobs, node_proto.input_size() - 1);
            CV_CheckEQ(shapes.size[0], 4, "");
            CV_CheckEQ(shapes.size[1], 1, "");
            CV_CheckDepth(shapes.depth(), shapes.depth() == CV_32S || shapes.depth() == CV_32F, "");
            if (shapes.depth() == CV_32F)
                shapes.convertTo(shapes, CV_32S);
            int height = shapes.at<int>(2);
            int width = shapes.at<int>(3);
            if (node_proto.input_size() == 3)
            {
                shapeIt = outShapes.find(node_proto.input(0));
                CV_Assert(shapeIt != outShapes.end());
                MatShape scales = shapeIt->second;
                height *= scales[2];
                width *= scales[3];
            }
            layerParams.set("width", width);
            layerParams.set("height", height);

            if (layerParams.get<String>("mode") == "linear") {
                layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
                                        "opencv_linear" : "bilinear");
            }
            replaceLayerParam(layerParams, "mode", "interpolation");
        }
        else if (layer_type == "Upsample")
        {
            // fused from Resize subgraph
            if (layerParams.has("coordinate_transformation_mode"))
            {
                String interp_mode = layerParams.get<String>("coordinate_transformation_mode");
                CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");

                layerParams.set("align_corners", interp_mode == "align_corners");
                if (layerParams.get<String>("mode") == "linear")
                {
                    layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
                                            "opencv_linear" : "bilinear");
                }
            }
            if (layerParams.get<String>("mode") == "linear" && framework_name == "pytorch")
                layerParams.set("mode", "opencv_linear");

            layerParams.type = "Resize";
            if (layerParams.has("scales"))
            {
                // PyTorch layer
                DictValue scales = layerParams.get("scales");
                CV_Assert(scales.size() == 4);
                layerParams.set("zoom_factor_y", scales.getIntValue(2));
                layerParams.set("zoom_factor_x", scales.getIntValue(3));
            }
            else if (layerParams.has("height_scale") && layerParams.has("width_scale"))
            {
                // Caffe2 layer
                replaceLayerParam(layerParams, "height_scale", "zoom_factor_y");
                replaceLayerParam(layerParams, "width_scale", "zoom_factor_x");
            }
            else
            {
                // scales as input
                Mat scales = getBlob(node_proto, constBlobs, 1);
                CV_Assert(scales.total() == 4);
                layerParams.set("zoom_factor_y", scales.at<float>(2));
                layerParams.set("zoom_factor_x", scales.at<float>(3));
            }
            replaceLayerParam(layerParams, "mode", "interpolation");
        }
        else if (layer_type == "SoftMax" || layer_type == "LogSoftmax")
        {
            layerParams.type = "Softmax";
            layerParams.set("log_softmax", layer_type == "LogSoftmax");
        }
        else if (layer_type == "DetectionOutput")
        {
            CV_CheckEQ(node_proto.input_size(), 3, "");
            if (constBlobs.find(node_proto.input(2)) != constBlobs.end())
            {
                Mat priors = getBlob(node_proto, constBlobs, 2);

                LayerParams constParams;
                constParams.name = layerParams.name + "/priors";
                constParams.type = "Const";
                constParams.blobs.push_back(priors);

                opencv_onnx::NodeProto priorsProto;
                priorsProto.add_output(constParams.name);
                addLayer(dstNet, constParams, priorsProto, layer_id, outShapes);

                node_proto.set_input(2, constParams.name);
            }
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
        }
        addLayer(dstNet, layerParams, node_proto, layer_id, outShapes);
    }
}
Net readNetFromONNX(const String& onnxFile)
{
    ONNXImporter onnxImporter(onnxFile.c_str());
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const char* buffer, size_t sizeBuffer)
{
    ONNXImporter onnxImporter(buffer, sizeBuffer);
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const std::vector<uchar>& buffer)
{
    return readNetFromONNX(reinterpret_cast<const char*>(buffer.data()), buffer.size());
}
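// Usage sketch (illustrative only; assumes a file "model.onnx" exists and that
// the blobFromImage preprocessing matches the model's training setup):
//
//     Net net = readNetFromONNX("model.onnx");
//     Mat blob = blobFromImage(img, 1.0 / 255, Size(224, 224));
//     net.setInput(blob);
//     Mat prob = net.forward();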
Mat readTensorFromONNX(const String& path)
{
    opencv_onnx::TensorProto tensor_proto = opencv_onnx::TensorProto();
    std::fstream input(path.c_str(), std::ios::in | std::ios::binary);
    if (!tensor_proto.ParseFromIstream(&input)) {
        CV_Error(Error::StsUnsupportedFormat, "Failed to parse data");
    }
    Mat mat = getMatFromTensor(tensor_proto);
    releaseONNXTensor(tensor_proto);
    return mat;
}

CV__DNN_EXPERIMENTAL_NS_END
}}  // namespace cv::dnn

#endif  // HAVE_PROTOBUF