#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
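+// Keep CV_LOG_DEBUG statements in this file by raising the compile-time log strip level.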
+#include <opencv2/core/utils/logger.defines.hpp>
+#undef CV_LOG_STRIP_LEVEL
+#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG + 1
+#include <opencv2/core/utils/logger.hpp>
+
#ifdef HAVE_PROTOBUF
#include <iostream>
std::map<std::string, Mat> getGraphTensors(
const opencv_onnx::GraphProto& graph_proto);
- Mat getBlob(const opencv_onnx::NodeProto& node_proto, const std::map<std::string, Mat>& constBlobs, int index);
+ Mat getBlob(const opencv_onnx::NodeProto& node_proto, int index);
+ Mat getBlob(const std::string& input_name);
LayerParams getLayerParams(const opencv_onnx::NodeProto& node_proto);
bool isCeilMode(const LayerParams& layerParams);
- void addLayer(Net& dstNet, LayerParams& layerParams,
- const opencv_onnx::NodeProto& node_proto,
- std::map<std::string, LayerInfo>& layer_id,
- std::map<std::string, MatShape>& outShapes);
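+ // Register a constant blob and its shape so later nodes can look it up by name.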
+ void addConstant(const std::string& name, const Mat& blob);
+ void addLayer(LayerParams& layerParams,
+ const opencv_onnx::NodeProto& node_proto);
public:
- ONNXImporter(const char *onnxFile)
+ ONNXImporter(Net& net, const char *onnxFile)
+ : dstNet(net)
{
+ CV_Assert(onnxFile);
+ CV_LOG_DEBUG(NULL, "DNN/ONNX: processing ONNX model from file: " << onnxFile);
+
std::fstream input(onnxFile, std::ios::in | std::ios::binary);
+ if (!input)
+ {
+ CV_Error(Error::StsBadArg, cv::format("Can't read ONNX file: %s", onnxFile));
+ }
if (!model_proto.ParseFromIstream(&input))
- CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model");
+ {
+ CV_Error(Error::StsUnsupportedFormat, cv::format("Failed to parse ONNX model: %s", onnxFile));
+ }
+
+ populateNet();
}
- ONNXImporter(const char* buffer, size_t sizeBuffer)
+ ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer)
+ : dstNet(net)
{
+ CV_LOG_DEBUG(NULL, "DNN/ONNX: processing in-memory ONNX model (" << sizeBuffer << " bytes)");
+
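+ // Lightweight streambuf wrapper exposing the caller's buffer as an input stream for protobuf.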
struct _Buf : public std::streambuf
{
_Buf(const char* buffer, size_t sizeBuffer)
if (!model_proto.ParseFromIstream(&input))
CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model from in-memory byte array.");
+
+ populateNet();
}
- void populateNet(Net dstNet);
+ void populateNet();
+
+protected:
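+ // Importer state shared between populateNet() and handleNode().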
+ Net& dstNet;
+
+ opencv_onnx::GraphProto graph_proto;
+ std::string framework_name;
+
+ std::map<std::string, Mat> constBlobs;
+
+ std::map<std::string, MatShape> outShapes; // Shapes of internal blobs.
+ typedef std::map<std::string, MatShape>::iterator IterShape_t;
+
+ std::map<std::string, LayerInfo> layer_id;
+ typedef std::map<std::string, LayerInfo>::iterator IterLayerId_t;
+
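+ // Translate a single ONNX node into OpenCV layer(s), or fold it into a constant blob.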
+ void handleNode(const opencv_onnx::NodeProto& node_proto);
};
inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
}
else if (attribute_proto.ints_size() > 0)
{
- lp.set(attribute_proto.name(), parse(attribute_proto.ints()));
+ lp.set(attribute_name, parse(attribute_proto.ints()));
}
else if (attribute_proto.has_t())
{
Mat blob = getMatFromTensor(tensor);
lp.blobs.push_back(blob);
}
- else if (attribute_proto.has_g() || attribute_proto.strings_size() > 0 ||
- attribute_proto.tensors_size() > 0 || attribute_proto.graphs_size() > 0)
+ else if (attribute_proto.has_g())
+ {
+ CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: 'Graph' is not supported", attribute_name.c_str()));
+ }
+ else if (attribute_proto.graphs_size() > 0)
+ {
+ CV_Error(Error::StsNotImplemented,
+ cv::format("DNN/ONNX/Attribute[%s]: 'Graphs' (%d) in attributes is not supported",
+ attribute_name.c_str(), attribute_proto.graphs_size())
+ );
+ }
+ else if (attribute_proto.strings_size() > 0)
+ {
+ std::string msg = cv::format("DNN/ONNX/Attribute[%s]: 'Strings' (%d) are not supported",
+ attribute_name.c_str(), attribute_proto.strings_size());
+ CV_LOG_ERROR(NULL, msg);
+ for (int i = 0; i < attribute_proto.strings_size(); i++)
+ {
+ CV_LOG_ERROR(NULL, " Attribute[" << attribute_name << "].string(" << i << ") = '" << attribute_proto.strings(i) << "'");
+ }
+ CV_Error(Error::StsNotImplemented, msg);
+ }
+ else if (attribute_proto.tensors_size() > 0)
{
- CV_Error(Error::StsNotImplemented, "Unexpected attribute type");
+ CV_Error(Error::StsNotImplemented,
+ cv::format("DNN/ONNX/Attribute[%s]: 'Tensors' (%d) in attributes are not supported",
+ attribute_name.c_str(), attribute_proto.tensors_size())
+ );
}
else
- CV_Error(Error::StsNotImplemented, "Unsupported attribute type");
+ {
+ CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: unsupported attribute format", attribute_name.c_str()));
+ }
}
return lp;
}
-Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto,
- const std::map<std::string, Mat>& constBlobs, int index)
+Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto, int index)
{
CV_Assert(index < node_proto.input_size());
- std::map<std::string, Mat>::const_iterator constBlob;
- constBlob = constBlobs.find(node_proto.input(index));
- if (constBlob == constBlobs.end()) {
- CV_Error(Error::StsObjectNotFound,
- "Blob " + node_proto.input(index) + " not found in const blobs");
+ const std::string& input_name = node_proto.input(index);
+ return getBlob(input_name);
+}
+
+Mat ONNXImporter::getBlob(const std::string& input_name)
+{
+ std::map<std::string, Mat>::const_iterator constBlob = constBlobs.find(input_name);
+ if (constBlob == constBlobs.end())
+ {
+ CV_Error(Error::StsBadArg, std::string("Blob ") + input_name + " not found in const blobs");
}
return constBlob->second;
}
-void ONNXImporter::addLayer(Net& dstNet, LayerParams& layerParams,
- const opencv_onnx::NodeProto& node_proto,
- std::map<std::string, LayerInfo>& layer_id,
- std::map<std::string, MatShape>& outShapes)
+void ONNXImporter::addLayer(LayerParams& layerParams,
+ const opencv_onnx::NodeProto& node_proto)
{
- std::map<std::string, LayerInfo>::iterator layerId;
- std::map<std::string, MatShape>::iterator shapeIt;
-
int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
for (int i = 0; i < node_proto.output_size(); ++i)
{
std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
int inpNum = 0;
- for (int j = 0; j < node_proto.input_size(); j++) {
- layerId = layer_id.find(node_proto.input(j));
+ for (int j = 0; j < node_proto.input_size(); j++)
+ {
+ const std::string& input_name = node_proto.input(j);
+ IterLayerId_t layerId = layer_id.find(input_name);
if (layerId != layer_id.end()) {
dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
++inpNum;
// Collect input shapes.
- shapeIt = outShapes.find(node_proto.input(j));
+ IterShape_t shapeIt = outShapes.find(input_name);
CV_Assert(shapeIt != outShapes.end());
layerInpShapes.push_back(shapeIt->second);
}
}
// Compute shape of output blob for this layer.
- Ptr<Layer> layer = dstNet.getLayer(id);
+ Ptr<Layer> layer = dstNet.getLayer(id); // FIXIT: avoid instantiation of layers during the import stage
layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
{
}
}
-static void addConstant(const std::string& name,
- const Mat& blob,
- std::map<std::string, Mat>& constBlobs,
- std::map<std::string, MatShape>& outShapes)
+void ONNXImporter::addConstant(const std::string& name, const Mat& blob)
{
constBlobs.insert(std::make_pair(name, blob));
outShapes.insert(std::make_pair(name, shape(blob)));
}
-void ONNXImporter::populateNet(Net dstNet)
+void ONNXImporter::populateNet()
{
CV_Assert(model_proto.has_graph());
- opencv_onnx::GraphProto graph_proto = model_proto.graph();
+ graph_proto = model_proto.graph();
+
+ std::string framework_version;
+ if (model_proto.has_producer_name())
+ framework_name = model_proto.producer_name();
+ if (model_proto.has_producer_version())
+ framework_version = model_proto.producer_version();
+
+ CV_LOG_INFO(NULL, "DNN/ONNX: loading ONNX"
+ << (model_proto.has_ir_version() ? cv::format(" v%d", (int)model_proto.ir_version()) : cv::String())
+ << " model produced by '" << framework_name << "'"
+ << (framework_version.empty() ? cv::String() : cv::format(":%s", framework_version.c_str()))
+ << ". Number of nodes = " << graph_proto.node_size()
+ << ", inputs = " << graph_proto.input_size()
+ << ", outputs = " << graph_proto.output_size()
+ );
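+ // Simplify known ONNX subgraph patterns before importing nodes one by one.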
simplifySubgraphs(graph_proto);
- std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
- // List of internal blobs shapes.
- std::map<std::string, MatShape> outShapes;
+ const int layersSize = graph_proto.node_size();
+ CV_LOG_DEBUG(NULL, "DNN/ONNX: graph simplified to " << layersSize << " nodes");
+
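+ // Graph initializers (weights) become constant blobs keyed by tensor name.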
+ constBlobs = getGraphTensors(graph_proto);
// Add all input shapes: both constant blobs and the network's input shapes.
for (int i = 0; i < graph_proto.input_size(); ++i)
{
- opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
+ const opencv_onnx::ValueInfoProto& valueInfoProto = graph_proto.input(i);
+ CV_Assert(valueInfoProto.has_name());
CV_Assert(valueInfoProto.has_type());
opencv_onnx::TypeProto typeProto = valueInfoProto.type();
CV_Assert(typeProto.has_tensor_type());
outShapes[valueInfoProto.name()] = inpShape;
}
- std::string framework_name;
- if (model_proto.has_producer_name()) {
- framework_name = model_proto.producer_name();
- }
-
// create map with network inputs (without const blobs)
- std::map<std::string, LayerInfo> layer_id;
- std::map<std::string, LayerInfo>::iterator layerId;
- std::map<std::string, MatShape>::iterator shapeIt;
// fill map: push layer name, layer id and output id
std::vector<String> netInputs;
for (int j = 0; j < graph_proto.input_size(); j++)
}
dstNet.setInputsNames(netInputs);
- int layersSize = graph_proto.node_size();
- LayerParams layerParams;
- opencv_onnx::NodeProto node_proto;
-
for(int li = 0; li < layersSize; li++)
{
- node_proto = graph_proto.node(li);
- layerParams = getLayerParams(node_proto);
- CV_Assert(node_proto.output_size() >= 1);
- layerParams.name = node_proto.output(0);
+ const opencv_onnx::NodeProto& node_proto = graph_proto.node(li);
+ handleNode(node_proto);
+ }
- std::string layer_type = node_proto.op_type();
- layerParams.type = layer_type;
+ CV_LOG_DEBUG(NULL, "DNN/ONNX: import completed!");
+}
+
+void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
+{
+ opencv_onnx::NodeProto node_proto = node_proto_; // TODO FIXIT: copied because several branches below mutate the node's inputs/outputs
+ CV_Assert(node_proto.output_size() >= 1);
+ std::string name = node_proto.output(0);
+ std::string layer_type = node_proto.op_type();
+ CV_LOG_DEBUG(NULL, "DNN/ONNX: processing node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
+ << cv::format("[%s]:(%s)", layer_type.c_str(), name.c_str())
+ );
+
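+ // Parse inside try/catch so failures are reported with the node type, name and I/O names.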
+ try
+ {
+ // FIXIT: not all cases can be repacked into "LayerParams". The importer should handle such cases directly for each "layer_type".
+ LayerParams layerParams = getLayerParams(node_proto);
+
+ layerParams.name = name;
+ layerParams.type = layer_type;
if (layer_type == "MaxPool")
{
opencv_onnx::NodeProto proto;
proto.add_input(node_proto.input(0));
proto.add_output(reshapeLp.name);
- addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);
+ addLayer(reshapeLp, proto);
LayerParams avgLp;
avgLp.name = layerParams.name + "/avg";
node_proto.set_input(0, reshapeLp.name);
node_proto.set_output(0, avgLp.name);
- addLayer(dstNet, avgLp, node_proto, layer_id, outShapes);
+ addLayer(avgLp, node_proto);
}
else
{
poolLp.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));
node_proto.set_output(0, poolLp.name);
- addLayer(dstNet, poolLp, node_proto, layer_id, outShapes);
+ addLayer(poolLp, node_proto);
}
layerParams.type = "Reshape";
opencv_onnx::NodeProto proto;
proto.add_input(node_proto.input(0));
proto.add_output(reshapeLp.name);
- addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);
+ addLayer(reshapeLp, proto);
LayerParams poolLp = layerParams;
poolLp.name = layerParams.name + "/pool";
node_proto.set_input(0, reshapeLp.name);
node_proto.set_output(0, poolLp.name);
- addLayer(dstNet, poolLp, node_proto, layer_id, outShapes);
+ addLayer(poolLp, node_proto);
layerParams.type = "Reshape";
int targetShape[] = {1};
for (int i = 1; i < inp_size; i++) {
CV_Assert(constBlobs.find(node_proto.input(i)) != constBlobs.end());
}
- Mat start_blob = getBlob(node_proto, constBlobs, 1);
- Mat end_blob = getBlob(node_proto, constBlobs, 2);
+ Mat start_blob = getBlob(node_proto, 1);
+ Mat end_blob = getBlob(node_proto, 2);
CV_Assert(start_blob.total() == end_blob.total());
if (inp_size > 3) {
- Mat axes_blob = getBlob(node_proto, constBlobs, 3);
+ Mat axes_blob = getBlob(node_proto, 3);
const int* axes = (int*)axes_blob.data;
for (int i = 1; i < axes_blob.total(); ++i) {
CV_Assert(axes[i - 1] == axes[i] - 1);
if (inp_size == 5) {
CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
- Mat step_blob = getBlob(node_proto, constBlobs, 4);
+ Mat step_blob = getBlob(node_proto, 4);
// A very strange use of the Slice op: reversing a tensor.
// We only work around it for 2D constants.
start_blob.at<int>(0) == -1 && step_blob.at<int>(0) == -1 &&
end_blob.at<int>(0) == std::numeric_limits<int32_t>::min())
{
- Mat inp = getBlob(node_proto, constBlobs, 0);
+ Mat inp = getBlob(node_proto, 0);
if (inp.dims == 2)
{
Mat flipped;
flip(inp, flipped, 0);
- addConstant(layerParams.name, flipped, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, flipped);
+ return;
}
}
CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1");
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
- Mat inp = getBlob(node_proto, constBlobs, 0);
+ Mat inp = getBlob(node_proto, 0);
std::vector<Mat> inputs, sliced;
inputs.push_back(inp);
runLayer(layerParams, inputs, sliced);
CV_Assert(sliced.size() == 1);
- addConstant(layerParams.name, sliced[0], constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, sliced[0]);
+ return;
}
}
else if (layer_type == "Split")
bool is_const_1 = layer_id.find(node_proto.input(1)) == layer_id.end();
if (is_const_0 && is_const_1)
{
- Mat blob_0 = getBlob(node_proto, constBlobs, 0);
- Mat blob_1 = getBlob(node_proto, constBlobs, 1);
+ Mat blob_0 = getBlob(node_proto, 0);
+ Mat blob_1 = getBlob(node_proto, 1);
CV_Assert(blob_0.size == blob_1.size);
Mat output = isSub ? (blob_0 - blob_1) : (blob_0 + blob_1);
- addConstant(layerParams.name, output, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, output);
+ return;
}
else if (is_const_0 || is_const_1)
{
int const_blob_id = is_const_0 ? 0 : 1;
- Mat blob = getBlob(node_proto, constBlobs, const_blob_id);
+ Mat blob = getBlob(node_proto, const_blob_id);
int blob_total = blob.total();
if (blob_total == 1) {
layerParams.type = "Power";
//Create Power layer
int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
//Connect to input
- layerId = layer_id.find(node_proto.input(1));
+ IterLayerId_t layerId = layer_id.find(node_proto.input(1));
CV_Assert(layerId != layer_id.end());
dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
//Add shape
if (layer_id.find(node_proto.input(1)) != layer_id.end())
CV_Error(Error::StsNotImplemented, "Unsupported Pow op with variable power");
- Mat blob = getBlob(node_proto, constBlobs, 1);
+ Mat blob = getBlob(node_proto, 1);
if (blob.total() != 1)
CV_Error(Error::StsNotImplemented, "Pow op supports only scalar power");
{
CV_Assert(node_proto.input_size() == 0);
CV_Assert(layerParams.blobs.size() == 1);
- addConstant(layerParams.name, layerParams.blobs[0], constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, layerParams.blobs[0]);
+ return;
}
else if (layer_type == "LSTM")
{
// https://pytorch.org/docs/stable/nn.html#lstm
CV_Assert(node_proto.input_size() == 7);
- Mat Wx = getBlob(node_proto, constBlobs, 1);
- Mat Wh = getBlob(node_proto, constBlobs, 2);
- Mat b = getBlob(node_proto, constBlobs, 3);
- CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 5)), 0, "Unsupported non zero initial_h");
- CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 6)), 0, "Unsupported non zero initial_c");
+ Mat Wx = getBlob(node_proto, 1);
+ Mat Wh = getBlob(node_proto, 2);
+ Mat b = getBlob(node_proto, 3);
+ CV_CheckEQ(countNonZero(getBlob(node_proto, 5)), 0, "Unsupported non zero initial_h");
+ CV_CheckEQ(countNonZero(getBlob(node_proto, 6)), 0, "Unsupported non zero initial_c");
b = b.reshape(1, b.size[0]);
const int numHidden = lstmParams.get<int>("hidden_size");
lstmParams.set("bidirectional", lstmParams.get<String>("direction", "") == "bidirectional");
node_proto.set_output(0, lstmParams.name); // use a different name so the output shapes are registered under it
- addLayer(dstNet, lstmParams, node_proto, layer_id, outShapes);
+ addLayer(lstmParams, node_proto);
MatShape lstmShape = outShapes[node_proto.output(0)];
else if (layer_type == "PRelu")
{
layerParams.type = "PReLU";
- layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));
+ layerParams.blobs.push_back(getBlob(node_proto, 1));
}
else if (layer_type == "LRN")
{
"Expected input, scale, bias");
layerParams.blobs.resize(4);
- layerParams.blobs[2] = getBlob(node_proto, constBlobs, 1); // weightData
- layerParams.blobs[3] = getBlob(node_proto, constBlobs, 2); // biasData
+ layerParams.blobs[2] = getBlob(node_proto, 1); // weightData
+ layerParams.blobs[3] = getBlob(node_proto, 2); // biasData
layerParams.set("has_bias", true);
layerParams.set("has_weight", true);
//Create MVN layer
int id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
//Connect to input
- layerId = layer_id.find(node_proto.input(0));
+ IterLayerId_t layerId = layer_id.find(node_proto.input(0));
CV_Assert(layerId != layer_id.end());
dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
//Add shape
replaceLayerParam(layerParams, "epsilon", "eps");
replaceLayerParam(layerParams, "spatial", "use_global_stats");
- Mat meanData = getBlob(node_proto, constBlobs, 3);
- Mat stdData = getBlob(node_proto, constBlobs, 4);
+ Mat meanData = getBlob(node_proto, 3);
+ Mat stdData = getBlob(node_proto, 4);
layerParams.blobs.push_back(meanData);
layerParams.blobs.push_back(stdData);
if (!node_proto.input(1).empty()) {
layerParams.set("has_weight", true);
- layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1)); // weightData
+ layerParams.blobs.push_back(getBlob(node_proto, 1)); // weightData
} else {
layerParams.set("has_weight", false);
}
if (!node_proto.input(2).empty()) {
layerParams.set("has_bias", true);
- layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
+ layerParams.blobs.push_back(getBlob(node_proto, 2)); // biasData
} else {
layerParams.set("has_bias", false);
}
{
CV_Assert(node_proto.input_size() >= 2);
layerParams.type = "InnerProduct";
- Mat weights = getBlob(node_proto, constBlobs, 1);
+ Mat weights = getBlob(node_proto, 1);
int ind_num_out = 0;
if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
transpose(weights, weights);
layerParams.blobs.push_back(weights);
if (node_proto.input_size() == 3) {
- Mat bias = getBlob(node_proto, constBlobs, 2);
+ Mat bias = getBlob(node_proto, 2);
layerParams.blobs.push_back(bias);
}
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
- Mat inputBuf = getBlob(node_proto, constBlobs, 0);
+ Mat inputBuf = getBlob(node_proto, 0);
LayerParams constParams;
constParams.name = node_proto.input(0);
opencv_onnx::NodeProto proto;
proto.add_output(constParams.name);
- addLayer(dstNet, constParams, proto, layer_id, outShapes);
+ addLayer(constParams, proto);
}
layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
if (constBlobs.find(node_proto.input(1)) != constBlobs.end())
{
- Mat blob = getBlob(node_proto, constBlobs, 1);
+ Mat blob = getBlob(node_proto, 1);
secondInpDims = blob.dims;
layerParams.blobs.push_back(blob.t());
layerParams.set("num_output", layerParams.blobs[0].size[0]);
}
if (constId != -1 && haveVariables)
{
- Mat blob = getBlob(node_proto, constBlobs, constId);
+ Mat blob = getBlob(node_proto, constId);
blob = blob.reshape(1, 1);
if (blob.total() == 1) {
float coeff = isDiv ? 1.0 / blob.at<float>(0) : blob.at<float>(0);
//Create Power layer
int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
//Connect to input
- layerId = layer_id.find(node_proto.input(1));
+ IterLayerId_t layerId = layer_id.find(node_proto.input(1));
CV_Assert(layerId != layer_id.end());
dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
//Add shape
if (!haveVariables)
{
- Mat inp0 = getBlob(node_proto, constBlobs, 0);
- Mat inp1 = getBlob(node_proto, constBlobs, 1);
+ Mat inp0 = getBlob(node_proto, 0);
+ Mat inp1 = getBlob(node_proto, 1);
if (inp0.size != inp1.size && inp1.total() != 1)
CV_Error(Error::StsNotImplemented, "Constant multiply with different shapes");
Mat out = isDiv ? inp0 / inp1 : inp0.mul(inp1);
out = out.reshape(1, inp0.dims, inp0.size);
out.dims = inp0.dims; // to work around dims == 1
- addConstant(layerParams.name, out, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, out);
+ return;
}
}
else if (layer_type == "Conv")
for (int j = 1; j < node_proto.input_size(); j++) {
if (constBlobs.find(node_proto.input(j)) != constBlobs.end())
{
- layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
+ layerParams.blobs.push_back(getBlob(node_proto, j));
}
}
int outCn = layerParams.blobs.empty() ? outShapes[node_proto.input(1)][0] : layerParams.blobs[0].size[0];
CV_Assert(node_proto.input_size() >= 2);
layerParams.type = "Deconvolution";
for (int j = 1; j < node_proto.input_size(); j++) {
- layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
+ layerParams.blobs.push_back(getBlob(node_proto, j));
}
layerParams.set("num_output", layerParams.blobs[0].size[1] * layerParams.get<int>("group", 1));
layerParams.set("bias_term", node_proto.input_size() == 3);
CV_Assert(node_proto.input_size() == 1);
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
- std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), transposed;
+ std::vector<Mat> inputs(1, getBlob(node_proto, 0)), transposed;
runLayer(layerParams, inputs, transposed);
CV_Assert(transposed.size() == 1);
- addConstant(layerParams.name, transposed[0], constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, transposed[0]);
+ return;
}
}
else if (layer_type == "Squeeze")
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
- Mat inp = getBlob(node_proto, constBlobs, 0);
+ Mat inp = getBlob(node_proto, 0);
Mat out = inp.reshape(1, outShape);
out.dims = outShape.size(); // to work around dims == 1
- addConstant(layerParams.name, out, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, out);
+ return;
}
}
else if (layer_type == "Flatten")
CV_CheckEQ(node_proto.input_size(), 1, "");
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
- Mat input = getBlob(node_proto, constBlobs, 0);
+ Mat input = getBlob(node_proto, 0);
int axis = clamp(layerParams.get<int>("axis", 1), input.dims);
std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
out_size.push_back(input.total(axis));
Mat output = input.reshape(1, out_size);
- addConstant(layerParams.name, output, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, output);
+ return;
}
}
else if (layer_type == "Unsqueeze")
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
// Constant input.
- Mat input = getBlob(node_proto, constBlobs, 0);
+ Mat input = getBlob(node_proto, 0);
std::vector<int> dims;
for (int j = 0; j < input.dims; j++) {
}
Mat out = input.reshape(0, dims);
- addConstant(layerParams.name, out, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, out);
+ return;
}
// Variable input.
else if (layer_type == "Expand")
{
CV_CheckEQ(node_proto.input_size(), 2, "");
- CV_Assert(constBlobs.find(node_proto.input(1)) != constBlobs.end());
- Mat newShapeMat = getBlob(node_proto, constBlobs, 1);
+ const std::string& input0 = node_proto.input(0);
+ const std::string& input1 = node_proto.input(1);
+ Mat newShapeMat = getBlob(input1);
MatShape targetShape(newShapeMat.ptr<int>(), newShapeMat.ptr<int>() + newShapeMat.total());
MatShape inpShape;
- bool haveVariables = constBlobs.find(node_proto.input(0)) == constBlobs.end();
+ bool haveVariables = constBlobs.find(input0) == constBlobs.end();
if (haveVariables)
{
- shapeIt = outShapes.find(node_proto.input(0));
+ IterShape_t shapeIt = outShapes.find(input0);
CV_Assert(shapeIt != outShapes.end());
inpShape = shapeIt->second;
}
else
{
- inpShape = shape(getBlob(node_proto, constBlobs, 0));
+ inpShape = shape(getBlob(input0));
}
- String srcName = node_proto.input(0);
+ String srcName = input0;
// Unsqueeze and repeat along new axis
if (targetShape.size() == inpShape.size() + 1)
{
opencv_onnx::NodeProto proto;
proto.add_input(node_proto.input(0));
proto.add_output(reshapeLp.name);
- addLayer(dstNet, reshapeLp, proto, layer_id, outShapes);
+ addLayer(reshapeLp, proto);
srcName = reshapeLp.name;
}
}
if (broadcast_axes.size() != 1)
CV_Error(Error::StsNotImplemented, "Expand op doesn't support multiple axes for constant input");
- Mat input = getBlob(node_proto, constBlobs, 0);
+ Mat input = getBlob(node_proto, 0);
input = input.reshape(0, total(inpShape, 0, broadcast_axes[0]));
Mat output = cv::repeat(input, 1, targetShape[broadcast_axes[0]]);
output = output.reshape(0, targetShape);
- addConstant(layerParams.name, output, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, output);
+ return;
}
if (broadcast_axes.size() == 2 &&
opencv_onnx::NodeProto proto;
proto.add_output(constParams.name);
- addLayer(dstNet, constParams, proto, layer_id, outShapes);
+ addLayer(constParams, proto);
layerParams.type = "Scale";
layerParams.set("bias_term", false);
node_proto.set_input(0, constParams.name);
- node_proto.set_input(1, shapeIt->first);
+ node_proto.set_input(1, srcName);
}
else if (broadcast_axes.size() == 1 && broadcast_axes[0] <= 1)
{
node_proto.set_input(0, srcName);
node_proto.set_output(0, copyLP.name);
- addLayer(dstNet, copyLP, node_proto, layer_id, outShapes);
+ addLayer(copyLP, node_proto);
}
node_proto.clear_input();
for (int i = 0; i < input_names.size(); i++)
CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));
if (node_proto.input_size() == 2) {
- Mat blob = getBlob(node_proto, constBlobs, 1);
+ Mat blob = getBlob(node_proto, 1);
CV_Assert(blob.type() == CV_32SC1);
layerParams.set("dim", DictValue::arrayInt<int*>(
blob.ptr<int>(), blob.total() ));
if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
- std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
+ std::vector<Mat> inputs(1, getBlob(node_proto, 0)), outputs;
runLayer(layerParams, inputs, outputs);
- addConstant(layerParams.name, outputs[0], constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, outputs[0]);
+ return;
}
}
else {
}
if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
- Mat input = getBlob(node_proto, constBlobs, 0);
+ Mat input = getBlob(node_proto, 0);
Mat out = input.reshape(0, dim);
- addConstant(layerParams.name, out, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, out);
+ return;
}
replaceLayerParam(layerParams, "shape", "dim");
}
{
// Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
// We need to shuffle it to begin0, end0, begin1, end1, ...
- Mat paddings = getBlob(node_proto, constBlobs, 1).reshape(1, 2);
+ Mat paddings = getBlob(node_proto, 1).reshape(1, 2);
paddings = paddings.t();
layerParams.set("paddings", DictValue::arrayInt(paddings.ptr<int>(), paddings.total()));
if (node_proto.input_size() == 3)
{
- Mat value = getBlob(node_proto, constBlobs, 2);
+ Mat value = getBlob(node_proto, 2);
layerParams.set("value", value.at<float>(0));
}
}
else if (layer_type == "Shape")
{
CV_Assert(node_proto.input_size() == 1);
- shapeIt = outShapes.find(node_proto.input(0));
+ IterShape_t shapeIt = outShapes.find(node_proto.input(0));
CV_Assert(shapeIt != outShapes.end());
- MatShape inpShape = shapeIt->second;
+ const MatShape& inpShape = shapeIt->second;
Mat shapeMat(inpShape.size(), 1, CV_32S);
for (int j = 0; j < inpShape.size(); ++j)
shapeMat.at<int>(j) = inpShape[j];
shapeMat.dims = 1;
- addConstant(layerParams.name, shapeMat, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, shapeMat);
+ return;
}
else if (layer_type == "Cast")
{
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
- Mat blob = getBlob(node_proto, constBlobs, 0);
+ Mat blob = getBlob(node_proto, 0);
int type;
switch (layerParams.get<int>("to"))
{
Mat dst;
blob.convertTo(dst, type);
dst.dims = blob.dims;
- addConstant(layerParams.name, dst, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, dst);
+ return;
}
else
layerParams.type = "Identity";
else
fill_value = layerParams.get("value", 0);
- MatShape inpShape = getBlob(node_proto, constBlobs, 0);
+ MatShape inpShape = getBlob(node_proto, 0);
for (int i = 0; i < inpShape.size(); i++)
CV_CheckGT(inpShape[i], 0, "");
Mat tensor(inpShape.size(), &inpShape[0], depth, Scalar(fill_value));
- addConstant(layerParams.name, tensor, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, tensor);
+ return;
}
else if (layer_type == "Gather")
{
CV_Assert(node_proto.input_size() == 2);
- Mat indexMat = getBlob(node_proto, constBlobs, 1);
+ Mat indexMat = getBlob(node_proto, 1);
CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
int index = indexMat.at<int>(0);
int axis = layerParams.get<int>("axis", 0);
if ((constBlobs.find(node_proto.input(0)) != constBlobs.end()))
{
- Mat input = getBlob(node_proto, constBlobs, 0);
+ Mat input = getBlob(node_proto, 0);
Mat out;
std::vector<cv::Range> ranges(input.dims, Range::all());
ranges[axis] = Range(index, index + 1);
} else {
out.dims = 1;
}
- addConstant(layerParams.name, out, constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, out);
+ return;
}
else
{
- shapeIt = outShapes.find(node_proto.input(0));
+ IterShape_t shapeIt = outShapes.find(node_proto.input(0));
CV_Assert(shapeIt != outShapes.end());
MatShape inpShape = shapeIt->second;
opencv_onnx::NodeProto proto;
proto.add_input(node_proto.input(0));
proto.add_output(sliceLp.name);
- addLayer(dstNet, sliceLp, proto, layer_id, outShapes);
+ addLayer(sliceLp, proto);
inpShape.erase(inpShape.begin() + axis);
layerParams.type = "Reshape";
std::vector<Mat> inputs(node_proto.input_size()), concatenated;
for (size_t i = 0; i < inputs.size(); ++i)
{
- inputs[i] = getBlob(node_proto, constBlobs, i);
+ inputs[i] = getBlob(node_proto, i);
}
runLayer(layerParams, inputs, concatenated);
CV_Assert(concatenated.size() == 1);
- addConstant(layerParams.name, concatenated[0], constBlobs, outShapes);
- continue;
+ addConstant(layerParams.name, concatenated[0]);
+ return;
}
}
else if (layer_type == "Resize")
CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");
layerParams.set("align_corners", interp_mode == "align_corners");
- Mat shapes = getBlob(node_proto, constBlobs, node_proto.input_size() - 1);
+ Mat shapes = getBlob(node_proto, node_proto.input_size() - 1);
CV_CheckEQ(shapes.size[0], 4, "");
CV_CheckEQ(shapes.size[1], 1, "");
CV_CheckDepth(shapes.depth(), shapes.depth() == CV_32S || shapes.depth() == CV_32F, "");
int width = shapes.at<int>(3);
if (node_proto.input_size() == 3)
{
- shapeIt = outShapes.find(node_proto.input(0));
+ IterShape_t shapeIt = outShapes.find(node_proto.input(0));
CV_Assert(shapeIt != outShapes.end());
MatShape scales = shapeIt->second;
height *= scales[2];
else
{
// scales as input
- Mat scales = getBlob(node_proto, constBlobs, 1);
+ Mat scales = getBlob(node_proto, 1);
CV_Assert(scales.total() == 4);
layerParams.set("zoom_factor_y", scales.at<float>(2));
layerParams.set("zoom_factor_x", scales.at<float>(3));
CV_CheckEQ(node_proto.input_size(), 3, "");
if (constBlobs.find(node_proto.input(2)) != constBlobs.end())
{
- Mat priors = getBlob(node_proto, constBlobs, 2);
+ Mat priors = getBlob(node_proto, 2);
LayerParams constParams;
constParams.name = layerParams.name + "/priors";
opencv_onnx::NodeProto priorsProto;
priorsProto.add_output(constParams.name);
- addLayer(dstNet, constParams, priorsProto, layer_id, outShapes);
+ addLayer(constParams, priorsProto);
node_proto.set_input(2, constParams.name);
}
{
for (int j = 0; j < node_proto.input_size(); j++) {
if (layer_id.find(node_proto.input(j)) == layer_id.end())
- layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
+ layerParams.blobs.push_back(getBlob(node_proto, j));
}
}
- addLayer(dstNet, layerParams, node_proto, layer_id, outShapes);
+ addLayer(layerParams, node_proto);
+ }
+ catch (const cv::Exception& e)
+ {
+ CV_LOG_ERROR(NULL, "DNN/ONNX: ERROR during processing node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
+ << cv::format("[%s]:(%s)", layer_type.c_str(), name.c_str())
+ );
+ for (int i = 0; i < node_proto.input_size(); i++)
+ {
+ CV_LOG_INFO(NULL, " Input[" << i << "] = '" << node_proto.input(i) << "'");
+ }
+ for (int i = 0; i < node_proto.output_size(); i++)
+ {
+ CV_LOG_INFO(NULL, " Output[" << i << "] = '" << node_proto.output(i) << "'");
+ }
+ CV_Error(Error::StsError, cv::format("Node [%s]:(%s) parse error: %s", layer_type.c_str(), name.c_str(), e.what()));
}
}
Net readNetFromONNX(const String& onnxFile)
{
- ONNXImporter onnxImporter(onnxFile.c_str());
Net net;
- onnxImporter.populateNet(net);
+ ONNXImporter onnxImporter(net, onnxFile.c_str());
return net;
}
Net readNetFromONNX(const char* buffer, size_t sizeBuffer)
{
- ONNXImporter onnxImporter(buffer, sizeBuffer);
Net net;
- onnxImporter.populateNet(net);
+ ONNXImporter onnxImporter(net, buffer, sizeBuffer);
return net;
}
Mat readTensorFromONNX(const String& path)
{
- opencv_onnx::TensorProto tensor_proto = opencv_onnx::TensorProto();
std::fstream input(path.c_str(), std::ios::in | std::ios::binary);
- if (!tensor_proto.ParseFromIstream(&input)) {
- CV_Error(Error::StsUnsupportedFormat, "Failed to parse data");
+ if (!input)
+ {
+ CV_Error(Error::StsBadArg, cv::format("Can't read ONNX file: %s", path.c_str()));
+ }
+
+ opencv_onnx::TensorProto tensor_proto = opencv_onnx::TensorProto();
+ if (!tensor_proto.ParseFromIstream(&input))
+ {
+ CV_Error(Error::StsUnsupportedFormat, cv::format("Failed to parse ONNX data: %s", path.c_str()));
}
Mat mat = getMatFromTensor(tensor_proto);
releaseONNXTensor(tensor_proto);