Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author    Alexander Alekhin <alexander.a.alekhin@gmail.com>
          Sat, 14 Aug 2021 18:24:00 +0000 (18:24 +0000)
committer Alexander Alekhin <alexander.a.alekhin@gmail.com>
          Sat, 14 Aug 2021 18:24:00 +0000 (18:24 +0000)
modules/dnn/src/layers/detection_output_layer.cpp
modules/dnn/src/layers/recurrent_layers.cpp
modules/dnn/src/onnx/onnx_importer.cpp
modules/dnn/src/tensorflow/tf_importer.cpp
modules/dnn/test/test_onnx_importer.cpp
modules/dnn/test/test_tf_importer.cpp
modules/imgproc/include/opencv2/imgproc.hpp
modules/imgproc/test/test_convhull.cpp

@@@ -262,132 -256,122 +266,136 @@@ LayerParams ONNXImporter::getLayerParam
          opencv_onnx::AttributeProto attribute_proto = node_proto.attribute(i);
          std::string attribute_name = attribute_proto.name();
  
 -        if(attribute_name == "kernel_shape")
 -        {
 -            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
 -            lp.set("kernel_size", parse(attribute_proto.ints()));
 -        }
 -        else if(attribute_name == "strides")
 +        try
          {
 -            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
 -            lp.set("stride", parse(attribute_proto.ints()));
 -        }
 -        else if(attribute_name == "pads")
 -        {
 -            if (node_proto.op_type() == "Pad")
 +            if(attribute_name == "kernel_shape")
 +            {
 +                CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
 +                lp.set("kernel_size", parse(attribute_proto.ints()));
 +            }
 +            else if(attribute_name == "strides")
              {
 -                // Padding layer.
 -                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
 -                // We need to shuffle it to begin0, end0, begin1, end1, ...
 -                CV_Assert(attribute_proto.ints_size() % 2 == 0);
 -                const int dims = attribute_proto.ints_size() / 2;
 -                std::vector<int32_t> paddings;
 -                paddings.reserve(attribute_proto.ints_size());
 -                for (int i = 0; i < dims; ++i)
 +                CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
 +                lp.set("stride", parse(attribute_proto.ints()));
 +            }
 +            else if(attribute_name == "pads")
 +            {
 +                if (node_proto.op_type() == "Pad")
 +                {
 +                    // Padding layer.
 +                    // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
 +                    // We need to shuffle it to begin0, end0, begin1, end1, ...
 +                    CV_Assert(attribute_proto.ints_size() % 2 == 0);
 +                    const int dims = attribute_proto.ints_size() / 2;
 +                    std::vector<int32_t> paddings;
 +                    paddings.reserve(attribute_proto.ints_size());
 +                    for (int i = 0; i < dims; ++i)
 +                    {
 +                        paddings.push_back(attribute_proto.ints(i));
 +                        paddings.push_back(attribute_proto.ints(dims + i));
 +                    }
 +                    lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
 +                }
 +                else
                  {
 -                    paddings.push_back(attribute_proto.ints(i));
 -                    paddings.push_back(attribute_proto.ints(dims + i));
 +                    // Convolution or pooling.
 +                    CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
 +                    lp.set("pad", parse(attribute_proto.ints()));
                  }
 -                lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
              }
 -            else
 +            else if(attribute_name == "auto_pad")
              {
 -                // Convolution or pooling.
 -                CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
 -                lp.set("pad", parse(attribute_proto.ints()));
 +                if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
 +                    lp.set("pad_mode",  "SAME");
 +                }
 +                else if (attribute_proto.s() == "VALID") {
 +                    lp.set("pad_mode", "VALID");
 +                }
              }
 -        }
 -        else if(attribute_name == "auto_pad")
 -        {
 -            if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
 -                lp.set("pad_mode",  "SAME");
 +            else if(attribute_name == "dilations")
 +            {
 +                CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
 +                lp.set("dilation", parse(attribute_proto.ints()));
              }
 -            else if (attribute_proto.s() == "VALID") {
 -                lp.set("pad_mode", "VALID");
++            else if(attribute_name == "activations" && node_proto.op_type() == "LSTM")
++            {
++                lp.set(attribute_name, parseStr(attribute_proto.strings()));
++            }
 +            else if (attribute_proto.has_i())
 +            {
 +                ::google::protobuf::int64 src = attribute_proto.i();
 +                if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
 +                    CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
 +                else
 +                    lp.set(attribute_name, saturate_cast<int32_t>(src));
 +            }
 +            else if (attribute_proto.has_f())
 +            {
 +                lp.set(attribute_name, attribute_proto.f());
 +            }
 +            else if (attribute_proto.has_s())
 +            {
 +                lp.set(attribute_name, attribute_proto.s());
 +            }
 +            else if (attribute_proto.floats_size() > 0)
 +            {
 +                lp.set(attribute_name, DictValue::arrayReal(
 +                    attribute_proto.floats().data(), attribute_proto.floats_size()));
 +            }
 +            else if (attribute_proto.ints_size() > 0)
 +            {
 +                lp.set(attribute_name, parse(attribute_proto.ints()));
 +            }
 +            else if (attribute_proto.has_t())
 +            {
 +                opencv_onnx::TensorProto tensor = attribute_proto.t();
 +                Mat blob = getMatFromTensor(tensor);
 +                lp.blobs.push_back(blob);
 +            }
 +            else if (attribute_proto.has_g())
 +            {
 +                CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: 'Graph' is not supported", attribute_name.c_str()));
 +            }
 +            else if (attribute_proto.graphs_size() > 0)
 +            {
 +                CV_Error(Error::StsNotImplemented,
 +                        cv::format("DNN/ONNX/Attribute[%s]: 'Graphs' (%d) in attributes is not supported",
 +                                attribute_name.c_str(), attribute_proto.graphs_size())
 +                );
 +            }
 +            else if (attribute_proto.strings_size() > 0)
 +            {
 +                std::string msg = cv::format("DNN/ONNX/Attribute[%s]: 'Strings' (%d) are not supported",
 +                        attribute_name.c_str(), attribute_proto.strings_size());
 +                CV_LOG_ERROR(NULL, msg);
 +                for (int i = 0; i < attribute_proto.strings_size(); i++)
 +                {
 +                    CV_LOG_ERROR(NULL, "    Attribute[" << attribute_name << "].string(" << i << ") = '" << attribute_proto.strings(i) << "'");
 +                }
 +                CV_Error(Error::StsNotImplemented, msg);
 +            }
 +            else if (attribute_proto.tensors_size() > 0)
 +            {
 +                CV_Error(Error::StsNotImplemented,
 +                        cv::format("DNN/ONNX/Attribute[%s]: 'Tensors' (%d) in attributes are not supported",
 +                                attribute_name.c_str(), attribute_proto.tensors_size())
 +                );
              }
 -        }
 -        else if(attribute_name == "dilations")
 -        {
 -            CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
 -            lp.set("dilation", parse(attribute_proto.ints()));
 -        }
 -        else if(attribute_name == "activations" && node_proto.op_type() == "LSTM")
 -        {
 -            lp.set(attribute_name, parseStr(attribute_proto.strings()));
 -        }
 -        else if (attribute_proto.has_i())
 -        {
 -            ::google::protobuf::int64 src = attribute_proto.i();
 -            if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
 -                CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
              else
 -                lp.set(attribute_name, saturate_cast<int32_t>(src));
 -        }
 -        else if (attribute_proto.has_f())
 -        {
 -            lp.set(attribute_name, attribute_proto.f());
 -        }
 -        else if (attribute_proto.has_s())
 -        {
 -            lp.set(attribute_name, attribute_proto.s());
 -        }
 -        else if (attribute_proto.floats_size() > 0)
 -        {
 -            lp.set(attribute_name, DictValue::arrayReal(
 -                attribute_proto.floats().data(), attribute_proto.floats_size()));
 -        }
 -        else if (attribute_proto.ints_size() > 0)
 -        {
 -            lp.set(attribute_name, parse(attribute_proto.ints()));
 -        }
 -        else if (attribute_proto.has_t())
 -        {
 -            opencv_onnx::TensorProto tensor = attribute_proto.t();
 -            Mat blob = getMatFromTensor(tensor);
 -            lp.blobs.push_back(blob);
 -        }
 -        else if (attribute_proto.has_g())
 -        {
 -            CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: 'Graph' is not supported", attribute_name.c_str()));
 -        }
 -        else if (attribute_proto.graphs_size() > 0)
 -        {
 -            CV_Error(Error::StsNotImplemented,
 -                    cv::format("DNN/ONNX/Attribute[%s]: 'Graphs' (%d) in attributes is not supported",
 -                            attribute_name.c_str(), attribute_proto.graphs_size())
 -            );
 -        }
 -        else if (attribute_proto.strings_size() > 0)
 -        {
 -            std::string msg = cv::format("DNN/ONNX/Attribute[%s]: 'Strings' (%d) are not supported",
 -                    attribute_name.c_str(), attribute_proto.strings_size());
 -            CV_LOG_ERROR(NULL, msg);
 -            for (int i = 0; i < attribute_proto.strings_size(); i++)
              {
 -                CV_LOG_ERROR(NULL, "    Attribute[" << attribute_name << "].string(" << i << ") = '" << attribute_proto.strings(i) << "'");
 +                CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: unsupported attribute format", attribute_name.c_str()));
              }
 -            CV_Error(Error::StsNotImplemented, msg);
          }
 -        else if (attribute_proto.tensors_size() > 0)
 +        catch (const cv::Exception& e)
          {
 -            CV_Error(Error::StsNotImplemented,
 -                    cv::format("DNN/ONNX/Attribute[%s]: 'Tensors' (%d) in attributes are not supported",
 -                            attribute_name.c_str(), attribute_proto.tensors_size())
 -            );
 -        }
 -        else
 -        {
 -            CV_Error(Error::StsNotImplemented, cv::format("DNN/ONNX/Attribute[%s]: unsupported attribute format", attribute_name.c_str()));
 +            CV_UNUSED(e);
 +            if (DNN_DIAGNOSTICS_RUN)
 +            {
 +                CV_LOG_ERROR(NULL, "DNN/ONNX: Potential problem with processing attributes for node " << node_proto.name() << " Attribute " << attribute_name.c_str()
 +                );
 +                continue;
 +            }
 +            throw;
          }
      }
      return lp;
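
Note on the merged error handling: the new catch block only downgrades an attribute-parsing failure to a log message (and skips that attribute) when DNN_DIAGNOSTICS_RUN is set; otherwise the exception is rethrown as before. A minimal sketch of exercising that path from user code, assuming the public cv::dnn::enableModelDiagnostics() switch is available in the build and using a hypothetical model path:

    // Sketch only: "model.onnx" is a placeholder, not part of this commit.
    #include <opencv2/dnn.hpp>

    int main()
    {
        // Enables diagnostics mode (DNN_DIAGNOSTICS_RUN), so unsupported or
        // malformed ONNX attributes are logged and skipped by getLayerParams()
        // instead of aborting the import with an exception.
        cv::dnn::enableModelDiagnostics(true);
        cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");
        return net.empty() ? 1 : 0;
    }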