Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author     Alexander Alekhin <alexander.a.alekhin@gmail.com>
           Mon, 22 Mar 2021 12:05:23 +0000 (12:05 +0000)
committer  Alexander Alekhin <alexander.a.alekhin@gmail.com>
           Mon, 22 Mar 2021 12:05:23 +0000 (12:05 +0000)
16 files changed:
modules/core/include/opencv2/core/bindings_utils.hpp
modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
modules/dnn/src/dnn.cpp
modules/dnn/src/layers/convolution_layer.cpp
modules/dnn/src/layers/elementwise_layers.cpp
modules/dnn/src/layers/normalize_bbox_layer.cpp
modules/dnn/src/layers/padding_layer.cpp
modules/dnn/src/layers/permute_layer.cpp
modules/dnn/src/layers/pooling_layer.cpp
modules/dnn/src/layers/region_layer.cpp
modules/dnn/src/op_inf_engine.cpp
modules/dnn/src/op_inf_engine.hpp
modules/dnn/test/test_common.hpp
modules/dnn/test/test_onnx_importer.cpp
modules/features2d/include/opencv2/features2d.hpp
platforms/js/opencv_js.config.py

@@@ -144,10 -145,26 +145,30 @@@ AsyncArray testAsyncException(
      return p.getArrayResult();
  }
  
- //! @}
- }} // namespaces cv /  utils
 +namespace fs {
 +    CV_EXPORTS_W cv::String getCacheDirectoryForDownloads();
 +} // namespace fs
++
+ //! @}  // core_utils
+ }  // namespace cv::utils
+ //! @cond IGNORED
+ CV_WRAP static inline
+ int setLogLevel(int level)
+ {
+     // NB: Binding generators don't work with enums properly yet, so we define a separate int overload here
+     return cv::utils::logging::setLogLevel((cv::utils::logging::LogLevel)level);
+ }
+ CV_WRAP static inline
+ int getLogLevel()
+ {
+     return cv::utils::logging::getLogLevel();
+ }
+ //! @endcond IGNORED
+ } // namespace cv
  
  #endif // OPENCV_CORE_BINDINGS_UTILS_HPP
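
A minimal usage sketch for the bindings_utils additions above, assuming an OpenCV build that ships these headers (from C++ the enum-typed cv::utils::logging::setLogLevel is the natural call; the int wrappers in the hunk exist only for generated bindings):

    #include <iostream>

    #include <opencv2/core/bindings_utils.hpp>
    #include <opencv2/core/utils/logger.hpp>

    int main()
    {
        // Enum-typed logging API; the int-based wrappers above exist only
        // because binding generators cannot handle the LogLevel enum yet.
        cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_WARNING);

        // Added in this merge: query the directory used for cached downloads.
        std::cout << cv::utils::fs::getCacheDirectoryForDownloads() << std::endl;
        return 0;
    }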
@@@ -57,13 -59,13 +59,18 @@@ CV_EXPORTS_W void resetMyriadDevice()
   */
  CV_EXPORTS_W cv::String getInferenceEngineVPUType();
  
+ /** @brief Returns Inference Engine CPU type.
+  *
+  * Identifies the OpenVINO CPU plugin in use: CPU or ARM.
+  */
+ CV_EXPORTS_W cv::String getInferenceEngineCPUType();
  
 -CV__DNN_EXPERIMENTAL_NS_END
 +/** @brief Release an HDDL plugin.
 + */
 +CV_EXPORTS_W void releaseHDDLPlugin();
 +
 +
 +CV__DNN_INLINE_NS_END
  }} // namespace
  
  #endif // OPENCV_DNN_UTILS_INF_ENGINE_HPP
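
A short sketch of the new DNN Inference Engine helpers declared above; both calls assume an OpenVINO-enabled build (they raise an error otherwise):

    #include <iostream>

    #include <opencv2/dnn/utils/inference_engine.hpp>

    int main()
    {
        // Distinguishes the x86 CPU plugin from the ARM Compute plugin.
        std::cout << "IE CPU type: " << cv::dnn::getInferenceEngineCPUType() << std::endl;

        // Added in this merge: release the HDDL plugin explicitly, e.g.
        // before handing the device over to another process.
        cv::dnn::releaseHDDLPlugin();
        return 0;
    }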
@@@ -1394,10 -1298,7 +1395,11 @@@ struct Net::Impl : public detail::NetIm
                    preferableTarget == DNN_TARGET_FPGA
              );
          }
+ #endif
 +        CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
 +                  preferableTarget == DNN_TARGET_VULKAN);
 +        CV_Assert(preferableBackend != DNN_BACKEND_CUDA ||
 +                  IS_DNN_CUDA_TARGET(preferableTarget));
          if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
          {
              if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
@@@ -324,14 -273,16 +324,17 @@@ public
  #ifdef HAVE_INF_ENGINE
          if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
          {
-             if (ksize == 1)
+             bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
+             if (isArmTarget && blobs.empty())
                  return false;
+             if (ksize == 1)
+                 return isArmTarget;
              if (ksize == 3)
-                 return preferableTarget == DNN_TARGET_CPU;
+                 return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
 -            if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
 +            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
 +            if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
                  return false;
 -            return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
 +            return (!isMyriad || dilation.width == dilation.height);
          }
  #endif
          if (backendId == DNN_BACKEND_OPENCV)
@@@ -104,10 -98,10 +104,11 @@@ public
  #ifdef HAVE_INF_ENGINE
          if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
          {
 -            if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && preferableTarget == DNN_TARGET_MYRIAD)
 +            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
-             return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
-                    (!isMyriad ||
-                     (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
++            if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && isMyriad)
+                 return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
+             return (dstRanges.size() <= 4 || !isArmComputePlugin());
          }
  #endif
          return backendId == DNN_BACKEND_OPENCV ||
@@@ -113,10 -105,12 +113,14 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
+ #ifdef HAVE_INF_ENGINE
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
+             return _order.size() <= 4 || !isArmComputePlugin();
+ #endif
          return backendId == DNN_BACKEND_OPENCV ||
 -               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
 +               backendId == DNN_BACKEND_CUDA ||
 +               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
 +               (backendId == DNN_BACKEND_VKCOM && haveVulkan());
      }
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@@ -655,8 -651,24 +655,24 @@@ InferenceEngine::Core& getCore(const st
  }
  #endif
  
+ static bool detectArmPlugin_()
+ {
+     InferenceEngine::Core& ie = getCore("CPU");
+     const std::vector<std::string> devices = ie.GetAvailableDevices();
+     for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
+     {
+         if (i->find("CPU") != std::string::npos)
+         {
+             const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+             CV_LOG_INFO(NULL, "CPU plugin: " << name);
+             return name.find("arm_compute::NEON") != std::string::npos;
+         }
+     }
+     return false;
+ }
  #if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
 -static bool detectMyriadX_()
 +static bool detectMyriadX_(std::string device)
  {
      AutoLock lock(getInitializationMutex());
  #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3)
@@@ -1181,10 -1174,16 +1197,16 @@@ void releaseHDDLPlugin(
  #ifdef HAVE_INF_ENGINE
  bool isMyriadX()
  {
 -     static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
 -     return myriadX;
 +    static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
 +    return myriadX;
  }
  
+ bool isArmComputePlugin()
+ {
+     static bool armPlugin = getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE;
+     return armPlugin;
+ }
  static std::string getInferenceEngineVPUType_()
  {
      static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
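
isArmComputePlugin() above follows the same probe-once idiom as isMyriadX(): a costly Inference Engine query cached in a function-local static. A self-contained illustration of the pattern, where detectPlugin_ is a hypothetical stand-in for detectArmPlugin_:

    #include <iostream>

    // Hypothetical stand-in for a costly device probe such as detectArmPlugin_().
    static bool detectPlugin_()
    {
        std::cout << "probing device...\n";  // printed only once
        return true;
    }

    // Function-local static caches the probe result; its initialization is
    // thread-safe since C++11, so the probe runs at most once per process.
    static bool isPluginAvailable()
    {
        static const bool cached = detectPlugin_();
        return cached;
    }

    int main()
    {
        isPluginAvailable();  // runs the probe
        isPluginAvailable();  // reuses the cached result
        return 0;
    }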
@@@ -255,8 -254,10 +255,11 @@@ CV__DNN_INLINE_NS_BEGI
  
  bool isMyriadX();
  
 -CV__DNN_EXPERIMENTAL_NS_END
+ bool isArmComputePlugin();
 +CV__DNN_INLINE_NS_END
 +
  InferenceEngine::Core& getCore(const std::string& id);
  
  template<typename T = size_t>
  #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2         "dnn_skip_ie_myriad2"
  #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X         "dnn_skip_ie_myriadx"
  #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD           CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
+ #define CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU          "dnn_skip_ie_arm_cpu"
  
 +#define CV_TEST_TAG_DNN_SKIP_VULKAN              "dnn_skip_vulkan"
 +
 +#define CV_TEST_TAG_DNN_SKIP_CUDA                "dnn_skip_cuda"
 +#define CV_TEST_TAG_DNN_SKIP_CUDA_FP16           "dnn_skip_cuda_fp16"
 +#define CV_TEST_TAG_DNN_SKIP_CUDA_FP32           "dnn_skip_cuda_fp32"
 +
  
  #ifdef HAVE_INF_ENGINE
  #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
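
For context, a fragment showing how these new skip tags are consumed inside a DNN test body, mirroring the test_onnx_importer.cpp hunk below (applyTestTag, backend, and target are provided by the DNN test fixture):

    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);  // layer not supported on CUDA
    if (backend == DNN_BACKEND_VKCOM)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU &&
        getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);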
@@@ -151,11 -144,10 +151,15 @@@ TEST_P(Test_ONNX_layers, Convolution_va
           backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
          applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
  
 +    if (backend == DNN_BACKEND_CUDA)
 +        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
 +    if (backend == DNN_BACKEND_VKCOM)
 +        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
 +
+     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU &&
+         getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
      String basename = "conv_variable_wb";
      Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
      ASSERT_FALSE(net.empty());