return p.getArrayResult();
}
- //! @}
- }} // namespaces cv / utils
+namespace fs {
+ CV_EXPORTS_W cv::String getCacheDirectoryForDownloads();
+} // namespace fs
++
+ //! @} // core_utils
+ } // namespace cv::utils
+
+ //! @cond IGNORED
+
+ /** @brief Sets the global logging level using a plain int (binding-friendly overload).
+  *
+  * Forwards to cv::utils::logging::setLogLevel() and returns its result.
+  * @param level numeric value of a cv::utils::logging::LogLevel constant
+  */
+ CV_WRAP static inline
+ int setLogLevel(int level)
+ {
+ // NB: Binding generators don't handle enums properly yet, so we define a separate int-based overload here
+ return cv::utils::logging::setLogLevel((cv::utils::logging::LogLevel)level);
+ }
+
+ /** @brief Returns the current global logging level as a plain int (binding-friendly overload).
+  *
+  * Forwards to cv::utils::logging::getLogLevel().
+  */
+ CV_WRAP static inline
+ int getLogLevel()
+ {
+ return cv::utils::logging::getLogLevel();
+ }
+
+ //! @endcond IGNORED
+
+ } // namespaces cv / utils
#endif // OPENCV_CORE_BINDINGS_UTILS_HPP
*/
CV_EXPORTS_W cv::String getInferenceEngineVPUType();
+ /** @brief Returns Inference Engine CPU type.
+ *
+ * Specifies which OpenVINO CPU plugin is in use: x86 CPU or ARM.
+ */
+ CV_EXPORTS_W cv::String getInferenceEngineCPUType();
-CV__DNN_EXPERIMENTAL_NS_END
+/** @brief Release a HDDL plugin.
+ */
+CV_EXPORTS_W void releaseHDDLPlugin();
+
+
+CV__DNN_INLINE_NS_END
}} // namespace
#endif // OPENCV_DNN_UTILS_INF_ENGINE_HPP
preferableTarget == DNN_TARGET_FPGA
);
}
+ #endif
+ CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
+ preferableTarget == DNN_TARGET_VULKAN);
+ CV_Assert(preferableBackend != DNN_BACKEND_CUDA ||
+ IS_DNN_CUDA_TARGET(preferableTarget));
if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
{
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
- if (ksize == 1)
+ bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
+ if (isArmTarget && blobs.empty())
return false;
+ if (ksize == 1)
+ return isArmTarget;
if (ksize == 3)
- return preferableTarget == DNN_TARGET_CPU;
+ return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
- if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+ if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
return false;
- return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
+ return (!isMyriad || dilation.width == dilation.height);
}
#endif
if (backendId == DNN_BACKEND_OPENCV)
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
- if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && preferableTarget == DNN_TARGET_MYRIAD)
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
- return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
- (!isMyriad ||
- (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
++ if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && isMyriad)
+ return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
+
+ return (dstRanges.size() <= 4 || !isArmComputePlugin());
}
#endif
return backendId == DNN_BACKEND_OPENCV ||
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+ #ifdef HAVE_INF_ENGINE
+ // With the nGraph IE backend on CPU, the ARM Compute plugin only supports
+ // _order (presumably the permutation order — confirm against the layer fields)
+ // of rank <= 4; other CPU plugins accept any rank.
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
+ return _order.size() <= 4 || !isArmComputePlugin();
+ #endif
return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+ backendId == DNN_BACKEND_CUDA ||
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
+ (backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
#endif
+ // Returns true when the Inference Engine "CPU" device is backed by the
+ // ARM Compute plugin, i.e. its FULL_DEVICE_NAME metric contains
+ // "arm_compute::NEON". Inspects the first available device whose name
+ // contains "CPU"; returns false when no such device is reported.
+ static bool detectArmPlugin_()
+ {
+ InferenceEngine::Core& ie = getCore("CPU");
+ const std::vector<std::string> devices = ie.GetAvailableDevices();
+ for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
+ {
+ if (i->find("CPU") != std::string::npos)
+ {
+ // Full device name is logged to aid diagnosing plugin selection issues.
+ const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+ CV_LOG_INFO(NULL, "CPU plugin: " << name);
+ return name.find("arm_compute::NEON") != std::string::npos;
+ }
+ }
+ return false;
+ }
+
#if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
-static bool detectMyriadX_()
+static bool detectMyriadX_(std::string device)
{
AutoLock lock(getInitializationMutex());
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3)
#ifdef HAVE_INF_ENGINE
bool isMyriadX()
{
- static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
- return myriadX;
+ // Cached in a function-local static: getInferenceEngineVPUType() is queried only once.
+ static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
+ return myriadX;
}
+ // Returns true when the Inference Engine CPU plugin is the ARM Compute
+ // implementation. Cached in a function-local static:
+ // getInferenceEngineCPUType() is evaluated only on the first call.
+ bool isArmComputePlugin()
+ {
+ static bool armPlugin = getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE;
+ return armPlugin;
+ }
+
static std::string getInferenceEngineVPUType_()
{
static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
bool isMyriadX();
-CV__DNN_EXPERIMENTAL_NS_END
+ bool isArmComputePlugin();
+
+CV__DNN_INLINE_NS_END
+
+
InferenceEngine::Core& getCore(const std::string& id);
template<typename T = size_t>
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2 "dnn_skip_ie_myriad2"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X "dnn_skip_ie_myriadx"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
+ #define CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU "dnn_skip_ie_arm_cpu"
+#define CV_TEST_TAG_DNN_SKIP_VULKAN "dnn_skip_vulkan"
+
+#define CV_TEST_TAG_DNN_SKIP_CUDA "dnn_skip_cuda"
+#define CV_TEST_TAG_DNN_SKIP_CUDA_FP16 "dnn_skip_cuda_fp16"
+#define CV_TEST_TAG_DNN_SKIP_CUDA_FP32 "dnn_skip_cuda_fp32"
+
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if (backend == DNN_BACKEND_CUDA)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
+ if (backend == DNN_BACKEND_VKCOM)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
+
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU &&
+ getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+
String basename = "conv_variable_wb";
Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
ASSERT_FALSE(net.empty());