#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2 "Myriad2"
/// Intel(R) Neural Compute Stick 2, NCS2 (USB 03e7:2485), MyriadX (https://software.intel.com/ru-ru/neural-compute-stick)
#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X "MyriadX"
+#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE "ARM_COMPUTE"
+#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86 "X86"
/** @brief Returns Inference Engine VPU type.
*/
CV_EXPORTS_W cv::String getInferenceEngineVPUType();
+/** @brief Returns Inference Engine CPU type.
+ *
+ * Identifies which OpenVINO CPU plugin is in use: returns
+ * CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE when the ARM plugin
+ * (arm_compute::NEON) is detected, and CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86 otherwise.
+ */
+CV_EXPORTS_W cv::String getInferenceEngineCPUType();
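
// Usage sketch (illustrative only, not part of this header): an application can
// query the detected CPU plugin before choosing a backend/target, e.g.
//
//     cv::dnn::Net net = cv::dnn::readNet(model);  // 'model' is a hypothetical file path
//     if (cv::dnn::getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
//     {
//         net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);  // ARM plugin is used via the nGraph backend
//         net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
//     }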
CV__DNN_EXPERIMENTAL_NS_END
}} // namespace
CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
preferableTarget == DNN_TARGET_CPU ||
preferableTarget == DNN_TARGET_OPENCL);
+#ifdef HAVE_INF_ENGINE
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
CV_Assert(
- preferableTarget == DNN_TARGET_CPU ||
+ (preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD ||
preferableTarget == DNN_TARGET_FPGA
);
}
+#endif
if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
{
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
return;
}
- bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
- BackendRegistry::checkIETarget(DNN_TARGET_CPU);
+ bool supportsCPUFallback = !isArmComputePlugin() && (preferableTarget == DNN_TARGET_CPU ||
+ BackendRegistry::checkIETarget(DNN_TARGET_CPU));
    // Build Inference Engine networks from sets of layers that support this
    // backend. Split a whole model into several Inference Engine networks if
    // some of the layers are not supported.
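
// A minimal sketch of the splitting idea above (illustrative only; this helper is
// hypothetical and not part of dnn.cpp): consecutive layers with the same backend
// support form one group, so each unsupported layer starts a new group and hence a
// separate Inference Engine subnetwork. Assumes <vector> is available.
static std::vector<std::vector<int> > groupLayersBySupport(const std::vector<bool>& supported)
{
    std::vector<std::vector<int> > groups;  // runs of consecutive layer ids with equal support
    for (int i = 0; i < (int)supported.size(); ++i)
    {
        if (groups.empty() || supported[i] != supported[i - 1])
            groups.emplace_back();          // support status changed: start a new group
        groups.back().push_back(i);
    }
    return groups;
}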
shape[1] = weights_.total();
auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), weights_.data);
auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), bias_.data);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
+ auto scale_node = std::make_shared<ngraph::op::v1::Multiply>(ieInpNode, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#else
auto scale_node = std::make_shared<ngraph::op::v0::Multiply>(ieInpNode, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#endif
auto scale_shift = std::make_shared<ngraph::op::v1::Add>(scale_node, bias, ngraph::op::AutoBroadcastType::NUMPY);
return Ptr<BackendNode>(new InfEngineNgraphNode(scale_shift));
}
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
- if (ksize == 1)
+ bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
+ if (isArmTarget && blobs.empty())
return false;
+ if (ksize == 1)
+ return isArmTarget;
if (ksize == 3)
- return preferableTarget == DNN_TARGET_CPU;
+ return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
return false;
return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1);
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> dims = ieInpNode->get_shape();
- CV_Assert(dims.size() == 4 || dims.size() == 5);
+ CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, "");
std::shared_ptr<ngraph::Node> ieWeights = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
if (nodes.size() > 1)
CV_Assert(ieWeights); // dynamic_cast should not fail
else
{
auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
- ngraph::Shape{kernel_shape.size()}, kernel_shape.data());
+ ngraph::Shape{kernel_shape.size()}, std::vector<int64_t>(kernel_shape.begin(), kernel_shape.end()));
ieWeights = std::make_shared<ngraph::op::v1::Reshape>(ieWeights, shape, true);
}
if (nodes.size() == 3)
{
auto bias_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
- ngraph::Shape{shape.size()}, shape.data());
+ ngraph::Shape{shape.size()}, std::vector<int64_t>(shape.begin(), shape.end()));
bias = std::make_shared<ngraph::op::v1::Reshape>(nodes[2].dynamicCast<InfEngineNgraphNode>()->node, bias_shape, true);
}
else
ngraph::Shape{1}, &scale);
auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape{1}, &shift);
- auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
- ngraph::Shape{1}, &power);
auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
+
+ if (power == 1)
+ return scale_shift;
+
+ auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+ ngraph::Shape{1}, &power);
return std::make_shared<ngraph::op::v1::Power>(scale_shift, power_node, ngraph::op::AutoBroadcastType::NUMPY);
}
#endif // HAVE_DNN_NGRAPH
if (!acrossSpatial) {
axes_data.push_back(1);
} else {
- axes_data.resize(ieInpNode->get_shape().size());
- std::iota(axes_data.begin(), axes_data.end(), 0);
+ axes_data.resize(ieInpNode->get_shape().size() - 1);
+ std::iota(axes_data.begin(), axes_data.end(), 1);
}
auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_data.size()}, axes_data);
auto norm = std::make_shared<ngraph::op::NormalizeL2>(ieInpNode, axes, epsilon, ngraph::op::EpsMode::ADD);
std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
shape[0] = blobs.empty() ? 1 : batch;
shape[1] = numChannels;
- std::shared_ptr<ngraph::op::Constant> weight;
- if (blobs.empty())
+ if (!blobs.empty())
{
- std::vector<float> ones(numChannels, 1);
- weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), ones.data());
- }
- else
- {
- weight = std::make_shared<ngraph::op::Constant>(
+ auto weight = std::make_shared<ngraph::op::Constant>(
ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
+ auto mul = std::make_shared<ngraph::op::v1::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#else
+ auto mul = std::make_shared<ngraph::op::v0::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#endif
+ return Ptr<BackendNode>(new InfEngineNgraphNode(mul));
}
- auto mul = std::make_shared<ngraph::op::v0::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
- return Ptr<BackendNode>(new InfEngineNgraphNode(mul));
+ return Ptr<BackendNode>(new InfEngineNgraphNode(norm));
}
#endif // HAVE_DNN_NGRAPH
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
- (preferableTarget != DNN_TARGET_MYRIAD ||
- (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
+ {
+ if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && preferableTarget == DNN_TARGET_MYRIAD)
+ return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
+
+ return (dstRanges.size() <= 4 || !isArmComputePlugin());
+ }
#endif
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4);
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
+ return _order.size() <= 4 || !isArmComputePlugin();
+#endif
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
}
#endif
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
- return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1;
+#ifdef HAVE_DNN_NGRAPH
+ return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin());
+#endif
}
else if (backendId == DNN_BACKEND_OPENCV)
{
std::vector<int64_t> mask(anchors, 1);
region = std::make_shared<ngraph::op::RegionYolo>(tr_input, coords, classes, anchors, useSoftmax, mask, 1, 3, anchors_vec);
+ auto tr_shape = tr_input->get_shape();
auto shape_as_inp = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
- ngraph::Shape{tr_input->get_shape().size()}, tr_input->get_shape().data());
+ ngraph::Shape{tr_shape.size()},
+ std::vector<int64_t>(tr_shape.begin(), tr_shape.end()));
region = std::make_shared<ngraph::op::v1::Reshape>(region, shape_as_inp, true);
new_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{4}, std::vector<int64_t>{0, 2, 3, 1});
result = std::make_shared<ngraph::op::Transpose>(result, tr_axes);
if (b > 1)
{
- std::vector<size_t> sizes = {(size_t)b, result->get_shape()[0] / b, result->get_shape()[1]};
+ std::vector<int64_t> sizes{b, static_cast<int64_t>(result->get_shape()[0]) / b, static_cast<int64_t>(result->get_shape()[1])};
auto shape_node = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{sizes.size()}, sizes.data());
result = std::make_shared<ngraph::op::v1::Reshape>(result, shape_node, true);
}
auto weight = blobs.empty() ? ieInpNode1 :
std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
- node = std::make_shared<ngraph::op::v0::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
+ node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#else
+ node = std::make_shared<ngraph::op::v0::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#endif
}
if (hasBias || !hasWeights)
{
}
#endif
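+// Queries the Inference Engine "CPU" device and reports whether its full device
+// name identifies the ARM plugin (arm_compute::NEON backend).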
+static bool detectArmPlugin_()
+{
+ InferenceEngine::Core& ie = getCore("CPU");
+ const std::vector<std::string> devices = ie.GetAvailableDevices();
+ for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
+ {
+ if (i->find("CPU") != std::string::npos)
+ {
+ const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+ CV_LOG_INFO(NULL, "CPU plugin: " << name);
+ return name.find("arm_compute::NEON") != std::string::npos;
+ }
+ }
+ return false;
+}
+
#if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
static bool detectMyriadX_()
{
return myriadX;
}
+bool isArmComputePlugin()
+{
+ static bool armPlugin = getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE;
+ return armPlugin;
+}
+
static std::string getInferenceEngineVPUType_()
{
static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
return vpu_type;
}
+cv::String getInferenceEngineCPUType()
+{
+ static cv::String cpu_type = detectArmPlugin_() ?
+ CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE :
+ CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86;
+ return cpu_type;
+}
+
#else // HAVE_INF_ENGINE
cv::String getInferenceEngineBackendType()
{
CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
}
+
+cv::String getInferenceEngineCPUType()
+{
+ CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
+}
#endif // HAVE_INF_ENGINE
bool isMyriadX();
+bool isArmComputePlugin();
+
CV__DNN_EXPERIMENTAL_NS_END
InferenceEngine::Core& getCore(const std::string& id);
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2 "dnn_skip_ie_myriad2"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X "dnn_skip_ie_myriadx"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
+#define CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU "dnn_skip_ie_arm_cpu"
#ifdef HAVE_INF_ENGINE
backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU &&
+ getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+
String basename = "conv_variable_wb";
Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
ASSERT_FALSE(net.empty());
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if (target == DNN_TARGET_CPU && getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
String basename = "conv1d_variable_wb";
Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));