set(dnn_runtime_libs "")
if(INF_ENGINE_TARGET)
+ ocv_option(OPENCV_DNN_IE_NN_BUILDER_2019 "Build with Inference Engine NN Builder API support" ON)
+ if(OPENCV_DNN_IE_NN_BUILDER_2019)
+ message(STATUS "DNN: Enabling Inference Engine NN Builder API support")
+ add_definitions(-DHAVE_DNN_IE_NN_BUILDER_2019=1)
+ endif()
list(APPEND dnn_runtime_libs ${INF_ENGINE_TARGET})
endif()
if(HAVE_NGRAPH)
+ message(STATUS "DNN: Enabling Inference Engine nGraph API support")
add_definitions(-DHAVE_DNN_NGRAPH)
list(APPEND dnn_runtime_libs ngraph::ngraph)
endif()
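A hedged configure-time example (only the option name above is taken from the patch; the rest is a stock CMake invocation): builds that need only the nGraph path can drop the deprecated Builder API with

    cmake -DOPENCV_DNN_IE_NN_BUILDER_2019=OFF <opencv_source_dir>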
#ifdef HAVE_INF_ENGINE
if (checkIETarget(DNN_TARGET_CPU)) {
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_CPU));
+#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_CPU));
#endif
}
if (checkIETarget(DNN_TARGET_MYRIAD)) {
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_MYRIAD));
+#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
#endif
}
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (checkIETarget(DNN_TARGET_FPGA))
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_FPGA));
+#endif
#ifdef HAVE_OPENCL
if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
{
if (checkIETarget(DNN_TARGET_OPENCL)) {
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL));
+#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL));
#endif
}
if (checkIETarget(DNN_TARGET_OPENCL_FP16)) {
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL_FP16));
+#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL_FP16));
#endif
}
}
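The (backend, target) pairs registered above are what callers select at runtime. A minimal hedged usage sketch; the model filenames are placeholders, and only combinations registered above will be honored:

#include <opencv2/dnn.hpp>

void selectNgraphBackend()
{
    cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin"); // hypothetical OpenVINO IR files
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
}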
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
CV_CheckEQ(inputsData.size(), (size_t)1, "");
addConstantData("biases", biases, ieLayer);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
std::vector<String> outNames;
std::vector<MatShape> shapes;
}
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine API support");
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
#endif
}
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
initHalideBackend();
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
initInfEngineBackend(blobsToKeep_);
#else
- CV_Assert(false && "This OpenCV version is built without Inference Engine API support");
+ CV_Assert(false && "This OpenCV version is built without Inference Engine NN Builder API support");
#endif
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
}
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
// Before launching the Inference Engine graph we need to specify output blobs.
// This function requests output blobs based on input references of
// layers from the default backend or layers from different graphs.
}
}
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
return std::move(wrapper->futureMat);
+#else
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
+#endif
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
else
#endif
{
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
backendNode = backendNodeNN;
+#else
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
+#endif
}
for (auto& it : ieNet.getOutputsInfo())
else
#endif
{
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
ld.layerInstance = cvLayer;
ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019] = backendNode;
+#else
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
+#endif
}
for (int i = 0; i < inputsNames.size(); ++i)
// For networks with an input layer which has an empty name, IE generates a name id[some_number].
// OpenCV lets users use an empty input name, so to prevent unexpected naming
// we substitute a predefined name instead.
-static std::string kDefaultInpLayerName = "empty_inp_layer_name";
-static constexpr const char* kOpenCVLayersType = "OpenCVLayer";
+static std::string kDefaultInpLayerName = "opencv_ngraph_empty_inp_layer_name";
+static constexpr const char* kOpenCVLayersType = "opencv_ngraph_layer";
static std::string shapesToStr(const std::vector<Mat>& mats)
{
return type_info;
}
- NgraphCustomOp() {};
NgraphCustomOp(const ngraph::NodeVector& inputs,
const std::map<std::string, InferenceEngine::Parameter>& params = {}):
Op(inputs), params(params)
{
constructor_validate_and_infer_types();
}
+ ~NgraphCustomOp()
+ {
+ // nothing
+ }
+
void validate_and_infer_types() override
{
std::vector<std::vector<size_t> > shapes;
std::map<std::string, InferenceEngine::Parameter> params;
};
+
+class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
+{
+public:
+ explicit InfEngineNgraphCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
+ {
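+ // The graph-construction side is expected to store the address of the
+ // OpenCV fallback layer in the "impl" parameter as a decimal string;
+ // recover the Layer pointer from it here.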
+ std::istringstream iss(layer.GetParamAsString("impl"));
+ size_t ptr;
+ iss >> ptr;
+ cvLayer = (Layer*)ptr;
+
+ std::vector<std::vector<size_t> > shapes;
+ strToShapes(layer.GetParamAsString("internals"), shapes);
+ internals.resize(shapes.size());
+ for (int i = 0; i < shapes.size(); ++i)
+ internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
+ }
+
+ ~InfEngineNgraphCustomLayer()
+ {
+ // nothing
+ }
+
+ virtual InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
+ std::vector<InferenceEngine::Blob::Ptr>& outputs,
+ InferenceEngine::ResponseDesc *resp) noexcept
+ {
+ std::vector<Mat> inpMats, outMats;
+ infEngineBlobsToMats(inputs, inpMats);
+ infEngineBlobsToMats(outputs, outMats);
+
+ try
+ {
+ cvLayer->forward(inpMats, outMats, internals);
+ return InferenceEngine::StatusCode::OK;
+ }
+ catch (...)
+ {
+ return InferenceEngine::StatusCode::GENERAL_ERROR;
+ }
+ }
+
+ virtual InferenceEngine::StatusCode
+ getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
+ InferenceEngine::ResponseDesc* resp) noexcept
+ {
+ std::vector<InferenceEngine::DataConfig> inDataConfig;
+ std::vector<InferenceEngine::DataConfig> outDataConfig;
+ for (auto& it : cnnLayer.insData)
+ {
+ InferenceEngine::DataConfig conf;
+ conf.desc = it.lock()->getTensorDesc();
+ inDataConfig.push_back(conf);
+ }
+
+ for (auto& it : cnnLayer.outData)
+ {
+ InferenceEngine::DataConfig conf;
+ conf.desc = it->getTensorDesc();
+ outDataConfig.push_back(conf);
+ }
+
+ InferenceEngine::LayerConfig layerConfig;
+ layerConfig.inConfs = inDataConfig;
+ layerConfig.outConfs = outDataConfig;
+
+ conf.push_back(layerConfig);
+ return InferenceEngine::StatusCode::OK;
+ }
+
+ InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
+ InferenceEngine::ResponseDesc *resp) noexcept
+ {
+ return InferenceEngine::StatusCode::OK;
+ }
+
+private:
+ InferenceEngine::CNNLayer cnnLayer;
+ dnn::Layer* cvLayer;
+ std::vector<Mat> internals;
+};
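strToShapes() is called in the constructor above but its body is elided from this excerpt. Below is a minimal sketch of a serializer/parser pair consistent with that call, assuming a plain "count, then rank and dims per shape" text format; the patch's actual format may differ, so the names carry a Sketch suffix:

#include <sstream>
#include <string>
#include <vector>

static std::string shapesToStrSketch(const std::vector<std::vector<size_t> >& shapes)
{
    std::ostringstream ss;
    ss << shapes.size();                  // number of shapes
    for (const std::vector<size_t>& s : shapes)
    {
        ss << " " << s.size();            // rank
        for (size_t d : s)
            ss << " " << d;               // dimensions
    }
    return ss.str();
}

static void strToShapesSketch(const std::string& str, std::vector<std::vector<size_t> >& shapes)
{
    std::istringstream ss(str);
    size_t num = 0;
    ss >> num;
    shapes.resize(num);
    for (size_t i = 0; i < num; ++i)
    {
        size_t dims = 0;
        ss >> dims;
        shapes[i].resize(dims);
        for (size_t j = 0; j < dims; ++j)
            ss >> shapes[i][j];
    }
}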
+
+
+class InfEngineNgraphCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
+public:
+ explicit InfEngineNgraphCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer)
+ {
+ // nothing
+ }
+
+ InferenceEngine::StatusCode
+ getImplementations(std::vector<InferenceEngine::ILayerImpl::Ptr>& impls,
+ InferenceEngine::ResponseDesc* resp) noexcept override
+ {
+ impls.push_back(std::make_shared<InfEngineNgraphCustomLayer>(cnnLayer));
+ return InferenceEngine::StatusCode::OK;
+ }
+
+private:
+ InferenceEngine::CNNLayer cnnLayer;
+};
+
+
+class InfEngineNgraphExtension : public InferenceEngine::IExtension
+{
+public:
+ virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
+ virtual void Unload() noexcept {}
+ virtual void Release() noexcept {}
+ virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
+
+ virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
+ InferenceEngine::ResponseDesc*) noexcept
+ {
+ return InferenceEngine::StatusCode::OK;
+ }
+
+ InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
+ const InferenceEngine::CNNLayer* cnnLayer,
+ InferenceEngine::ResponseDesc* resp) noexcept
+ {
+ if (cnnLayer->type != kOpenCVLayersType)
+ return InferenceEngine::StatusCode::NOT_IMPLEMENTED;
+ factory = new InfEngineNgraphCustomLayerFactory(cnnLayer);
+ return InferenceEngine::StatusCode::OK;
+ }
+};
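+// Taken together, the three classes above form the nGraph custom-layer fallback
+// chain: getFactoryFor() answers only for nodes of kOpenCVLayersType, the factory
+// produces an InfEngineNgraphCustomLayer, and execute() routes the inference
+// request back into the wrapped cv::dnn::Layer::forward().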
+
+
+
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
// OpenCV fallbacks as extensions.
try
{
- ie.AddExtension(std::make_shared<InfEngineExtension>(), "CPU");
+ ie.AddExtension(std::make_shared<InfEngineNgraphExtension>(), "CPU");
}
catch(const std::exception& e)
{
- CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers extension: " << e.what());
+ CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers nGraph extension: " << e.what());
}
#ifndef _WIN32
// Limit the number of CPU threads.
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
}
#endif
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
blobs[0].copyTo(outputs[0]);
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ConstLayer ieLayer(name);
ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
blobs[0].data);
return Ptr<BackendNode>(new InfEngineNgraphNode(node));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_CUDA
Ptr<BackendNode> initCUDA(
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
return group == 1;
}
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
return true;
}
- else
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#endif // HAVE_INF_ENGINE
+ {
return backendId == DNN_BACKEND_CUDA ||
(kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE));
+ }
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
{
InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
}
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::DetectionOutputLayer ieLayer(name);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI();
ieLayer.setName(this->name);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
bool supportBackend(int backendId, int)
{
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+#endif
+#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope);
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue);
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::TanHLayer("");
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
CV_Error(Error::StsNotImplemented, "");
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
CV_Error(Error::StsNotImplemented, "");
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::SigmoidLayer("");
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ELULayer("");
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-0.999999f);
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
CV_Error(Error::StsNotImplemented, "");
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::PowerLayer("").setPower(power)
.setScale(scale)
.setShift(shift);
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
return l;
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::EltwiseLayer ieLayer(name);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
}
#endif
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
return Ptr<BackendNode>(new InfEngineNgraphNode(reshape));
}
#endif // HAVE_DNN_NGRAPH
- // HAVE_INF_ENGINE
int _startAxis;
int _endAxis;
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::FullyConnectedLayer ieLayer(name);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
#endif // HAVE_HALIDE
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
float alphaSize = alpha;
l.getParameters()["k"] = bias;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+#endif
+#ifdef HAVE_DNN_NGRAPH
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
- else
-#endif // HAVE_INF_ENGINE
+#endif
+ {
return backendId == DNN_BACKEND_OPENCV;
+ }
}
#ifdef HAVE_OPENCL
}
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::MVNLayer ieLayer(name);
ieLayer.setEpsilon(eps);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
}
#endif
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::PermuteLayer ieLayer(name);
ieLayer.setOrder(_order);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
{
return type == MAX || type == AVE || type == ROI;
}
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (computeMaxIdx)
return false;
-#ifdef HAVE_INF_ENGINE
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
if (preferableTarget == DNN_TARGET_MYRIAD) {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2)) {
return !isMyriadX();
}
}
else
return type != STOCHASTIC;
-#else
- return false;
-#endif
}
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
+#endif
+ else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
return !computeMaxIdx && type != STOCHASTIC;
}
- else
+ else if (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_VKCOM)
{
if (kernel_size.size() == 3)
return (backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU);
else
return false;
}
+ return false;
}
#ifdef HAVE_OPENCL
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
if (type == MAX || type == AVE)
CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
return Ptr<BackendNode>();
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
if (_explicitSizes)
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
layerOutputs[0].col(2).copyTo(dst);
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ProposalLayer ieLayer(name);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
}
#endif
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ReorgYoloLayer ieLayer(name);
ieLayer.setStride(reorgStride);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
}
#endif
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::ReshapeLayer ieLayer(name);
ieLayer.setDims(outShapes[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
return interpolation == "nearest" || interpolation == "bilinear";
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
(interpolation == "bilinear");
}
#endif
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
-#ifdef HAVE_INF_ENGINE
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
if (interpolation == "nearest")
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif // HAVE_INF_ENGINE
- return Ptr<BackendNode>();
}
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
}
#endif // HAVE_HALIDE
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_CUDA ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && sliceRanges.size() == 1) ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
-#ifdef HAVE_INF_ENGINE
- INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
+ sliceRanges.size() == 1 && sliceRanges[0].size() == 4;
+#endif
+#ifdef HAVE_DNN_NGRAPH
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return sliceRanges.size() == 1;
#endif
- sliceRanges.size() == 1 && sliceRanges[0].size() == 4);
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
#endif
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
return Ptr<BackendNode>();
}
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
{
static Backend param = parseInferenceEngineBackendType(
utils::getConfigurationParameterString("OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE",
-#ifdef HAVE_NGRAPH
- CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API // future: CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
+#ifndef HAVE_DNN_IE_NN_BUILDER_2019
+ CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
#else
CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API
#endif
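At runtime this default can be overridden through the environment variable read above. A hedged example, assuming the CV_DNN_BACKEND_* constants expand to the strings their names suggest:

    OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE=NGRAPH ./my_dnn_app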
CV__DNN_INLINE_NS_END
+
+Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
+{
+ // NOTE: Inference Engine sizes are reversed.
+ std::vector<size_t> dims = blob->getTensorDesc().getDims();
+ std::vector<int> size(dims.begin(), dims.end());
+ auto precision = blob->getTensorDesc().getPrecision();
+
+ int type = -1;
+ switch (precision)
+ {
+ case InferenceEngine::Precision::FP32: type = CV_32F; break;
+ case InferenceEngine::Precision::U8: type = CV_8U; break;
+ default:
+ CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
+ }
+ return Mat(size, type, (void*)blob->buffer());
+}
+
+void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
+ std::vector<Mat>& mats)
+{
+ mats.resize(blobs.size());
+ for (int i = 0; i < blobs.size(); ++i)
+ mats[i] = infEngineBlobToMat(blobs[i]);
+}
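A hedged usage note for the helpers above: the returned Mat aliases blob->buffer() rather than copying it, so the view is valid only while the blob is alive. Sketch:

static void exampleBlobView(const InferenceEngine::Blob::Ptr& blob)
{
    cv::Mat view = infEngineBlobToMat(blob); // zero-copy view of the blob's memory
    cv::Mat owned = view.clone();            // deep copy, safe to keep after the blob is released
    CV_UNUSED(owned);
}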
+
+
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+
// For networks with an input layer which has an empty name, IE generates a name id[some_number].
// OpenCV lets users use an empty input name, so to prevent unexpected naming
// we substitute a predefined name instead.
}
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
}
#endif // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
+
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+
void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
{
CV_Assert(!isInitialized());
}
}
-Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
-{
- // NOTE: Inference Engine sizes are reversed.
- std::vector<size_t> dims = blob->getTensorDesc().getDims();
- std::vector<int> size(dims.begin(), dims.end());
- auto precision = blob->getTensorDesc().getPrecision();
-
- int type = -1;
- switch (precision)
- {
- case InferenceEngine::Precision::FP32: type = CV_32F; break;
- case InferenceEngine::Precision::U8: type = CV_8U; break;
- default:
- CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
- }
- return Mat(size, type, (void*)blob->buffer());
-}
-
-void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
- std::vector<Mat>& mats)
-{
- mats.resize(blobs.size());
- for (int i = 0; i < blobs.size(); ++i)
- mats[i] = infEngineBlobToMat(blobs[i]);
-}
-
bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
#endif
}
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
+
#endif // HAVE_INF_ENGINE
bool haveInfEngine()
Ptr<BackendNode>& node, bool isAsync)
{
CV_Assert(haveInfEngine());
-#ifdef HAVE_INF_ENGINE
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
CV_Assert(!node.empty());
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieNode.empty());
ieNode->net->forward(outBlobsWrappers, isAsync);
+#else
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif // HAVE_INF_ENGINE
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
}
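A hedged caller-side sketch of the isAsync path above (forwardAsync() is available only in Inference Engine builds; the input blob is assumed to be prepared with blobFromImage or similar):

#include <opencv2/dnn.hpp>

void exampleAsyncForward(cv::dnn::Net& net, const cv::Mat& inputBlob)
{
    net.setInput(inputBlob);
    cv::AsyncArray handle = net.forwardAsync(); // non-blocking submit of one inference request
    cv::Mat result;
    handle.get(result);                         // blocks until the request completes
}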
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
//#define INFERENCE_ENGINE_DEPRECATED // turn off deprecation warnings from IE
//there is currently no way to suppress warnings from IE alone, so we are forced to suppress warnings globally
#if defined(__GNUC__)
#ifdef _MSC_VER
#pragma warning(disable: 4996) // was declared deprecated
#endif
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
#pragma GCC visibility push(default)
Backend& getInferenceEngineBackendTypeParam();
+Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
+
+void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
+ std::vector<Mat>& mats);
+
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+
class InfEngineBackendNet
{
public:
InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
-Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
-
-void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
- std::vector<Mat>& mats);
-
// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
InferenceEngine::ResponseDesc* resp) noexcept;
};
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
+
CV__DNN_INLINE_NS_BEGIN
#ifdef HAVE_DNN_NGRAPH
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH,
#endif
- CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+ CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER,
+#endif
+ ""
);
#endif
registerGlobalSkipTag(
EXPECT_TRUE(res.empty()) << res.size;
}
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
TEST(readNet, do_not_call_setInput_IE_NN_BUILDER_2019)
{
test_readNet_IE_do_not_call_setInput(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
}
+#endif
+#ifdef HAVE_DNN_NGRAPH
TEST(readNet, do_not_call_setInput_IE_NGRAPH)
{
test_readNet_IE_do_not_call_setInput(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
}
+#endif
#endif // HAVE_INF_ENGINE
typedef testing::TestWithParam<tuple<Backend, Target> > dump;
void registerGlobalSkipTag(const std::string& skipTag)
{
+ if (skipTag.empty())
+ return; // do nothing
std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t i = 0; i < skipTags.size(); ++i)
{