if(DEFINED ENV{INTEL_CVSDK_DIR})
list(APPEND ie_root_paths "$ENV{INTEL_CVSDK_DIR}")
endif()
+ if(DEFINED INTEL_CVSDK_DIR)
+ list(APPEND ie_root_paths "${INTEL_CVSDK_DIR}")
+ endif()
if(WITH_INF_ENGINE AND NOT ie_root_paths)
list(APPEND ie_root_paths "/opt/intel/deeplearning_deploymenttoolkit/deployment_tools")
PERF_TEST_P_(DNNTestNetwork, OpenFace)
{
+ if (backend == DNN_BACKEND_HALIDE) throw SkipTestException("");
processNet("dnn/openface_nn4.small2.v1.t7", "", "",
Mat(cv::Size(96, 96), CV_32FC3), "", "torch");
}
Mat(cv::Size(368, 368), CV_32FC3), "", "caffe");
}
+PERF_TEST_P_(DNNTestNetwork, opencv_face_detector)
+{
+ if (backend == DNN_BACKEND_HALIDE ||
+ (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL))
+ throw SkipTestException("");
+ processNet("dnn/opencv_face_detector.caffemodel", "dnn/opencv_face_detector.prototxt", "",
+ Mat(cv::Size(300, 300), CV_32FC3), "", "caffe");
+}
+
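For reference, the model exercised by this new perf test can be run by hand through the public API. A minimal sketch (the image path and helper name are hypothetical; the 300x300 input size and mean values follow the accuracy test added below):

    #include <opencv2/dnn.hpp>
    #include <opencv2/imgcodecs.hpp>

    cv::Mat detectFaces(const std::string& imgPath)  // hypothetical helper
    {
        using namespace cv;
        using namespace cv::dnn;
        Net net = readNetFromCaffe("opencv_face_detector.prototxt",
                                   "opencv_face_detector.caffemodel");
        Mat img = imread(imgPath);
        Mat blob = blobFromImage(img, 1.0, Size(300, 300),
                                 Scalar(104.0, 177.0, 123.0), false, false);
        net.setInput(blob);
        return net.forward();  // SSD-style "detection_out" blob
    }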
const tuple<DNNBackend, DNNTarget> testCases[] = {
#ifdef HAVE_HALIDE
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_HALIDE, DNN_TARGET_CPU),
}
}
+#ifdef HAVE_INF_ENGINE
+ // Before launching an Inference Engine graph we need to specify its
+ // output blobs. This function requests output blobs based on input
+ // references of layers from the default backend or from different graphs.
+ void addInfEngineNetOutputs(LayerData &ld)
+ {
+ Ptr<InfEngineBackendNet> layerNet;
+ if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
+ {
+ Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
+ if (!node.empty())
+ {
+ Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
+ CV_Assert(!ieNode.empty(), !ieNode->net.empty());
+ layerNet = ieNode->net;
+ }
+ }
+ // For every input reference we check that it belongs to one of the
+ // Inference Engine backend graphs. Request an output blob if it does.
+ // Do nothing if the layer's input is from the same graph.
+ for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+ {
+ LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+ Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+ if (!inpNode.empty())
+ {
+ Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
+ CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
+ if (layerNet != ieInpNode->net)
+ {
+ // layerNet is empty or nodes are from different graphs.
+ ieInpNode->net->addOutput(inpLd.name);
+ }
+ }
+ }
+ }
+#endif // HAVE_INF_ENGINE
+
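To make the comment above concrete: when an unsupported layer splits the model, the Inference Engine graph that feeds it has to expose the boundary blob as an output. A schematic of the situation this function handles (layer names are hypothetical):

    // Supported by IE       default backend      supported by IE
    // [conv1 -> relu1]  ->   customLayer    ->  [conv2 -> softmax]
    //
    // relu1's output crosses a graph boundary, so the first Inference
    // Engine network must declare it as an output, which is exactly what
    // the loop above does:
    //     ieInpNode->net->addOutput(inpLd.name);  // here: "relu1"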
void initInfEngineBackend()
{
// Build Inference Engine networks from sets of layers that support this
- // backend. If an internal layer isn't supported we'll use default
- // implementation of it but build a new network after it.
+ // backend. Split the whole model into several Inference Engine networks
+ // if some of the layers are not supported.
CV_TRACE_FUNCTION();
CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine());
#ifdef HAVE_INF_ENGINE
MapIdToLayerData::iterator it;
Ptr<InfEngineBackendNet> net;
+ // Set of all input and output blob wrappers for the current network.
+ std::map<int, Ptr<BackendWrapper> > netBlobsWrappers;
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
- ld.skip = true;
+ ld.skip = true; // Initially skip all layers supported by Inference Engine.
Ptr<Layer> layer = ld.layerInstance;
if (!layer->supportBackend(preferableBackend))
{
- for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
- {
- auto dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
- dataPtr->name = ld.name;
- }
+ addInfEngineNetOutputs(ld);
ld.skip = false;
net = Ptr<InfEngineBackendNet>();
+ netBlobsWrappers.clear();
continue;
}
- // Check what all inputs are from the same network or from default backend.
+ // Create a new network if one of the inputs comes from a different
+ // Inference Engine graph.
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty())
{
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
- CV_Assert(!ieInpNode.empty(), net.empty() || net == ieInpNode->net);
+ CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
+ if (ieInpNode->net != net)
+ {
+ net = Ptr<InfEngineBackendNet>();
+ netBlobsWrappers.clear();
+ break;
+ }
}
}
+ // The same blob wrappers cannot be shared between two Inference Engine
+ // networks because of explicit references between layers and blobs.
+ // So we need to rewrap all the external blobs.
+ for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+ {
+ int lid = ld.inputBlobsId[i].lid;
+ LayerData &inpLd = layers[lid];
+ auto it = netBlobsWrappers.find(lid);
+ if (it == netBlobsWrappers.end())
+ {
+ ld.inputBlobsWrappers[i] = wrap(*ld.inputBlobs[i]);
+ auto dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
+ dataPtr->name = inpLd.name;
+ netBlobsWrappers[lid] = ld.inputBlobsWrappers[i];
+ }
+ else
+ ld.inputBlobsWrappers[i] = it->second;
+ }
+ netBlobsWrappers[ld.id] = ld.outputBlobsWrappers[0];
+
bool fused = false;
Ptr<BackendNode> node;
if (!net.empty())
if (!fused)
net->addLayer(ieNode->layer);
+ addInfEngineNetOutputs(ld);
}
// Initialize all networks.
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
lp.name = name;
- lp.type = "BatchNormalization";
+ lp.type = "ScaleShift";
lp.precision = InferenceEngine::Precision::FP32;
- std::shared_ptr<InferenceEngine::BatchNormalizationLayer> ieLayer(new InferenceEngine::BatchNormalizationLayer(lp));
+ std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
- size_t numChannels = weights_.total();
- ieLayer->epsilon = epsilon;
- ieLayer->_weights = wrapToInfEngineBlob(blobs[1], {numChannels});
- ieLayer->_biases = wrapToInfEngineBlob(blobs[0], {numChannels});
+ ieLayer->_weights = wrapToInfEngineBlob(weights_);
+ ieLayer->_biases = wrapToInfEngineBlob(bias_);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE
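The switch from BatchNormalization to ScaleShift works because OpenCV's batch normalization layer already folds the statistics into the per-channel weights_ and bias_ passed above. A self-contained sketch of that standard folding (gamma, beta, mean, var are the usual batch-norm parameters; this mirrors what the layer precomputes, it is not code from this patch):

    #include <cmath>
    #include <vector>

    void foldBatchNorm(const std::vector<float>& gamma, const std::vector<float>& beta,
                       const std::vector<float>& mean, const std::vector<float>& var,
                       float epsilon,
                       std::vector<float>& weights_, std::vector<float>& bias_)
    {
        size_t numChannels = gamma.size();
        weights_.resize(numChannels);
        bias_.resize(numChannels);
        for (size_t i = 0; i < numChannels; ++i)
        {
            // y = gamma * (x - mean) / sqrt(var + eps) + beta  ==  w * x + b
            float w = gamma[i] / std::sqrt(var[i] + epsilon);
            weights_[i] = w;                   // per-channel scale
            bias_[i] = beta[i] - w * mean[i];  // per-channel shift
        }
    }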
for (int i = 1; i < _variance.size(); ++i)
ieLayer->params["variance"] += format(",%f", _variance[i]);
- ieLayer->params["step"] = "0";
+ ieLayer->params["step"] = _stepX == _stepY ? format("%f", _stepX) : "0";
ieLayer->params["step_h"] = _stepY;
ieLayer->params["step_w"] = _stepX;
// Assume that the network's outputs are unconnected blobs.
void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) noexcept
{
- if (outputs.empty())
- {
- for (const auto& l : layers)
- {
- // Add all outputs.
- for (const InferenceEngine::DataPtr& out : l->outData)
- {
- // TODO: Replace to uniquness assertion.
- if (outputs.find(out->name) == outputs.end())
- outputs[out->name] = out;
- }
- // Remove internally connected outputs.
- for (const InferenceEngine::DataWeakPtr& inp : l->insData)
- {
- outputs.erase(InferenceEngine::DataPtr(inp)->name);
- }
- }
- CV_Assert(layers.empty() || !outputs.empty());
- }
- outBlobs.clear();
- for (const auto& it : outputs)
- {
- CV_Assert(allBlobs.find(it.first) != allBlobs.end());
- outBlobs[it.first] = allBlobs[it.first];
- }
outputs_ = outputs;
}
InfEngineBackendNet::addOutput(const std::string &layerName, size_t outputIndex,
InferenceEngine::ResponseDesc *resp) noexcept
{
- CV_Error(Error::StsNotImplemented, "");
+ for (const auto& l : layers)
+ {
+ for (const InferenceEngine::DataPtr& out : l->outData)
+ {
+ if (out->name == layerName)
+ {
+ outputs[out->name] = out;
+ return InferenceEngine::StatusCode::OK;
+ }
+ }
+ }
+ CV_Error(Error::StsObjectNotFound, "Cannot find a layer " + layerName);
return InferenceEngine::StatusCode::OK;
}
void InfEngineBackendNet::initEngine()
{
CV_Assert(!isInitialized());
+
+ // Add all unconnected blobs to output blobs.
+ InferenceEngine::OutputsDataMap unconnectedOuts;
+ for (const auto& l : layers)
+ {
+ // Add all outputs.
+ for (const InferenceEngine::DataPtr& out : l->outData)
+ {
+ // TODO: Replace with a uniqueness assertion.
+ if (unconnectedOuts.find(out->name) == unconnectedOuts.end())
+ unconnectedOuts[out->name] = out;
+ }
+ // Remove internally connected outputs.
+ for (const InferenceEngine::DataWeakPtr& inp : l->insData)
+ {
+ unconnectedOuts.erase(InferenceEngine::DataPtr(inp)->name);
+ }
+ }
+ CV_Assert(layers.empty() || !unconnectedOuts.empty());
+
+ for (auto it = unconnectedOuts.begin(); it != unconnectedOuts.end(); ++it)
+ {
+ outputs[it->first] = it->second;
+ }
+
+ // Set up output blobs.
+ outBlobs.clear();
+ for (const auto& it : outputs)
+ {
+ CV_Assert(allBlobs.find(it.first) != allBlobs.end());
+ outBlobs[it.first] = allBlobs[it.first];
+ }
+
engine = InferenceEngine::InferenceEnginePluginPtr("libMKLDNNPlugin.so");
InferenceEngine::ResponseDesc resp;
InferenceEngine::StatusCode status = engine->LoadNetwork(*this, &resp);
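The unconnected-outputs rule above reduces to a set difference: a blob is a network output iff some layer produces it and no layer consumes it. The same idea in isolation (Layer here is a hypothetical stand-in with plain string name lists):

    #include <algorithm>
    #include <iterator>
    #include <set>
    #include <string>
    #include <vector>

    struct Layer { std::vector<std::string> inNames, outNames; };  // hypothetical

    std::set<std::string> unconnectedOutputs(const std::vector<Layer>& layers)
    {
        std::set<std::string> produced, consumed;
        for (const Layer& l : layers)
        {
            produced.insert(l.outNames.begin(), l.outNames.end());
            consumed.insert(l.inNames.begin(), l.inNames.end());
        }
        std::set<std::string> result;
        std::set_difference(produced.begin(), produced.end(),
                            consumed.begin(), consumed.end(),
                            std::inserter(result, result.begin()));
        return result;
    }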
TEST_P(DNNTestNetwork, OpenFace)
{
+ if (backend == DNN_BACKEND_HALIDE) throw SkipTestException("");
processNet("dnn/openface_nn4.small2.v1.t7", "", Size(96, 96), "", "torch");
}
+TEST_P(DNNTestNetwork, opencv_face_detector)
+{
+ if (backend == DNN_BACKEND_HALIDE ||
+ (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL))
+ throw SkipTestException("");
+ Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
+ Mat inp = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
+ processNet("dnn/opencv_face_detector.caffemodel", "dnn/opencv_face_detector.prototxt",
+ inp, "detection_out", "caffe");
+}
+
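For context, the "detection_out" blob requested here follows the usual SSD layout: shape [1, 1, N, 7], one row per detection as [imageId, classId, confidence, left, top, right, bottom] with normalized coordinates. A minimal sketch of reading it (net, img, and the 0.5 threshold are assumptions, not part of the patch):

    void parseDetections(cv::dnn::Net& net, const cv::Mat& img)  // hypothetical
    {
        cv::Mat detections = net.forward("detection_out");
        cv::Mat results(detections.size[2], 7, CV_32F, detections.ptr<float>());
        for (int i = 0; i < results.rows; ++i)
        {
            float confidence = results.at<float>(i, 2);
            if (confidence < 0.5f) continue;  // hypothetical threshold
            cv::Rect box(cv::Point(int(results.at<float>(i, 3) * img.cols),
                                   int(results.at<float>(i, 4) * img.rows)),
                         cv::Point(int(results.at<float>(i, 5) * img.cols),
                                   int(results.at<float>(i, 6) * img.rows)));
            // box now holds one face detection in pixel coordinates.
        }
    }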
const tuple<DNNBackend, DNNTarget> testCases[] = {
#ifdef HAVE_HALIDE
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_HALIDE, DNN_TARGET_CPU),
normAssert(detections, ref);
}
-OCL_TEST(Test_TensorFlow, MobileNet_SSD)
+OCL_TEST(Test_TensorFlow, DISABLED_MobileNet_SSD)
{
- throw SkipTestException("TODO: test is failed");
std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
std::string imgPath = findDataFile("dnn/street.png", false);