for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
- dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
+ std::string outputName = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
+ outputName = ld.outputBlobsWrappers.size() > 1 ? (outputName + "." + std::to_string(i)) : outputName;
+ dataPtr->setName(outputName);
}
}
else
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
- dataPtr->setName(ld.name);
+ std::string outputName = ld.outputBlobsWrappers.size() > 1 ? (ld.name + "." + std::to_string(i)) : ld.name;
+ dataPtr->setName(outputName);
}
}
}
return;
}
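+    // A layer unsupported by this backend can still run inside the IE graph
+    // as a custom fallback layer, but only when the IE CPU plugin is
+    // available to execute it.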
+ bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
+ BackendRegistry::checkIETarget(DNN_TARGET_CPU);
+
// Build Inference Engine networks from sets of layers that support this
// backend. Split the whole model into several Inference Engine networks if
// some of the layers are not implemented.
Ptr<Layer> layer = ld.layerInstance;
if (!fused && !layer->supportBackend(preferableBackend))
{
- addNgraphOutputs(ld);
- net = Ptr<InfEngineNgraphNet>();
- layer->preferableTarget = DNN_TARGET_CPU;
+ bool customizable = ld.id != 0 && supportsCPUFallback;
- for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+ // TODO: there is a bug in the Myriad plugin with shape inference for custom layers.
+ if (preferableTarget == DNN_TARGET_MYRIAD)
{
- LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
- Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
- if (!inpNode.empty()) {
- Ptr<InfEngineNgraphNode> ieNode = inpNode.dynamicCast<InfEngineNgraphNode>();
- ieNode->net->setUnconnectedNodes(ieNode);
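+        // Allow the fallback only for inputs with batch size 1.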
+ for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
+ {
+ customizable = ld.inputBlobs[i]->size[0] == 1;
}
}
- continue;
+
+ // TODO: fix these workarounds
+ if (preferableTarget == DNN_TARGET_MYRIAD ||
+ preferableTarget == DNN_TARGET_OPENCL ||
+ preferableTarget == DNN_TARGET_OPENCL_FP16)
+ customizable &= ld.type != "Concat";
+
+ if (preferableTarget == DNN_TARGET_OPENCL ||
+ preferableTarget == DNN_TARGET_OPENCL_FP16)
+ customizable &= ld.type != "Power";
+
+ if (preferableTarget == DNN_TARGET_OPENCL)
+ customizable &= ld.type != "Eltwise";
+
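+    // No fallback possible: close the current IE subnetwork and run this
+    // layer with the default OpenCV CPU implementation instead.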
+ if (!customizable)
+ {
+ addNgraphOutputs(ld);
+ net = Ptr<InfEngineNgraphNet>();
+ layer->preferableTarget = DNN_TARGET_CPU;
+
+ for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+ {
+ LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+ Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+ if (!inpNode.empty()) {
+ Ptr<InfEngineNgraphNode> ieNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+ ieNode->net->setUnconnectedNodes(ieNode);
+ }
+ }
+ continue;
+ }
}
ld.skip = true; // Initially skip all Inference Engine supported layers.
if (!fused)
{
- CV_Assert(!inputNodes.empty());
- node = layer->initNgraph(ld.inputBlobsWrappers, inputNodes);
- for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+ CV_Assert(ld.inputBlobsId.size() == inputNodes.size());
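+        // Inputs produced by a multi-output layer are all represented by one
+        // nGraph node; rewrap the requested output port (oid) as its own node.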
+ for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
- InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
- node.dynamicCast<InfEngineNgraphNode>()->setName(dataPtr->getName());
+ int lid = ld.inputBlobsId[i].lid;
+ int oid = ld.inputBlobsId[i].oid;
+ if (oid == 0 || lid == 0)
+ continue;
+
+ auto ieInpNode = inputNodes[i].dynamicCast<InfEngineNgraphNode>();
+ CV_Assert(oid < ieInpNode->node->get_output_size());
+ inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ieInpNode->node->get_output_as_single_output_node(oid, false)));
+ }
+
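+        // Supported layers are converted natively; unsupported ones (reached
+        // via the CPU fallback above) are wrapped into a custom nGraph op.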
+ if (layer->supportBackend(preferableBackend))
+ {
+ node = layer->initNgraph(ld.inputBlobsWrappers, inputNodes);
+ for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+ {
+ InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+ node.dynamicCast<InfEngineNgraphNode>()->setName(dataPtr->getName());
+ }
+ }
+ else
+ {
+ node = Ptr<BackendNode>(new InfEngineNgraphNode(inputNodes,
+ ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
}
}
else if (node.empty())
// OpenCV lets users use an empty input name; to prevent unexpected naming,
// we use a predefined name instead.
static std::string kDefaultInpLayerName = "empty_inp_layer_name";
+static constexpr const char* kOpenCVLayersType = "OpenCVLayer";
+
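+// Shapes are serialized to plain strings so they can be attached to custom
+// nodes as attributes: "<num_mats>" followed by "<dims> <size_0> ... <size_dims-1>"
+// for every Mat.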
+static std::string shapesToStr(const std::vector<Mat>& mats)
+{
+ std::ostringstream shapes;
+ shapes << mats.size() << " ";
+ for (const Mat& m : mats)
+ {
+ shapes << m.dims << " ";
+ for (int i = 0; i < m.dims; ++i)
+ shapes << m.size[i] << " ";
+ }
+ return shapes.str();
+}
+
+static void strToShapes(const std::string& str, std::vector<std::vector<size_t> >& shapes)
+{
+ std::istringstream ss(str);
+ int num, dims;
+ ss >> num;
+ shapes.resize(num);
+ for (int i = 0; i < num; ++i)
+ {
+ ss >> dims;
+ shapes[i].resize(dims);
+ for (int j = 0; j < dims; ++j)
+ ss >> shapes[i][j];
+ }
+}
static std::vector<Ptr<NgraphBackendWrapper> >
ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
return wrappers;
}
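+// Custom nGraph operation representing an OpenCV layer inside an IE network.
+// The output/internal shapes and a pointer to the backing cv::dnn::Layer are
+// passed through string parameters ("outputs", "internals", "impl").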
+class NgraphCustomOp: public ngraph::op::Op {
+public:
+ const ngraph::NodeTypeInfo& get_type_info() const override
+ {
+ static constexpr ngraph::NodeTypeInfo type_info{kOpenCVLayersType, 0};
+ return type_info;
+ }
+
+ NgraphCustomOp() {}
+ NgraphCustomOp(const ngraph::NodeVector& inputs,
+ const std::map<std::string, InferenceEngine::Parameter>& params = {}):
+ Op(inputs), params(params)
+ {
+ constructor_validate_and_infer_types();
+ }
+
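+    // Output shapes are not inferred from the inputs; they are read back from
+    // the "outputs" parameter, which OpenCV fills with precomputed shapes.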
+ void validate_and_infer_types() override
+ {
+ std::vector<std::vector<size_t> > shapes;
+ strToShapes(params["outputs"], shapes);
+ set_output_size(shapes.size());
+ for (size_t i = 0; i < shapes.size(); ++i)
+ {
+ ngraph::Shape output_shape(shapes[i]);
+ set_output_type(i, get_input_element_type(0), output_shape);
+ }
+ }
+
+ std::shared_ptr<ngraph::Node> copy_with_new_args(const ngraph::NodeVector& new_args) const override
+ {
+ return std::make_shared<NgraphCustomOp>(new_args, params);
+ }
+
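+    // Expose string-valued parameters as node attributes so they survive
+    // network serialization.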
+ bool visit_attributes(ngraph::AttributeVisitor& visitor) override
+ {
+ for (auto& attr : params)
+ {
+ if (attr.second.is<std::string>())
+ visitor.on_attribute(attr.first, attr.second.as<std::string>());
+ }
+ return true;
+ }
+
+private:
+ std::map<std::string, InferenceEngine::Parameter> params;
+};
+
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(_node) {}
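+// Constructor for fallback layers: wraps an OpenCV layer into an
+// NgraphCustomOp node, encoding the layer pointer and the output/internal
+// shapes as string parameters.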
+InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& nodes,
+ Ptr<Layer>& cvLayer_, std::vector<Mat*>& inputs,
+ std::vector<Mat>& outputs, std::vector<Mat>& internals)
+ : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), cvLayer(cvLayer_)
+{
+ std::ostringstream oss;
+ oss << (size_t)cvLayer.get();
+
+ std::map<std::string, InferenceEngine::Parameter> params = {
+ {"impl", oss.str()},
+ {"outputs", shapesToStr(outputs)},
+ {"internals", shapesToStr(internals)}
+ };
+
+ ngraph::NodeVector inp_nodes;
+ for (const auto& node : nodes)
+ inp_nodes.emplace_back(node.dynamicCast<InfEngineNgraphNode>()->node);
+ node = std::make_shared<NgraphCustomOp>(inp_nodes, params);
+
+ CV_Assert(!cvLayer->name.empty());
+ setName(cvLayer->name);
+}
+
void InfEngineNgraphNode::setName(const std::string& name) {
node->set_friendly_name(name);
}
if (device_name == "MYRIAD") {
config.emplace("VPU_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
}
- netExec = ie.LoadNetwork(net, device_name, config);
+
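+    // Networks that contain OpenCV fallback layers (or target FPGA) must be
+    // loaded through the HETERO plugin so that unsupported nodes are
+    // dispatched to the CPU device.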
+ bool isHetero = false;
+ if (device_name != "CPU")
+ {
+ isHetero = device_name == "FPGA";
+ for (auto& layer : net)
+ {
+ if (layer->type == kOpenCVLayersType)
+ {
+ isHetero = true;
+ break;
+ }
+ }
+ }
+ if (isHetero)
+ netExec = ie.LoadNetwork(net, "HETERO:" + device_name + ",CPU", config);
+ else
+ netExec = ie.LoadNetwork(net, device_name, config);
}
catch (const std::exception& ex)
{
class InfEngineNgraphNode : public BackendNode
{
public:
+ InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& nodes, Ptr<Layer>& layer,
+ std::vector<Mat*>& inputs, std::vector<Mat>& outputs,
+ std::vector<Mat>& internals);
+
InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node);
InfEngineNgraphNode(std::shared_ptr<ngraph::Node>& _node);
// Inference Engine network object that makes it possible to obtain the outputs of this layer.
std::shared_ptr<ngraph::Node> node;
Ptr<InfEngineNgraphNet> net;
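+    // OpenCV layer that executes this node when it runs as a custom fallback op.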
+ Ptr<dnn::Layer> cvLayer;
};
class NgraphBackendWrapper : public BackendWrapper
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "layers_common.hpp"
+#include "../ie_ngraph.hpp"
#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_INF_ENGINE
+
+
+#ifdef HAVE_DNN_NGRAPH
+ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
+ const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+ {
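+        // The constant layer maps directly to an ngraph Constant built from
+        // the layer's blob.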
+ auto node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+ getShape<size_t>(blobs[0]),
+ blobs[0].data);
+ return Ptr<BackendNode>(new InfEngineNgraphNode(node));
+ }
+#endif // HAVE_DNN_NGRAPH
};
Ptr<Layer> ConstLayer::create(const LayerParams& params)
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 560), Scalar(), false);
- float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0;
+ float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 0.0;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.06 : 0.0;
processNet("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", "dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt",
inp, "detection_out", "", l1, lInf);
runTensorFlowNet("spatial_padding");
runTensorFlowNet("mirror_pad");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ if (target == DNN_TARGET_MYRIAD)
{
- if (target == DNN_TARGET_MYRIAD)
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
- if (target == DNN_TARGET_OPENCL_FP16)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
#endif
runTensorFlowNet("keras_pad_concat");
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
runTensorFlowNet("split");
}
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
applyTestTag(CV_TEST_TAG_MEMORY_1GB, CV_TEST_TAG_DEBUG_VERYLONG);
Mat img = imread(findDataFile("dnn/street.png"));
throw SkipTestException("");
}
#endif
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
{
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ throw SkipTestException("");
}
Net net;