//
//M*/
#include "../precomp.hpp"
+#include "../op_inf_engine.hpp"
namespace cv
{
setParamsFrom(params);
}
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
+ {
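+ // The default backend is always supported; Inference Engine is
+ // accepted only when OpenCV was built with IE support.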
+ return backendId == DNN_BACKEND_DEFAULT ||
+        (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
+ }
+
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
if (outputs[i].data != inputs[i]->data)
inputs[i]->copyTo(outputs[i]);
}
+
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+ {
+#ifdef HAVE_INF_ENGINE
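+ // An identity layer has no direct IE counterpart, so express it through
+ // the Split primitive, which with a single output just forwards its input.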
+ InferenceEngine::LayerParams lp;
+ lp.name = name;
+ lp.type = "Split";
+ lp.precision = InferenceEngine::Precision::FP32;
+ std::shared_ptr<InferenceEngine::SplitLayer> ieLayer(new InferenceEngine::SplitLayer(lp));
+ return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif // HAVE_INF_ENGINE
+ return Ptr<BackendNode>();
+ }
};
Ptr<Layer> BlankLayer::create(const LayerParams& params)
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_inf_engine.hpp"
namespace cv { namespace dnn {
CV_Assert(pnorm > 0);
}
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
+ {
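+ // The IE path is limited to what IE's Normalize primitive implements:
+ // L2 normalization with trained scale weights.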
+ return backendId == DNN_BACKEND_DEFAULT ||
+        (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
+         pnorm == 2 && !blobs.empty());
+ }
+
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
}
}
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+ {
+#ifdef HAVE_INF_ENGINE
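+ // Normalize has no dedicated IE layer class here, so build a generic
+ // CNNLayer and pass the hyper-parameters as string key-value pairs.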
+ InferenceEngine::LayerParams lp;
+ lp.name = name;
+ lp.type = "Normalize";
+ lp.precision = InferenceEngine::Precision::FP32;
+ std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
+
+ CV_Assert(!blobs.empty());
+
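+ // A single weight in blobs[0] means one scale shared across all channels;
+ // otherwise every channel carries its own scale.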
+ ieLayer->params["eps"] = format("%f", epsilon);
+ ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
+ ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
+
+ // size_t avoids a narrowing conversion in the {numChannels} initializer below
+ const size_t numChannels = blobs[0].total();
+ ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
+ return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif // HAVE_INF_ENGINE
+ return Ptr<BackendNode>();
+ }
+
private:
int startAxis, endAxis;
};
#ifdef HAVE_INF_ENGINE
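+// Inference Engine reports its version as a build number string; parse it
+// once so API differences between builds can be dispatched on below.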
+static int infEngineVersion()
+{
+ return std::atoi(InferenceEngine::GetInferenceEngineVersion()->buildNumber);
+}
+
InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& _layer)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
{
std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
std::reverse(reversedShape.begin(), reversedShape.end());
- return InferenceEngine::DataPtr(
- new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32)
- );
+ if (infEngineVersion() > 5855)
+ {
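+ // Builds after 5855 take an explicit layout: NCHW for 4D blobs,
+ // NC for 2D ones, ANY otherwise.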
+ InferenceEngine::Layout l = InferenceEngine::Layout::ANY;
+ if (m.dims == 4)
+ l = InferenceEngine::Layout::NCHW;
+ else if (m.dims == 2)
+ l = InferenceEngine::Layout::NC;
+ return InferenceEngine::DataPtr(
+ new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32, l)
+ );
+ }
+ else
+ {
+ return InferenceEngine::DataPtr(
+ new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32)
+ );
+ }
}
InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape,
InferenceEngine::StatusCode status;
InferenceEngine::ResponseDesc resp;
- const InferenceEngine::Version* v = InferenceEngine::GetInferenceEngineVersion();
plugin = InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(targetDevice);
- if (std::atoi(v->buildNumber) > 5855)
+ if (infEngineVersion() > 5855 && targetDevice == InferenceEngine::TargetDevice::eCPU)
{
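+ // Newer builds ship some CPU layer implementations in a separate
+ // extensions library, which has to be loaded into the plugin explicitly.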
#ifdef _WIN32
InferenceEngine::IExtensionPtr extension =