From: Dmitry Kurtaev
Date: Fri, 20 Jul 2018 15:58:37 +0000 (+0300)
Subject: Add a sample which tests OpenVINO models
X-Git-Tag: accepted/tizen/6.0/unified/20201030.111113~1^2~599^2~5^2~2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=28e08ae0bd5ae3106700229c9cf94513833730c2;p=platform%2Fupstream%2Fopencv.git

Add a sample which tests OpenVINO models
---

diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt
index a2f741c..a4cdc18 100644
--- a/modules/dnn/CMakeLists.txt
+++ b/modules/dnn/CMakeLists.txt
@@ -120,3 +120,9 @@ if(BUILD_PERF_TESTS)
     endif()
   endif()
 endif()
+
+# Test Intel's Inference Engine models
+if(HAVE_INF_ENGINE AND TARGET opencv_test_dnn)
+  ocv_target_include_directories(opencv_test_dnn PRIVATE ${INF_ENGINE_INCLUDE_DIRS})
+  ocv_target_link_libraries(opencv_test_dnn LINK_PRIVATE ${INF_ENGINE_LIBRARIES})
+endif()

diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index bcf2c2a..a7c13f3 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -428,9 +428,8 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
     try
     {
-        static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
-        std::string deviceName = InferenceEngine::getDeviceName(targetDevice);
-        auto pluginIt = sharedPlugins.find(deviceName);
+        static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
+        auto pluginIt = sharedPlugins.find(targetDevice);
         if (pluginIt != sharedPlugins.end())
         {
             enginePtr = pluginIt->second;
         }
@@ -438,7 +437,7 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
         else
         {
             enginePtr = InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(targetDevice);
-            sharedPlugins[deviceName] = enginePtr;
+            sharedPlugins[targetDevice] = enginePtr;
             if (targetDevice == InferenceEngine::TargetDevice::eCPU)
             {

diff --git a/modules/dnn/test/test_ie_models.cpp b/modules/dnn/test/test_ie_models.cpp
new file mode 100644
index 0000000..80c8ef3
--- /dev/null
+++ b/modules/dnn/test/test_ie_models.cpp
@@ -0,0 +1,220 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2018, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+#include "test_precomp.hpp"
+
+#ifdef HAVE_INF_ENGINE
+#include <opencv2/core/utils/filesystem.hpp>
+
+#include <inference_engine.hpp>
+#include <ie_icnn_network.hpp>
+#include <ie_extension.h>
+
+static std::string extraTestDataPath =
+#ifdef WINRT
+        NULL;
+#else
+        getenv("INTEL_CVSDK_DIR");
+#endif
+
+namespace opencv_test { namespace {
+
+using namespace cv;
+using namespace cv::dnn;
+using namespace InferenceEngine;
+
+static inline void genData(const std::vector<size_t>& dims, Mat& m, Blob::Ptr& dataPtr)
+{
+    std::vector<int> reversedDims(dims.begin(), dims.end());
+    std::reverse(reversedDims.begin(), reversedDims.end());
+
+    m.create(reversedDims, CV_32F);
+    randu(m, -1, 1);
+
+    dataPtr = make_shared_blob<float>(Precision::FP32, dims, (float*)m.data);
+}
+
+void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
+           std::map<std::string, cv::Mat>& inputsMap, std::map<std::string, cv::Mat>& outputsMap)
+{
+    CNNNetReader reader;
+    reader.ReadNetwork(xmlPath);
+    reader.ReadWeights(binPath);
+
+    CNNNetwork net = reader.getNetwork();
+
+    InferenceEnginePluginPtr enginePtr;
+    InferencePlugin plugin;
+    ExecutableNetwork netExec;
+    InferRequest infRequest;
+    TargetDevice targetDevice;
+    switch (target)
+    {
+    case DNN_TARGET_CPU:
+        targetDevice = TargetDevice::eCPU;
+        break;
+    case DNN_TARGET_OPENCL:
+    case DNN_TARGET_OPENCL_FP16:
+        targetDevice = TargetDevice::eGPU;
+        break;
+    case DNN_TARGET_MYRIAD:
+        targetDevice = TargetDevice::eMYRIAD;
+        break;
+    default:
+        CV_Error(Error::StsNotImplemented, "Unknown target");
+    };
+
+    try
+    {
+        enginePtr = PluginDispatcher({""}).getSuitablePlugin(targetDevice);
+
+        if (targetDevice == TargetDevice::eCPU)
+        {
+            std::string suffixes[] = {"_avx2", "_sse4", ""};
+            bool haveFeature[] = {
+                checkHardwareSupport(CPU_AVX2),
+                checkHardwareSupport(CPU_SSE4_2),
+                true
+            };
+            for (int i = 0; i < 3; ++i)
+            {
+                if (!haveFeature[i])
+                    continue;
+#ifdef _WIN32
+                std::string libName = "cpu_extension" + suffixes[i] + ".dll";
+#else
+                std::string libName = "libcpu_extension" + suffixes[i] + ".so";
+#endif  // _WIN32
+                try
+                {
+                    IExtensionPtr extension = make_so_pointer<IExtension>(libName);
+                    enginePtr->AddExtension(extension, 0);
+                    break;
+                }
+                catch(...) {}
+            }
+            // Some networks can work without a library of extra layers.
+        }
+        plugin = InferencePlugin(enginePtr);
+
+        netExec = plugin.LoadNetwork(net, {});
+        infRequest = netExec.CreateInferRequest();
+    }
+    catch (const std::exception& ex)
+    {
+        CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
+    }
+
+    // Fill input blobs.
+    inputsMap.clear();
+    BlobMap inputBlobs;
+    for (auto& it : net.getInputsInfo())
+    {
+        genData(it.second->getDims(), inputsMap[it.first], inputBlobs[it.first]);
+    }
+    infRequest.SetInput(inputBlobs);
+
+    // Fill output blobs.
+    outputsMap.clear();
+    BlobMap outputBlobs;
+    for (auto& it : net.getOutputsInfo())
+    {
+        genData(it.second->dims, outputsMap[it.first], outputBlobs[it.first]);
+    }
+    infRequest.SetOutput(outputBlobs);
+
+    infRequest.Infer();
+}
+
+std::vector<String> getOutputsNames(const Net& net)
+{
+    std::vector<String> names;
+    if (names.empty())
+    {
+        std::vector<int> outLayers = net.getUnconnectedOutLayers();
+        std::vector<String> layersNames = net.getLayerNames();
+        names.resize(outLayers.size());
+        for (size_t i = 0; i < outLayers.size(); ++i)
+            names[i] = layersNames[outLayers[i] - 1];
+    }
+    return names;
+}
+
+void runCV(Target target, const std::string& xmlPath, const std::string& binPath,
+           const std::map<std::string, cv::Mat>& inputsMap,
+           std::map<std::string, cv::Mat>& outputsMap)
+{
+    Net net = readNet(xmlPath, binPath);
+    for (auto& it : inputsMap)
+        net.setInput(it.second, it.first);
+    net.setPreferableTarget(target);
+
+    std::vector<String> outNames = getOutputsNames(net);
+    std::vector<Mat> outs;
+    net.forward(outs, outNames);
+
+    outputsMap.clear();
+    EXPECT_EQ(outs.size(), outNames.size());
+    for (int i = 0; i < outs.size(); ++i)
+    {
+        EXPECT_TRUE(outputsMap.insert({outNames[i], outs[i]}).second);
+    }
+}
+
+typedef TestWithParam<tuple<Target, std::string> > DNNTestOpenVINO;
+TEST_P(DNNTestOpenVINO, models)
+{
+    Target target = (dnn::Target)(int)get<0>(GetParam());
+    std::string modelName = get<1>(GetParam());
+
+    if (modelName == "semantic-segmentation-adas-0001" && target == DNN_TARGET_OPENCL_FP16)
+        throw SkipTestException("");
+
+    std::string precision = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "FP16" : "FP32";
+    std::string prefix = utils::fs::join(extraTestDataPath,
+                         utils::fs::join("deployment_tools",
+                         utils::fs::join("intel_models",
+                         utils::fs::join(modelName,
+                         utils::fs::join(precision, modelName)))));
+    std::string xmlPath = prefix + ".xml";
+    std::string binPath = prefix + ".bin";
+
+    std::map<std::string, cv::Mat> inputsMap;
+    std::map<std::string, cv::Mat> ieOutputsMap, cvOutputsMap;
+    runIE(target, xmlPath, binPath, inputsMap, ieOutputsMap);
+    runCV(target, xmlPath, binPath, inputsMap, cvOutputsMap);
+
+    EXPECT_EQ(ieOutputsMap.size(), cvOutputsMap.size());
+    for (auto& srcIt : ieOutputsMap)
+    {
+        auto dstIt = cvOutputsMap.find(srcIt.first);
+        CV_Assert(dstIt != cvOutputsMap.end());
+        double normInf = cvtest::norm(srcIt.second, dstIt->second, cv::NORM_INF);
+        EXPECT_EQ(normInf, 0);
+    }
+}
+
+static testing::internal::ParamGenerator<String> intelModels()
+{
+    String path = utils::fs::join(utils::fs::join(extraTestDataPath, "deployment_tools"), "intel_models");
+
+    std::vector<String> modelsNames;
+    cv::utils::fs::glob_relative(path, "", modelsNames, false, true);
+
+    std::vector<String>::iterator end =
+        std::remove_if(modelsNames.begin(), modelsNames.end(),
+                       [&](const String& dir){ return !utils::fs::isDirectory(utils::fs::join(path, dir)); });
+    modelsNames = std::vector<String>(modelsNames.begin(), end);
+
+    return testing::ValuesIn(modelsNames);
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, DNNTestOpenVINO, Combine(
+    Values(DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16), intelModels()
+));
+
+}}  // namespace
+#endif  // HAVE_INF_ENGINE
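
For context (not part of the commit): the runCV() half of the test exercises the same user-facing path as the minimal sketch below, which loads an OpenVINO IR pair through cv::dnn::readNet() and runs one forward pass. The file names and the 1x3x384x672 input shape are illustrative placeholders, not values taken from the commit.

// Minimal usage sketch, assuming a hypothetical IR pair (model.xml/model.bin)
// whose input layer accepts a 1x3x384x672 blob.
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>

int main()
{
    // readNet() dispatches to the Inference Engine backend when given
    // an IR topology (.xml) plus its weights (.bin).
    cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin");
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    // Dummy NCHW input blob; the shape must match the model's input layer.
    const int sz[] = {1, 3, 384, 672};
    cv::Mat input(4, sz, CV_32F, cv::Scalar(0));
    net.setInput(input);

    cv::Mat out = net.forward();
    return 0;
}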