DNN_TARGET_FPGA
};
+ CV_EXPORTS std::vector< std::pair<Backend, Target> > getAvailableBackends();
+ CV_EXPORTS std::vector<Target> getAvailableTargets(Backend be);
+
/** @brief This class provides all data needed to initialize layer.
*
* It includes dictionary with scalar params (which can be read by using Dict interface),
void processNet(std::string weights, std::string proto, std::string halide_scheduler,
const Mat& input, const std::string& outputLayer = "")
{
- if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
- {
-#if defined(HAVE_OPENCL)
- if (!cv::ocl::useOpenCL())
-#endif
- {
- throw cvtest::SkipTestException("OpenCL is not available/disabled in OpenCV");
- }
- }
- if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
- {
- if (!checkIETarget(DNN_TARGET_MYRIAD))
- {
- throw SkipTestException("Myriad is not available/disabled in OpenCV");
- }
- }
-
randu(input, 0.0f, 1.0f);
weights = findDataFile(weights, false);
#endif
);
+//==================================================================================================
+
+// Registry of DNN (backend, target) pairs that are actually usable in this
+// process. Availability is determined once, lazily, from build-time flags
+// (HAVE_HALIDE / HAVE_INF_ENGINE / HAVE_OPENCL) and runtime probes.
+class BackendRegistry
+{
+public:
+    typedef std::vector< std::pair<Backend, Target> > BackendsList;
+    const BackendsList & getBackends() const { return backends; }
+    // Meyers singleton: the registry is constructed (and all probing runs)
+    // on first access only.
+    static BackendRegistry & getRegistry()
+    {
+        static BackendRegistry impl;
+        return impl;
+    }
+private:
+    BackendRegistry()
+    {
+#ifdef HAVE_HALIDE
+        backends.push_back(std::make_pair(DNN_BACKEND_HALIDE, DNN_TARGET_CPU));
+# ifdef HAVE_OPENCL
+        if (cv::ocl::useOpenCL())
+            backends.push_back(std::make_pair(DNN_BACKEND_HALIDE, DNN_TARGET_OPENCL));
+# endif
+#endif // HAVE_HALIDE
+
+#ifdef HAVE_INF_ENGINE
+        // Each Inference Engine target is verified by actually running a tiny
+        // network on it (see checkIETarget below).
+        if (checkIETarget(DNN_TARGET_CPU))
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU));
+        if (checkIETarget(DNN_TARGET_MYRIAD))
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD));
+        if (checkIETarget(DNN_TARGET_FPGA))
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_FPGA));
+# ifdef HAVE_OPENCL
+        // IE OpenCL targets are only probed when the default OpenCL device is
+        // an Intel one.
+        if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
+        {
+            if (checkIETarget(DNN_TARGET_OPENCL))
+                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL));
+            if (checkIETarget(DNN_TARGET_OPENCL_FP16))
+                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16));
+        }
+# endif
+#endif // HAVE_INF_ENGINE
+
+#ifdef HAVE_OPENCL
+        if (cv::ocl::useOpenCL())
+        {
+            backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL));
+            backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16));
+        }
+#endif
+
+        // The plain OpenCV/CPU combination is always registered as a fallback.
+        backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
+    }
+    // Probes an Inference Engine target by building a trivial one-layer
+    // network and running a forward pass on a small dummy blob; any exception
+    // (missing plugin, absent device, ...) marks the target as unavailable.
+    static inline bool checkIETarget(int target)
+    {
+#ifndef HAVE_INF_ENGINE
+        return false;
+#else
+        cv::dnn::Net net;
+        cv::dnn::LayerParams lp;
+        net.addLayerToPrev("testLayer", "Identity", lp);
+        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
+        net.setPreferableTarget(target);
+        static int inpDims[] = {1, 2, 3, 4};
+        net.setInput(cv::Mat(4, &inpDims[0], CV_32FC1, cv::Scalar(0)));
+        // Deliberately swallow every failure mode: "cannot run" simply means
+        // "target not available" here.
+        try
+        {
+            net.forward();
+        }
+        catch(...)
+        {
+            return false;
+        }
+        return true;
+#endif
+    }
+
+    // Populated once by the constructor.
+    BackendsList backends;
+};
+
+
+// Returns every (backend, target) combination detected as usable at runtime.
+std::vector< std::pair<Backend, Target> > getAvailableBackends()
+{
+    const BackendRegistry &registry = BackendRegistry::getRegistry();
+    return registry.getBackends();
+}
+
+// Returns the targets usable with the given backend, in registry order.
+std::vector<Target> getAvailableTargets(Backend be)
+{
+    std::vector<Target> result;
+    // Bind the registry list by const reference: going through
+    // getAvailableBackends() copied the whole vector just to iterate it.
+    const BackendRegistry::BackendsList &all_backends = BackendRegistry::getRegistry().getBackends();
+    for (BackendRegistry::BackendsList::const_iterator i = all_backends.begin(); i != all_backends.end(); ++i)
+    {
+        if (i->first == be)
+            result.push_back(i->second);
+    }
+    return result;
+}
+
+//==================================================================================================
+
// Additional checks (slowdowns execution!)
static bool DNN_CHECK_NAN_INF = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF", false);
static bool DNN_CHECK_NAN_INF_DUMP = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF_DUMP", false);
processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
}
-INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, dnnBackendsAndTargets(true, true, false));
+INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, dnnBackendsAndTargets());
}} // namespace
typedef testing::TestWithParam<Target> Reproducibility_SqueezeNet_v1_1;
TEST_P(Reproducibility_SqueezeNet_v1_1, Accuracy)
{
+ int targetId = GetParam();
+ if(targetId == DNN_TARGET_OPENCL_FP16)
+ throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/squeezenet_v1.1.prototxt", false),
findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
-
- int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
Mat ref = blobFromNPY(_tf("squeezenet_v1.1_prob.npy"));
normAssert(ref, out);
}
-INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_SqueezeNet_v1_1, availableDnnTargets());
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_SqueezeNet_v1_1,
+ testing::ValuesIn(getAvailableTargets(DNN_BACKEND_OPENCV)));
TEST(Reproducibility_AlexNet_fp16, Accuracy)
{
testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
}
-static inline bool checkIETarget(int target)
-{
-#ifndef HAVE_INF_ENGINE
- return false;
-#else
- cv::dnn::Net net;
- cv::dnn::LayerParams lp;
- net.addLayerToPrev("testLayer", "Identity", lp);
- net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
- net.setPreferableTarget(target);
- static int inpDims[] = {1, 2, 3, 4};
- net.setInput(cv::Mat(4, &inpDims[0], CV_32FC1, cv::Scalar(0)));
- try
- {
- net.forward();
- }
- catch(...)
- {
- return false;
- }
- return true;
-#endif
-}
-
static inline bool readFileInMemory(const std::string& filename, std::string& content)
{
std::ios::openmode mode = std::ios::in | std::ios::binary;
using namespace cv::dnn;
static inline
-testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargets(
+testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
        bool withInferenceEngine = true,
        bool withHalide = false,
        bool withCpuOCV = true
)
{
-    std::vector<tuple<Backend, Target> > targets;
-#ifdef HAVE_HALIDE
+    // Build the (backend, target) test matrix from the runtime registry
+    // instead of duplicating the #ifdef/probing logic here.
+    std::vector< tuple<Backend, Target> > targets;
+    std::vector< Target > available;
    if (withHalide)
    {
-        targets.push_back(make_tuple(DNN_BACKEND_HALIDE, DNN_TARGET_CPU));
-#ifdef HAVE_OPENCL
-        if (cv::ocl::useOpenCL())
-            targets.push_back(make_tuple(DNN_BACKEND_HALIDE, DNN_TARGET_OPENCL));
-#endif
+        available = getAvailableTargets(DNN_BACKEND_HALIDE);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+            targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
    }
-#endif
-#ifdef HAVE_INF_ENGINE
    if (withInferenceEngine)
    {
-        targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU));
-#ifdef HAVE_OPENCL
-        if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
-        {
-            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL));
-            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16));
-        }
-#endif
-        if (checkIETarget(DNN_TARGET_MYRIAD))
-            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD));
+        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
    }
-#endif
-    if (withCpuOCV)
-        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
-#ifdef HAVE_OPENCL
-    if (cv::ocl::useOpenCL())
    {
-        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL));
-        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16));
+        available = getAvailableTargets(DNN_BACKEND_OPENCV);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+        {
+            // Keep honoring withCpuOCV: callers that pass false expect the
+            // plain OpenCV/CPU pair to be excluded from the test matrix.
+            if (!withCpuOCV && *i == DNN_TARGET_CPU)
+                continue;
+            targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
+        }
    }
-#endif
-    if (targets.empty()) // validate at least CPU mode
-        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
+    if (targets.empty())  // validate at least CPU mode; ValuesIn() must not get an empty list
+        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
    return testing::ValuesIn(targets);
}
namespace opencv_test {
using namespace cv::dnn;
-static inline
-testing::internal::ParamGenerator<Target> availableDnnTargets()
-{
- static std::vector<Target> targets;
- if (targets.empty())
- {
- targets.push_back(DNN_TARGET_CPU);
-#ifdef HAVE_OPENCL
- if (cv::ocl::useOpenCL())
- targets.push_back(DNN_TARGET_OPENCL);
-#endif
- }
- return testing::ValuesIn(targets);
-}
-
class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
{
public:
}
}
- static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
- {
- if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
- {
-#ifdef HAVE_OPENCL
- if (!cv::ocl::useOpenCL())
-#endif
- {
- throw SkipTestException("OpenCL is not available/disabled in OpenCV");
- }
- }
+ static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
+ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
{
- if (!checkIETarget(DNN_TARGET_MYRIAD))
- {
- throw SkipTestException("Myriad is not available/disabled in OpenCV");
- }
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
if (inp && ref && inp->size[0] != 1)
{
typedef testing::TestWithParam<Target> Reproducibility_GoogLeNet;
TEST_P(Reproducibility_GoogLeNet, Batching)
{
+ const int targetId = GetParam();
+ if(targetId == DNN_TARGET_OPENCL_FP16)
+ throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
- int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
TEST_P(Reproducibility_GoogLeNet, IntermediateBlobs)
{
+ const int targetId = GetParam();
+ if(targetId == DNN_TARGET_OPENCL_FP16)
+ throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
- int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
TEST_P(Reproducibility_GoogLeNet, SeveralCalls)
{
+ const int targetId = GetParam();
+ if(targetId == DNN_TARGET_OPENCL_FP16)
+ throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
- int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
normAssert(outs[0], ref, "", 1E-4, 1E-2);
}
-INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_GoogLeNet, availableDnnTargets());
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_GoogLeNet,
+ testing::ValuesIn(getAvailableTargets(DNN_BACKEND_OPENCV)));
}} // namespace
std::map<std::string, cv::Mat> inputsMap;
std::map<std::string, cv::Mat> ieOutputsMap, cvOutputsMap;
// Single Myriad device cannot be shared across multiple processes.
- resetMyriadDevice();
+ if (target == DNN_TARGET_MYRIAD)
+ resetMyriadDevice();
runIE(target, xmlPath, binPath, inputsMap, ieOutputsMap);
runCV(target, xmlPath, binPath, inputsMap, cvOutputsMap);
return ValuesIn(modelsNames);
}
-static testing::internal::ParamGenerator<Target> dnnDLIETargets()
-{
- std::vector<Target> targets;
- targets.push_back(DNN_TARGET_CPU);
-#ifdef HAVE_OPENCL
- if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
- {
- targets.push_back(DNN_TARGET_OPENCL);
- targets.push_back(DNN_TARGET_OPENCL_FP16);
- }
-#endif
- if (checkIETarget(DNN_TARGET_MYRIAD))
- targets.push_back(DNN_TARGET_MYRIAD);
- if (checkIETarget(DNN_TARGET_FPGA))
- targets.push_back(DNN_TARGET_FPGA);
- return testing::ValuesIn(targets);
-}
-
-INSTANTIATE_TEST_CASE_P(/**/, DNNTestOpenVINO, Combine(
- dnnDLIETargets(), intelModels()
-));
+INSTANTIATE_TEST_CASE_P(/**/,
+ DNNTestOpenVINO,
+ Combine(testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)), intelModels())
+);
}}
#endif // HAVE_INF_ENGINE
const int target = get<1>(get<3>(GetParam()));
const bool kSwapRB = true;
- if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD && !checkIETarget(DNN_TARGET_MYRIAD))
- throw SkipTestException("Myriad is not available/disabled in OpenCV");
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16 && dtype != CV_32F)
throw SkipTestException("");