/**
 * @brief Enum of computation backends supported by layers.
+ * @see Net::setPreferableBackend
 */
enum Backend
{
+    //! DNN_BACKEND_DEFAULT is equal to DNN_BACKEND_INFERENCE_ENGINE if
+    //! OpenCV is built with Intel's Inference Engine library or
+    //! DNN_BACKEND_OPENCV otherwise.
    DNN_BACKEND_DEFAULT,
    DNN_BACKEND_HALIDE,
-    DNN_BACKEND_INFERENCE_ENGINE
+    DNN_BACKEND_INFERENCE_ENGINE,
    //! Built-in OpenCV implementation (per the backend/target table:
    //! DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16).
+    DNN_BACKEND_OPENCV
};
/**
* @brief Enum of target devices for computations.
+ * @see Net::setPreferableTarget
*/
enum Target
{
* @brief Ask network to use specific computation backend where it supported.
* @param[in] backendId backend identifier.
* @see Backend
+ *
+ * If OpenCV is compiled with Intel's Inference Engine library, DNN_BACKEND_DEFAULT
+ * means DNN_BACKEND_INFERENCE_ENGINE. Otherwise it is equal to DNN_BACKEND_OPENCV.
*/
CV_WRAP void setPreferableBackend(int backendId);
* @brief Ask network to make computations on specific target device.
* @param[in] targetId target identifier.
* @see Target
+ *
+ * List of supported combinations backend / target:
+ * | | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE |
+ * |------------------------|--------------------|------------------------------|--------------------|
+ * | DNN_TARGET_CPU | + | + | + |
+ * | DNN_TARGET_OPENCL | + | + | + |
+ * | DNN_TARGET_OPENCL_FP16 | + | + | |
+ * | DNN_TARGET_MYRIAD | | + | |
*/
CV_WRAP void setPreferableTarget(int targetId);
namespace opencv_test {
-CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE)
+CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE, DNN_BACKEND_OPENCV)
CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16, DNN_TARGET_MYRIAD)
class DNNTestNetwork : public ::perf::TestBaseWithParam< tuple<DNNBackend, DNNTarget> >
void processNet(std::string weights, std::string proto, std::string halide_scheduler,
const Mat& input, const std::string& outputLayer = "")
{
- if (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
+ if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL)
{
#if defined(HAVE_OPENCL)
if (!cv::ocl::useOpenCL())
PERF_TEST_P_(DNNTestNetwork, ENet)
{
if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
- (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16))
+ (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
processNet("dnn/Enet-model-best.net", "", "enet.yml",
Mat(cv::Size(512, 256), CV_32FC3));
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
#endif
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_CPU),
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL),
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL_FP16)
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_CPU),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
};
INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, testing::ValuesIn(testCases));
class OpenCLBackendWrapper : public BackendWrapper
{
public:
- OpenCLBackendWrapper(Mat& m) : BackendWrapper(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL)
+ OpenCLBackendWrapper(Mat& m) : BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
{
m.copyTo(umat);
host = &m;
}
OpenCLBackendWrapper(const Ptr<BackendWrapper>& baseBuffer, Mat& m)
- : BackendWrapper(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL)
+ : BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
{
Ptr<OpenCLBackendWrapper> base = baseBuffer.dynamicCast<OpenCLBackendWrapper>();
CV_Assert(!base.empty());
static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
{
- if (backendId == DNN_BACKEND_DEFAULT)
+ if (backendId == DNN_BACKEND_OPENCV)
{
if (targetId == DNN_TARGET_CPU)
return Ptr<BackendWrapper>();
Ptr<BackendWrapper> wrap(Mat& host)
{
- if (preferableBackend == DNN_BACKEND_DEFAULT && preferableTarget == DNN_TARGET_CPU)
+ if (preferableBackend == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU)
return Ptr<BackendWrapper>();
MatShape shape(host.dims);
if (backendWrappers.find(data) != backendWrappers.end())
{
Ptr<BackendWrapper> baseBuffer = backendWrappers[data];
- if (preferableBackend == DNN_BACKEND_DEFAULT)
+ if (preferableBackend == DNN_BACKEND_OPENCV)
{
CV_Assert(IS_DNN_OPENCL_TARGET(preferableTarget));
return OpenCLBackendWrapper::create(baseBuffer, host);
{
CV_TRACE_FUNCTION();
+ if (preferableBackend == DNN_BACKEND_DEFAULT)
+#ifdef HAVE_INF_ENGINE
+ preferableBackend = DNN_BACKEND_INFERENCE_ENGINE;
+#else
+ preferableBackend = DNN_BACKEND_OPENCV;
+#endif
+ CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
+ preferableTarget == DNN_TARGET_CPU ||
+ preferableTarget == DNN_TARGET_OPENCL ||
+ preferableTarget == DNN_TARGET_OPENCL_FP16);
+ CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
+ preferableTarget == DNN_TARGET_CPU ||
+ preferableTarget == DNN_TARGET_OPENCL);
+ CV_Assert(preferableBackend != DNN_BACKEND_INFERENCE_ENGINE ||
+ preferableTarget == DNN_TARGET_CPU ||
+ preferableTarget == DNN_TARGET_OPENCL ||
+ preferableTarget == DNN_TARGET_OPENCL_FP16 ||
+ preferableTarget == DNN_TARGET_MYRIAD);
if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
{
- if (preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget))
+ if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
#ifndef HAVE_OPENCL
{
CV_LOG_WARNING(NULL, "DNN: OpenCL target is not available in this OpenCV build, switching to CPU.");
void initBackend()
{
CV_TRACE_FUNCTION();
- if (preferableBackend == DNN_BACKEND_DEFAULT)
+ if (preferableBackend == DNN_BACKEND_OPENCV)
CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
else if (preferableBackend == DNN_BACKEND_HALIDE)
initHalideBackend();
std::vector<LayerPin> pinsForInternalBlobs;
blobManager.allocateBlobsForLayer(ld, layerShapesIt->second, pinsForInternalBlobs,
preferableBackend == DNN_BACKEND_INFERENCE_ENGINE,
- preferableBackend == DNN_BACKEND_DEFAULT &&
+ preferableBackend == DNN_BACKEND_OPENCV &&
preferableTarget == DNN_TARGET_OPENCL_FP16);
ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
for (int i = 0; i < ld.outputBlobs.size(); ++i)
void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
{
- if( !fusion || preferableBackend != DNN_BACKEND_DEFAULT &&
+ if( !fusion || preferableBackend != DNN_BACKEND_OPENCV &&
preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
return;
// some other layers.
// TODO: OpenCL target support more fusion styles.
- if ( preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget) &&
+ if ( preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget) &&
(!cv::ocl::useOpenCL() || (ld.layerInstance->type != "Convolution" &&
ld.layerInstance->type != "MVN")) )
continue;
break;
}
- if (preferableBackend != DNN_BACKEND_DEFAULT)
+ if (preferableBackend != DNN_BACKEND_OPENCV)
continue; // Go to the next layer.
// For now, OpenCL target support fusion with activation of ReLU/ChannelsPReLU/Power/Tanh
}
}
- if (preferableBackend != DNN_BACKEND_DEFAULT)
+ if (preferableBackend != DNN_BACKEND_OPENCV)
continue; // Go to the next layer.
// the optimization #2. if there is no layer that takes max pooling layer's computed
{
CV_Assert(layers[0].outputBlobs[i].total());
if (layers[0].outputBlobs[i].depth() == CV_32F &&
- preferableBackend == DNN_BACKEND_DEFAULT &&
+ preferableBackend == DNN_BACKEND_OPENCV &&
preferableTarget == DNN_TARGET_OPENCL_FP16)
{
Mat mat = layers[0].outputBlobs[i].clone();
TickMeter tm;
tm.start();
- if (preferableBackend == DNN_BACKEND_DEFAULT ||
+ if (preferableBackend == DNN_BACKEND_OPENCV ||
!layer->supportBackend(preferableBackend))
{
if( !ld.skip )
{
- if (preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget))
+ if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
{
std::vector<UMat> umat_outputBlobs = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
layer->forward(OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers),
{
std::vector<UMat> & outputvec = *(std::vector<UMat> *)outputBlobs.getObj();
- if (impl->preferableBackend == DNN_BACKEND_DEFAULT &&
+ if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
IS_DNN_OPENCL_TARGET(impl->preferableTarget))
{
if (impl->preferableTarget == DNN_TARGET_OPENCL)
ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
MatShape prevShape = shape(ld.outputBlobs[pin.oid]);
Mat blob_;
- if (impl->preferableBackend == DNN_BACKEND_DEFAULT &&
+ if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
impl->preferableTarget == DNN_TARGET_OPENCL_FP16)
{
Mat blob_mat = blob.getMat();
bool Layer::supportBackend(int backendId)
{
    // Base-class default: a generic layer only runs on the plain OpenCV
    // backend (renamed from DNN_BACKEND_DEFAULT in this change); layers
    // override this to advertise Halide / Inference Engine support.
- return backendId == DNN_BACKEND_DEFAULT;
+ return backendId == DNN_BACKEND_OPENCV;
}
Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper> > &)
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // OpenCV backend always; Halide / Inference Engine only when OpenCV
    // was built with them. NOTE(review): relies on '&&' binding tighter
    // than '||' (no parentheses); enclosing layer class not visible here.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // OpenCV backend always; Inference Engine only when built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Halide path is restricted to channel-wise concatenation (axis == 1)
    // without padding; Inference Engine also requires no padding.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding || // By channels
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Supported by all three backends when the optional ones are built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
return Ptr<BackendNode>();
}
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
+ {
+#ifdef HAVE_INF_ENGINE
    // Build an Inference Engine "Deconvolution" layer mirroring this
    // layer's geometry (kernel/stride/pad/dilation/group) and weights.
+ const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
+ const int group = numOutput / outGroupCn;
+
+ InferenceEngine::LayerParams lp;
+ lp.name = name;
+ lp.type = "Deconvolution";
+ lp.precision = InferenceEngine::Precision::FP32;
+ std::shared_ptr<InferenceEngine::DeconvolutionLayer> ieLayer(new InferenceEngine::DeconvolutionLayer(lp));
+
+ ieLayer->_kernel_x = kernel.width;
+ ieLayer->_kernel_y = kernel.height;
+ ieLayer->_stride_x = stride.width;
+ ieLayer->_stride_y = stride.height;
+ ieLayer->_out_depth = numOutput;
+ ieLayer->_padding_x = pad.width;
+ ieLayer->_padding_y = pad.height;
+ ieLayer->_dilation_x = dilation.width;
+ ieLayer->_dilation_y = dilation.height;
+ ieLayer->_group = group;
+
    // Weights are exported in OIHW layout expected by IE deconvolution;
    // bias is optional and only attached when the layer has one.
+ ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
+ if (hasBias())
+ {
+ ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C);
+ }
+ return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif // HAVE_INF_ENGINE
    // Without Inference Engine support compiled in, return an empty node.
+ return Ptr<BackendNode>();
+ }
+
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // IE path additionally requires location predictions that are not
    // transposed (_locPredTransposed == false).
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !_locPredTransposed;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Supported by all three backends when the optional ones are built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
#ifdef HAVE_INF_ENGINE
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
    // Previously this threw StsNotImplemented; now it maps the activation
    // to the IE "TanH" layer type. A generic CNNLayer suffices because the
    // hyperbolic tangent takes no extra parameters.
- CV_Error(Error::StsNotImplemented, "TanH");
- return InferenceEngine::CNNLayerPtr();
+ lp.type = "TanH";
+ std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
+ return ieLayer;
}
#endif // HAVE_INF_ENGINE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Supported by all three backends when the optional ones are built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // OpenCV backend always; Inference Engine only when built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Both Halide and Inference Engine paths require channel axis (axis == 1).
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Supported by all three backends when the optional ones are built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Halide path requires zero pooling padding in both dimensions.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() &&
           !poolPad.width && !poolPad.height;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // IE path additionally requires L2 normalization (pnorm == 2) and
    // non-empty learned blobs.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
           pnorm == 2 && !blobs.empty();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Halide path requires exactly 4 destination ranges (4-D padding).
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // OpenCV backend always; Inference Engine only when built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() &&
(type == MAX || type == AVE && !pad.width && !pad.height) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && (type == MAX || type == AVE);
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // OpenCV backend always; Inference Engine only when built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
return false;
}
- virtual bool supportBackend(int backendId) CV_OVERRIDE
- {
- return backendId == DNN_BACKEND_DEFAULT;
- }
-
#ifdef HAVE_OPENCL
bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // OpenCV backend always; Inference Engine only when built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // OpenCV backend always; Inference Engine only when built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Supported by all three backends when the optional ones are built in.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
    // Halide requires softmax along axis 1; IE does not support log-softmax.
- return backendId == DNN_BACKEND_DEFAULT ||
+ return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1 ||
           backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax;
}
std::string halideScheduler = "",
double l1 = 0.0, double lInf = 0.0)
{
- if (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
+ if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL)
{
#ifdef HAVE_OPENCL
if (!cv::ocl::useOpenCL())
// Create two networks - with default backend and target and a tested one.
Net netDefault = readNet(weights, proto);
- Net net = readNet(weights, proto);
-
+ netDefault.setPreferableBackend(DNN_BACKEND_OPENCV);
netDefault.setInput(inp);
Mat outDefault = netDefault.forward(outputLayer).clone();
+ Net net = readNet(weights, proto);
net.setInput(inp);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
TEST_P(DNNTestNetwork, ENet)
{
if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
- (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16))
+ (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
processNet("dnn/Enet-model-best.net", "", Size(512, 512), "l367_Deconvolution",
target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_enet.yml" :
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
- float l1 = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.0007 : 0.0;
- float lInf = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.011 : 0.0;
+ float l1 = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.0007 : 0.0;
+ float lInf = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.011 : 0.0;
processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
inp, "detection_out", "", l1, lInf);
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
- float l1 = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.008 : 0.0;
- float lInf = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.06 : 0.0;
+ float l1 = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.008 : 0.0;
+ float lInf = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.06 : 0.0;
processNet("dnn/ssd_mobilenet_v1_coco.pb", "dnn/ssd_mobilenet_v1_coco.pbtxt",
inp, "detection_out", "", l1, lInf);
}
TEST_P(DNNTestNetwork, DenseNet_121)
{
if ((backend == DNN_BACKEND_HALIDE) ||
- (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ||
+ (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 ||
target == DNN_TARGET_MYRIAD)))
throw SkipTestException("");
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
#endif
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL),
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL_FP16)
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
};
INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, testing::ValuesIn(testCases));
ASSERT_TRUE(readFileInMemory(model, dataModel));
Net net = readNetFromCaffe(dataProto.c_str(), dataProto.size());
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
ASSERT_FALSE(net.empty());
Net net2 = readNetFromCaffe(dataProto.c_str(), dataProto.size(),
const float l1 = 1e-5;
const float lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 3e-3 : 1e-4;
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
Mat sample = imread(_tf("grace_hopper_227.png"));
net = readNetFromCaffe(proto, model);
ASSERT_FALSE(net.empty());
}
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat sample = imread(_tf("street.png"));
ASSERT_TRUE(!sample.empty());
net = readNetFromCaffe(proto, model);
ASSERT_FALSE(net.empty());
}
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat sample = imread(_tf("street.png"));
ASSERT_TRUE(!sample.empty());
const float l1 = (targetId == DNN_TARGET_OPENCL_FP16) ? 1.5e-4 : 1e-5;
const float lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-4;
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
Mat sample = imread(_tf("street.png"));
findDataFile("dnn/ResNet-50-model.caffemodel", false));
int targetId = GetParam();
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
float l1 = (targetId == DNN_TARGET_OPENCL_FP16) ? 3e-5 : 1e-5;
findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
int targetId = GetParam();
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
Mat input = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(227,227), Scalar(), false);
shrinkCaffeModel(model, "bvlc_alexnet.caffemodel_fp16");
Net net = readNetFromCaffe(proto, "bvlc_alexnet.caffemodel_fp16");
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat sample = imread(findDataFile("dnn/grace_hopper_227.png", false));
shrinkCaffeModel(model, "bvlc_googlenet.caffemodel_fp16");
Net net = readNetFromCaffe(proto, "bvlc_googlenet.caffemodel_fp16");
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );
const string proto = findDataFile("dnn/colorization_deploy_v2.prototxt", false);
const string model = findDataFile("dnn/colorization_release_v2.caffemodel", false);
Net net = readNetFromCaffe(proto, model);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.getLayer(net.getLayerId("class8_ab"))->blobs.push_back(kernel);
net.getLayer(net.getLayerId("conv8_313_rh"))->blobs.push_back(Mat(1, 313, CV_32F, 2.606));
Mat ref = blobFromNPY(_tf("densenet_121_output.npy"));
Net net = readNetFromCaffe(proto, model);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setInput(inp);
Mat out = net.forward();
{
const string proto = findDataFile("dnn/layers/net_input.prototxt", false);
Net net = readNetFromCaffe(proto);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat first_image(10, 11, CV_32FC3);
Mat second_image(10, 11, CV_32FC3);
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
net.setInput(blob);
std::string model = findDataFile("dnn/" + models[i], false);
Net net = readNetFromCaffe(proto, model);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat img = imread(findDataFile("dnn/dog416.png", false));
resize(img, img, Size(800, 600));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(102.9801, 115.9465, 122.7717), false, false);
int backendId, int targetId, float scoreDiff = 0.0,
float iouDiff = 0.0, float confThreshold = 0.24)
{
- if (backendId == DNN_BACKEND_DEFAULT && targetId == DNN_TARGET_OPENCL)
+ if (backendId == DNN_BACKEND_OPENCV && targetId == DNN_TARGET_OPENCL)
{
#ifdef HAVE_OPENCL
if (!cv::ocl::useOpenCL())
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
#endif
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_CPU),
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL),
- tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL_FP16)
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_CPU),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
};
INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets, testing::ValuesIn(testCases));
Mat ref = blobFromNPY(findDataFile("dnn/darknet/" + name + "_out.npy", false));
Net net = readNet(cfg, model);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setInput(inp);
Mat out = net.forward();
normAssert(out, ref);
return (getOpenCVExtraDir() + "/dnn/") + filename;
}
-TEST(Reproducibility_GoogLeNet, Accuracy)
+typedef testing::TestWithParam<DNNTarget> Reproducibility_GoogLeNet;
+TEST_P(Reproducibility_GoogLeNet, Batching)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
+ int targetId = GetParam();
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
+ net.setPreferableTarget(targetId);
- std::vector<Mat> inpMats;
- inpMats.push_back( imread(_tf("googlenet_0.png")) );
- inpMats.push_back( imread(_tf("googlenet_1.png")) );
- ASSERT_TRUE(!inpMats[0].empty() && !inpMats[1].empty());
-
- net.setInput(blobFromImages(inpMats, 1.0f, Size(), Scalar(), false), "data");
- Mat out = net.forward("prob");
-
- Mat ref = blobFromNPY(_tf("googlenet_prob.npy"));
- normAssert(out, ref);
-}
-
-OCL_TEST(Reproducibility_GoogLeNet, Accuracy)
-{
- Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
- findDataFile("dnn/bvlc_googlenet.caffemodel", false));
-
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
- net.setPreferableTarget(DNN_TARGET_OPENCL);
-
- // Initialize network for a single image in the batch but test with batch size=2.
- Mat inp = Mat(224, 224, CV_8UC3);
- randu(inp, -1, 1);
- net.setInput(blobFromImage(inp));
- net.forward();
+ if (targetId == DNN_TARGET_OPENCL)
+ {
+ // Initialize network for a single image in the batch but test with batch size=2.
+ Mat inp = Mat(224, 224, CV_8UC3);
+ randu(inp, -1, 1);
+ net.setInput(blobFromImage(inp));
+ net.forward();
+ }
std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );
normAssert(out, ref);
}
-TEST(IntermediateBlobs_GoogLeNet, Accuracy)
+TEST_P(Reproducibility_GoogLeNet, IntermediateBlobs)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
+ int targetId = GetParam();
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
+ net.setPreferableTarget(targetId);
std::vector<String> blobsNames;
blobsNames.push_back("conv1/7x7_s2");
}
}
-OCL_TEST(IntermediateBlobs_GoogLeNet, Accuracy)
-{
- Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
- findDataFile("dnn/bvlc_googlenet.caffemodel", false));
-
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
- net.setPreferableTarget(DNN_TARGET_OPENCL);
-
- std::vector<String> blobsNames;
- blobsNames.push_back("conv1/7x7_s2");
- blobsNames.push_back("conv1/relu_7x7");
- blobsNames.push_back("inception_4c/1x1");
- blobsNames.push_back("inception_4c/relu_1x1");
- std::vector<Mat> outs;
- Mat in = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(), Scalar(), false);
- net.setInput(in, "data");
- net.forward(outs, blobsNames);
- CV_Assert(outs.size() == blobsNames.size());
-
- for (size_t i = 0; i < blobsNames.size(); i++)
- {
- std::string filename = blobsNames[i];
- std::replace( filename.begin(), filename.end(), '/', '#');
- Mat ref = blobFromNPY(_tf("googlenet_" + filename + ".npy"));
-
- normAssert(outs[i], ref, "", 1E-4, 1E-2);
- }
-}
-
-TEST(SeveralCalls_GoogLeNet, Accuracy)
+TEST_P(Reproducibility_GoogLeNet, SeveralCalls)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
+ int targetId = GetParam();
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
+ net.setPreferableTarget(targetId);
std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );
normAssert(outs[0], ref, "", 1E-4, 1E-2);
}
-OCL_TEST(SeveralCalls_GoogLeNet, Accuracy)
-{
- Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
- findDataFile("dnn/bvlc_googlenet.caffemodel", false));
-
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
- net.setPreferableTarget(DNN_TARGET_OPENCL);
-
- std::vector<Mat> inpMats;
- inpMats.push_back( imread(_tf("googlenet_0.png")) );
- inpMats.push_back( imread(_tf("googlenet_1.png")) );
- ASSERT_TRUE(!inpMats[0].empty() && !inpMats[1].empty());
-
- net.setInput(blobFromImages(inpMats, 1.0f, Size(), Scalar(), false), "data");
- Mat out = net.forward();
-
- Mat ref = blobFromNPY(_tf("googlenet_prob.npy"));
- normAssert(out, ref);
-
- std::vector<String> blobsNames;
- blobsNames.push_back("conv1/7x7_s2");
- std::vector<Mat> outs;
- Mat in = blobFromImage(inpMats[0], 1.0f, Size(), Scalar(), false);
- net.setInput(in, "data");
- net.forward(outs, blobsNames);
- CV_Assert(outs.size() == blobsNames.size());
-
- ref = blobFromNPY(_tf("googlenet_conv1#7x7_s2.npy"));
-
- normAssert(outs[0], ref, "", 1E-4, 1E-2);
-}
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_GoogLeNet, availableDnnTargets());
}} // namespace
net.connect(0, 0, lid, 0);
net.setInput(input);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(params.name).clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
Mat input({1, 1, 4, 4}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setInput(input);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward("testUnpool").clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
Mat input({1, kNumChannels, 10, 10}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setInput(input);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(lp.name).clone();
net.setInput(input);
randu(input, -1.0f, 1.0f);
net.setInput(input);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(concatParam.name).clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
randu(input, -1.0f, 1.0f);
net.setInput(input);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(eltwiseParam.name).clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
Mat input({4, 3, 5, 6}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setInput(input);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward().clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
Net net = readNetFromCaffe(prototxt, (useCaffeModel) ? caffemodel : String());
ASSERT_FALSE(net.empty());
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
Mat inp = blobFromNPY(inpfile);
randu(input, 0.0f, 1.0f); // [0, 1] to make AbsVal an identity transformation.
net.setInput(input);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
normAssert(slice(out, Range::all(), Range(0, 2), Range::all(), Range::all()), input);
Net net = readNetFromCaffe(_tf("reshape_and_slice_routines.prototxt"));
ASSERT_FALSE(net.empty());
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
Mat input(6, 12, CV_32F);
Mat ref = blobFromNPY(_tf("layer_elu_out.npy"));
net.setInput(inp, "input");
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
normAssert(ref, out);
Mat ref = blobFromNPY(outfile);
net.setInput(inp, "data");
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
normAssert(ref, out);
net.setInput(inp, "input");
net.setInput(rois, "rois");
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
net.setInput(imInfo, "im_info");
std::vector<Mat> outs;
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.forward(outs, "output");
for (int i = 0; i < 2; ++i)
net.setInputsNames(inpNames);
net.setInput(input, inpNames[0]);
net.setInput(weights, inpNames[1]);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
Mat ref(input.dims, input.size, CV_32F);
net.setInputsNames(inpNames);
net.setInput(inpImage, inpNames[0]);
net.setInput(sizImage, inpNames[1]);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
// There are a few conditions that represent invalid input to the crop
// layer, so in those cases we want to verify an exception is thrown.
Mat target = (Mat_<float>(2, 2) << (1 + 2 + 4 + 5) / 4.f, (3 + 6) / 2.f, (7 + 8) / 2.f, 9);
Mat tmp = blobFromImage(inp);
net.setInput(blobFromImage(inp));
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
normAssert(out, blobFromImage(target));
}
Mat inp(1, 2, CV_32F);
randu(inp, -1, 1);
net.setInput(blobFromImage(inp));
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
Mat target = (Mat_<float>(4, 4) << 0.0, 0.0, 0.75, 1.0,
Mat inp = blobFromNPY(_tf("blob.npy"));
netDefault.setInput(inp);
+ netDefault.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outDefault = netDefault.forward();
net.setInput(inp);
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_DEFAULT;
+ return backendId == DNN_BACKEND_OPENCV;
}
virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE {}
net.addLayerToPrev(lp.name, lp.type, lp);
net.setInput(inp);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat output = net.forward();
if (i == 0) EXPECT_EQ(output.at<float>(0), 1);
namespace opencv_test {
using namespace cv::dnn;
-CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE)
+CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE, DNN_BACKEND_OPENCV)
CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16, DNN_TARGET_MYRIAD)
static testing::internal::ParamGenerator<DNNTarget> availableDnnTargets()
net = readNetFromTensorflow(model);
ASSERT_FALSE(net.empty());
}
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat sample = imread(_tf("grace_hopper_227.png"));
ASSERT_TRUE(!sample.empty());
net = readNetFromTensorflow(model);
ASSERT_FALSE(net.empty());
}
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat sample = imread(_tf("grace_hopper_227.png"));
ASSERT_TRUE(!sample.empty());
ASSERT_FALSE(net.empty());
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
cv::Mat input = blobFromNPY(inpPath);
}
Net net = readNetFromTensorflow(netPath, netConfig);
-
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());
net.setInput(inp);
Mat img = imread(findDataFile("dnn/street.png", false));
Mat blob = blobFromImage(img, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), true, false);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());
net.setInput(blob);
std::string model = findDataFile("dnn/faster_rcnn_inception_v2_coco_2018_01_28.pb", false);
Net net = readNetFromTensorflow(model, proto);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat img = imread(findDataFile("dnn/dog416.png", false));
Mat blob = blobFromImage(img, 1.0f / 127.5, Size(800, 600), Scalar(127.5, 127.5, 127.5), true, false);
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());
net.setInput(blob);
std::string refGeometryPath = findDataFile("dnn/east_text_detection.geometry.npy", false);
Net net = readNet(findDataFile("dnn/frozen_east_text_detection.pb", false));
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat img = imread(imgPath);
Mat inp = blobFromImage(img, 1.0, Size(), Scalar(123.68, 116.78, 103.94), true, false);
Net net = readNetFromTorch(_tf(prefix + "_net" + suffix), isBinary);
ASSERT_FALSE(net.empty());
- net.setPreferableBackend(DNN_BACKEND_DEFAULT);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
Mat inp, outRef;
const string model = findDataFile("dnn/openface_nn4.small2.v1.t7", false);
Net net = readNetFromTorch(model);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());
Mat sample = imread(findDataFile("cv/shared/lena.png", false));
ASSERT_TRUE(!net.empty());
}
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());
Mat sample = imread(_tf("street.png", false));
const string model = findDataFile(models[i], false);
Net net = readNetFromTorch(model);
+ net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());
Mat img = imread(findDataFile("dnn/googlenet_1.png", false));
"{ height | | Preprocess input image by resizing to a specific height. }"
"{ rgb | | Indicate that model works with RGB input images instead BGR ones. }"
"{ backend | 0 | Choose one of computation backends: "
- "0: default C++ backend, "
+ "0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
- "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)}"
+ "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+ "3: OpenCV implementation }"
"{ target | 0 | Choose one of target computation devices: "
- "0: CPU target (by default),"
- "1: OpenCL }";
+ "0: CPU target (by default), "
+ "1: OpenCL, "
+ "2: OpenCL fp16 (half-float precision), "
+ "3: VPU }";
using namespace cv;
using namespace dnn;
import numpy as np
import sys
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
parser = argparse.ArgumentParser(description='Use this script to run classification deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
help='Indicate that model works with RGB input images instead BGR ones.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
- "%d: default C++ backend, "
+ "%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
- "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" % backends)
+ "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+ "%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
- '%d: OpenCL' % targets)
+ '%d: OpenCL, '
+ '%d: OpenCL fp16 (half-float precision), '
+ '%d: VPU' % targets)
args = parser.parse_args()
# Load names of classes
// run the L channel through the network
Mat inputBlob = blobFromImage(input);
net.setInput(inputBlob);
- Mat result = net.forward("class8_ab");
+ Mat result = net.forward();
// retrieve the calculated a,b channels from the network output
Size siz(result.size[2], result.size[3]);
img_l_rs -= 50 # subtract 50 for mean-centering
net.setInput(cv.dnn.blobFromImage(img_l_rs))
- ab_dec = net.forward('class8_ab')[0,:,:,:].transpose((1,2,0)) # this is our result
+ ab_dec = net.forward()[0,:,:,:].transpose((1,2,0)) # this is our result
(H_out,W_out) = ab_dec.shape[:2]
ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig))
args = parser.parse_args()
net = cv.dnn.readNetFromTorch(args.model)
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
if args.input:
cap = cv.VideoCapture(args.input)
### Get OpenCV predictions #####################################################
net = cv.dnn.readNetFromTensorflow(args.weights, args.prototxt)
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
detections = []
for imgName in os.listdir(args.images):
"{ rgb | | Indicate that model works with RGB input images instead BGR ones. }"
"{ thr | .5 | Confidence threshold. }"
"{ backend | 0 | Choose one of computation backends: "
- "0: default C++ backend, "
+ "0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
- "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)}"
- "{ target | 0 | Choose one of target computation devices: "
- "0: CPU target (by default),"
- "1: OpenCL }";
+ "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+ "3: OpenCV implementation }"
+ "{ target | 0 | Choose one of target computation devices: "
+ "0: CPU target (by default), "
+ "1: OpenCL, "
+ "2: OpenCL fp16 (half-float precision), "
+ "3: VPU }";
+
using namespace cv;
using namespace dnn;
import sys
import numpy as np
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
parser = argparse.ArgumentParser(description='Use this script to run object detection deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
- "%d: default C++ backend, "
+ "%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
- "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" % backends)
+ "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+ "%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
- '%d: OpenCL' % targets)
+ '%d: OpenCL, '
+ '%d: OpenCL fp16 (half-float precision), '
+ '%d: VPU' % targets)
args = parser.parse_args()
# Load names of classes
parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')
-parser.add_argument('--inf_engine', action='store_true',
- help='Enable Intel Inference Engine computational backend. '
- 'Check that plugins folder is in LD_LIBRARY_PATH environment variable')
args = parser.parse_args()
inHeight = args.height
net = cv.dnn.readNetFromCaffe(args.proto, args.model)
-if args.inf_engine:
- net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
cap = cv.VideoCapture(args.input if args.input else 0)
"{ height | | Preprocess input image by resizing to a specific height. }"
"{ rgb | | Indicate that model works with RGB input images instead BGR ones. }"
"{ backend | 0 | Choose one of computation backends: "
- "0: default C++ backend, "
+ "0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
- "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)}"
+ "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+ "3: OpenCV implementation }"
"{ target | 0 | Choose one of target computation devices: "
- "0: CPU target (by default),"
- "1: OpenCL }";
+ "0: CPU target (by default), "
+ "1: OpenCL, "
+ "2: OpenCL fp16 (half-float precision), "
+ "3: VPU }";
using namespace cv;
using namespace dnn;
import numpy as np
import sys
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
parser = argparse.ArgumentParser(description='Use this script to run semantic segmentation deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
help='Indicate that model works with RGB input images instead BGR ones.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
- "%d: default C++ backend, "
+ "%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
- "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" % backends)
+ "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+ "%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
- '%d: OpenCL' % targets)
+ '%d: OpenCL, '
+ '%d: OpenCL fp16 (half-float precision), '
+ '%d: VPU' % targets)
args = parser.parse_args()
np.random.seed(324)