#include "opencv2/gpu.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/video.hpp"
+#include "opencv2/legacy.hpp"
#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_perf.hpp"
(void)dst;
(void)flags;
(void)stream;
- CV_Error(CV_StsNotImplemented, "The library was build without CUBLAS");
+        CV_Error(cv::Error::StsNotImplemented, "The library was built without CUBLAS");
#else
// CUBLAS works with column-major matrices
if (src1.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
bool tr1 = (flags & GEMM_1_T) != 0;
if (src1.type() == CV_64FC2)
{
if (tr1 || tr2 || tr3)
- CV_Error(CV_StsNotImplemented, "transpose operation doesn't implemented for CV_64FC2 type");
+            CV_Error(cv::Error::StsNotImplemented, "transpose operation is not implemented for CV_64FC2 type");
}
Size src1Size = tr1 ? Size(src1.rows, src1.cols) : src1.size();
else // if (src.elemSize() == 8)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
NppStStreamHandler h(stream);
}
else
{
- CV_Error(CV_StsBadArg, "Unknown/unsupported norm type");
+ CV_Error(cv::Error::StsBadArg, "Unknown/unsupported norm type");
}
if (mask.empty())
blendLinearCaller<float>(size.height, size.width, cn, img1, img2, weights1, weights2, result, StreamAccessor::getStream(stream));
break;
default:
- CV_Error(CV_StsUnsupportedFormat, "bad image depth in linear blending function");
+ CV_Error(cv::Error::StsUnsupportedFormat, "bad image depth in linear blending function");
}
}
#include "precomp.hpp"
#include <vector>
#include <iostream>
+#include "opencv2/objdetect/objdetect_c.h"
using namespace cv;
using namespace cv::gpu;
cv::Size getClassifierCvSize() const { return cv::Size(haar.ClassifierSize.width, haar.ClassifierSize.height); }
private:
- static void NCVDebugOutputHandler(const String &msg) { CV_Error(CV_GpuApiCallError, msg.c_str()); }
+ static void NCVDebugOutputHandler(const String &msg) { CV_Error(cv::Error::GpuApiCallError, msg.c_str()); }
NCVStatus load(const String& classifierFile)
{
GpuMat buff = integralBuffer;
// generate integral for scale
- gpu::resize(image, src, level.sFrame, 0, 0, CV_INTER_LINEAR);
+ gpu::resize(image, src, level.sFrame, 0, 0, cv::INTER_LINEAR);
gpu::integralBuffered(src, sint, buff);
// calculate job
func_t func = funcs[code];
if (func == 0)
- CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
+ CV_Error( cv::Error::StsBadFlag, "Unknown/unsupported color conversion code" );
func(src, dst, dcn, stream);
}
switch (code)
{
- case CV_BayerBG2GRAY: case CV_BayerGB2GRAY: case CV_BayerRG2GRAY: case CV_BayerGR2GRAY:
- bayer_to_gray(src, dst, code == CV_BayerBG2GRAY || code == CV_BayerGB2GRAY, code == CV_BayerGB2GRAY || code == CV_BayerGR2GRAY, stream);
+ case cv::COLOR_BayerBG2GRAY: case cv::COLOR_BayerGB2GRAY: case cv::COLOR_BayerRG2GRAY: case cv::COLOR_BayerGR2GRAY:
+ bayer_to_gray(src, dst, code == cv::COLOR_BayerBG2GRAY || code == cv::COLOR_BayerGB2GRAY, code == cv::COLOR_BayerGB2GRAY || code == cv::COLOR_BayerGR2GRAY, stream);
break;
- case CV_BayerBG2BGR: case CV_BayerGB2BGR: case CV_BayerRG2BGR: case CV_BayerGR2BGR:
- bayer_to_bgr(src, dst, dcn, code == CV_BayerBG2BGR || code == CV_BayerGB2BGR, code == CV_BayerGB2BGR || code == CV_BayerGR2BGR, stream);
+ case cv::COLOR_BayerBG2BGR: case cv::COLOR_BayerGB2BGR: case cv::COLOR_BayerRG2BGR: case cv::COLOR_BayerGR2BGR:
+ bayer_to_bgr(src, dst, dcn, code == cv::COLOR_BayerBG2BGR || code == cv::COLOR_BayerGB2BGR, code == cv::COLOR_BayerGB2BGR || code == cv::COLOR_BayerGR2BGR, stream);
break;
case COLOR_BayerBG2BGR_MHT: case COLOR_BayerGB2BGR_MHT: case COLOR_BayerRG2BGR_MHT: case COLOR_BayerGR2BGR_MHT:
}
default:
- CV_Error( CV_StsBadFlag, "Unknown / unsupported color conversion code" );
+ CV_Error( cv::Error::StsBadFlag, "Unknown / unsupported color conversion code" );
}
}
(void)dst;
(void)forward;
(void)stream;
- CV_Error( CV_StsNotImplemented, "This function works only with CUDA 5.0 or higher" );
+ CV_Error( cv::Error::StsNotImplemented, "This function works only with CUDA 5.0 or higher" );
#else
typedef NppStatus (*func_t)(const Npp8u* pSrc, int nSrcStep, Npp8u* pDst, int nDstStep, NppiSize oSizeROI);
typedef NppStatus (*func_inplace_t)(Npp8u* pSrcDst, int nSrcDstStep, NppiSize oSizeROI);
CV_Assert(src.type() == CV_8UC3);
lab.create(src.size(), src.type());
- cv::gpu::cvtColor(src, lab, CV_BGR2Lab, 0, s);
+ cv::gpu::cvtColor(src, lab, cv::COLOR_BGR2Lab, 0, s);
l.create(src.size(), CV_8U);
ab.create(src.size(), CV_8UC2);
simpleMethod(ab, ab, h_color, search_window, block_window, s);
cudev::imgproc::fnlm_merge_channels(l, ab, lab, StreamAccessor::getStream(s));
- cv::gpu::cvtColor(lab, dst, CV_Lab2BGR, 0, s);
+ cv::gpu::cvtColor(lab, dst, cv::COLOR_Lab2BGR, 0, s);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, src2_, dst_, mask, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, sc.val[0], dst, mask, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, src2_, dst_, mask, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, sc.val[0], dst, mask, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, src2_, dst_, scale, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, nsc.val[0], dst, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, src2_, dst_, scale, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, nsc.val[0], dst, stream);
}
if (sdepth == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, scale, dst, stream);
}
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), src1.type());
const func_t func = funcs[depth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, src2_, dst_, stream);
}
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), src1.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), CV_MAKE_TYPE(CV_8U, cn));
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), CV_MAKE_TYPE(CV_8U, cn));
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), src1.type());
const func_t func = funcs[depth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, src2_, dst_, stream);
}
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), src1.type());
const func_t func = funcs[depth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, src2_, dst_, stream);
}
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src.size(), src.type());
if (sdepth1 == CV_64F || sdepth2 == CV_64F || ddepth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
dst.create(src1.size(), CV_MAKE_TYPE(ddepth, cn));
const func_t func = funcs[sdepth1][sdepth2][ddepth];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src1_, alpha, src2_, beta, gamma, dst_, StreamAccessor::getStream(stream));
}
stream_ = create_InputMediaStream_FFMPEG_p(fname.c_str(), &codec, &chroma_format, &width, &height);
if (!stream_)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported video source");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported video source");
format_.codec = static_cast<VideoReader_GPU::Codec>(codec);
format_.chromaFormat = static_cast<VideoReader_GPU::ChromaFormat>(chroma_format);
#else
#include "fgd_bgfg_common.hpp"
+#include "opencv2/imgproc/imgproc_c.h"
namespace
{
erode(src, buf2, kernel, buf1, anchor, iterations, stream);
dilate(buf2, dst, kernel, buf1, anchor, iterations, stream);
break;
- case CV_MOP_CLOSE:
+ case MORPH_CLOSE:
dilate(src, buf2, kernel, buf1, anchor, iterations, stream);
erode(buf2, dst, kernel, buf1, anchor, iterations, stream);
break;
- case CV_MOP_GRADIENT:
+ case MORPH_GRADIENT:
erode(src, buf2, kernel, buf1, anchor, iterations, stream);
dilate(src, dst, kernel, buf1, anchor, iterations, stream);
subtract(dst, buf2, dst, GpuMat(), -1, stream);
break;
- case CV_MOP_TOPHAT:
+ case MORPH_TOPHAT:
erode(src, dst, kernel, buf1, anchor, iterations, stream);
dilate(dst, buf2, kernel, buf1, anchor, iterations, stream);
subtract(src, buf2, dst, GpuMat(), -1, stream);
break;
- case CV_MOP_BLACKHAT:
+ case MORPH_BLACKHAT:
dilate(src, dst, kernel, buf1, anchor, iterations, stream);
erode(dst, buf2, kernel, buf1, anchor, iterations, stream);
subtract(buf2, src, dst, GpuMat(), -1, stream);
break;
default:
- CV_Error(CV_StsBadArg, "unknown morphological operation");
+ CV_Error(cv::Error::StsBadArg, "unknown morphological operation");
}
}
DeviceInfo devInfo;
int cc = devInfo.majorVersion() * 10 + devInfo.minorVersion();
if (ksize > 16 && cc < 20)
- CV_Error(CV_StsNotImplemented, "column linear filter doesn't implemented for kernel size > 16 for device with compute capabilities less than 2.0");
+        CV_Error(cv::Error::StsNotImplemented, "column linear filter is not implemented for kernel size > 16 for device with compute capabilities less than 2.0");
func(src, dst, kernel.ptr<float>(), ksize, anchor, brd_type, cc, StreamAccessor::getStream(s));
}
CV_Assert(!mask.empty() && mask.type() == CV_8U);
if (!deviceSupports(SHARED_ATOMICS))
- CV_Error(CV_StsNotImplemented, "The device doesn't support shared atomics and communicative synchronization!");
+ CV_Error(cv::Error::StsNotImplemented, "The device doesn't support shared atomics and communicative synchronization!");
components.create(mask.size(), CV_32SC1);
win_stride.height, win_stride.width, img.rows, img.cols, block_hists.ptr<float>(), descriptors);
break;
default:
- CV_Error(CV_StsBadArg, "Unknown descriptor format");
+ CV_Error(cv::Error::StsBadArg, "Unknown descriptor format");
}
}
Size scaled_win_size(cvRound(win_size.width * scale), cvRound(win_size.height * scale));
for (size_t j = 0; j < locations.size(); j++)
- all_candidates.push_back(Rect(Point2d((CvPoint)locations[j]) * scale, scaled_win_size));
+ all_candidates.push_back(Rect(Point2d(locations[j]) * scale, scaled_win_size));
}
found_locations.assign(all_candidates.begin(), all_candidates.end());
detect(smaller_img, locations, hit_threshold, win_stride, padding);
Size scaled_win_size(cvRound(win_size.width * scale), cvRound(win_size.height * scale));
for (size_t j = 0; j < locations.size(); j++)
- all_candidates.push_back(Rect(Point2d((CvPoint)locations[j]) * scale, scaled_win_size));
+ all_candidates.push_back(Rect(Point2d(locations[j]) * scale, scaled_win_size));
}
found_locations.assign(all_candidates.begin(), all_candidates.end());
CV_Assert(src.type() == CV_8UC1);
CV_Assert(src.cols < std::numeric_limits<unsigned short>::max());
CV_Assert(src.rows < std::numeric_limits<unsigned short>::max());
- CV_Assert(method == CV_HOUGH_GRADIENT);
+ CV_Assert(method == cv::HOUGH_GRADIENT);
CV_Assert(dp > 0);
CV_Assert(minRadius > 0 && maxRadius > minRadius);
CV_Assert(cannyThreshold > 0);
return new GHT_Guil_Full();
}
- CV_Error(CV_StsBadArg, "Unsupported method");
+ CV_Error(cv::Error::StsBadArg, "Unsupported method");
return Ptr<GeneralizedHough_GPU>();
}
using namespace ::cv::gpu::cudev::imgproc;
if( src.empty() )
- CV_Error( CV_StsBadArg, "The input image is empty" );
+ CV_Error( cv::Error::StsBadArg, "The input image is empty" );
if( src.depth() != CV_8U || src.channels() != 4 )
- CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
+ CV_Error( cv::Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
dst.create( src.size(), CV_8UC4 );
using namespace ::cv::gpu::cudev::imgproc;
if( src.empty() )
- CV_Error( CV_StsBadArg, "The input image is empty" );
+ CV_Error( cv::Error::StsBadArg, "The input image is empty" );
if( src.depth() != CV_8U || src.channels() != 4 )
- CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
+ CV_Error( cv::Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
dstr.create( src.size(), CV_8UC4 );
dstsp.create( src.size(), CV_16SC2 );
CV_Assert(src.type() == CV_8UC1);
if (!deviceSupports(SHARED_ATOMICS))
- CV_Error(CV_StsNotImplemented, "The device doesn't support shared atomics");
+ CV_Error(cv::Error::StsNotImplemented, "The device doesn't support shared atomics");
if( low_thresh > high_thresh )
std::swap( low_thresh, high_thresh);
{
switch (method)
{
- case CV_TM_CCORR:
+ case cv::TM_CCORR:
if (depth == CV_32F) return 250;
if (depth == CV_8U) return 300;
break;
- case CV_TM_SQDIFF:
+ case cv::TM_SQDIFF:
if (depth == CV_8U) return 300;
break;
}
- CV_Error(CV_StsBadArg, "getTemplateThreshold: unsupported match template mode");
+ CV_Error(cv::Error::StsBadArg, "getTemplateThreshold: unsupported match template mode");
return 0;
}
const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
{
result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
- if (templ.size().area() < getTemplateThreshold(CV_TM_CCORR, CV_32F))
+ if (templ.size().area() < getTemplateThreshold(cv::TM_CCORR, CV_32F))
{
matchTemplateNaive_CCORR_32F(image, templ, result, image.channels(), StreamAccessor::getStream(stream));
return;
void matchTemplate_CCORR_8U(
const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
{
- if (templ.size().area() < getTemplateThreshold(CV_TM_CCORR, CV_8U))
+ if (templ.size().area() < getTemplateThreshold(cv::TM_CCORR, CV_8U))
{
result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
matchTemplateNaive_CCORR_8U(image, templ, result, image.channels(), StreamAccessor::getStream(stream));
void matchTemplate_SQDIFF_8U(
const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
{
- if (templ.size().area() < getTemplateThreshold(CV_TM_SQDIFF, CV_8U))
+ if (templ.size().area() < getTemplateThreshold(cv::TM_SQDIFF, CV_8U))
{
result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
matchTemplateNaive_SQDIFF_8U(image, templ, result, image.channels(), StreamAccessor::getStream(stream));
(unsigned int)templ_sum[3], result, StreamAccessor::getStream(stream));
break;
default:
- CV_Error(CV_StsBadArg, "matchTemplate: unsupported number of channels");
+ CV_Error(cv::Error::StsBadArg, "matchTemplate: unsupported number of channels");
}
}
}
result, StreamAccessor::getStream(stream));
break;
default:
- CV_Error(CV_StsBadArg, "matchTemplate: unsupported number of channels");
+ CV_Error(cv::Error::StsBadArg, "matchTemplate: unsupported number of channels");
}
}
}
{
case CV_8U: callers = callers8U; break;
case CV_32F: callers = callers32F; break;
- default: CV_Error(CV_StsBadArg, "matchTemplate: unsupported data type");
+ default: CV_Error(cv::Error::StsBadArg, "matchTemplate: unsupported data type");
}
Caller caller = callers[method];
CV_Assert(src.type() == CV_8UC1);
if (!deviceSupports(FEATURE_SET_COMPUTE_13))
- CV_Error(CV_StsNotImplemented, "Not sufficient compute capebility");
+        CV_Error(cv::Error::StsNotImplemented, "Not sufficient compute capability");
NppiSize sz;
sz.width = src.cols;
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
Size buf_size;
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
Size buf_size;
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
Size buf_size;
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
Size buf_size;
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
Size valbuf_size, locbuf_size;
if (src.depth() == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
Size buf_size;
{
CV_Assert( src.channels() <= 4 );
CV_Assert( dim == 0 || dim == 1 );
- CV_Assert( reduceOp == CV_REDUCE_SUM || reduceOp == CV_REDUCE_AVG || reduceOp == CV_REDUCE_MAX || reduceOp == CV_REDUCE_MIN );
+ CV_Assert( reduceOp == REDUCE_SUM || reduceOp == REDUCE_AVG || reduceOp == REDUCE_MAX || reduceOp == REDUCE_MIN );
if (dtype < 0)
dtype = src.depth();
const func_t func = funcs[src.depth()][dst.depth()];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of input and output array formats");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of input and output array formats");
func(src.reshape(1), dst.data, reduceOp, StreamAccessor::getStream(stream));
}
const func_t func = funcs[src.depth()][dst.depth()];
if (!func)
- CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of input and output array formats");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of input and output array formats");
func(src, dst.data, src.channels(), reduceOp, StreamAccessor::getStream(stream));
}
namespace
{
- static void outputHandler(const String &msg) { CV_Error(CV_GpuApiCallError, msg.c_str()); }
+ static void outputHandler(const String &msg) { CV_Error(cv::Error::GpuApiCallError, msg.c_str()); }
}
void cv::gpu::BroxOpticalFlow::operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& s)
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
bool single_channel_only = true;
if (depth == CV_64F)
{
if (!deviceSupports(NATIVE_DOUBLE))
- CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+ CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
}
if (num_channels == 1)
bool cv::gpu::VideoReader_GPU::Impl::grab(GpuMat& frame)
{
if (videoSource_->hasError() || videoParser_->hasError())
- CV_Error(CV_StsUnsupportedFormat, "Unsupported video source");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported video source");
if (!videoSource_->isStarted() || frameQueue_->isEndOfDecode())
return false;
break;
if (videoSource_->hasError() || videoParser_->hasError())
- CV_Error(CV_StsUnsupportedFormat, "Unsupported video source");
+ CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported video source");
if (frameQueue_->isEndOfDecode())
return false;
//M*/
#include "test_precomp.hpp"
+#include "opencv2/legacy.hpp"
#ifdef HAVE_CUDA
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else if (type == CV_64FC2 && flags != 0)
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
//////////////////////////////////////////////////////////////////////////////
// Reduce
-CV_ENUM(ReduceCode, CV_REDUCE_SUM, CV_REDUCE_AVG, CV_REDUCE_MAX, CV_REDUCE_MIN)
-#define ALL_REDUCE_CODES testing::Values(ReduceCode(CV_REDUCE_SUM), ReduceCode(CV_REDUCE_AVG), ReduceCode(CV_REDUCE_MAX), ReduceCode(CV_REDUCE_MIN))
+CV_ENUM(ReduceCode, cv::REDUCE_SUM, cv::REDUCE_AVG, cv::REDUCE_MAX, cv::REDUCE_MIN)
PARAM_TEST_CASE(Reduce, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, ReduceCode, UseRoi)
{
type = CV_MAKE_TYPE(depth, channels);
- if (reduceOp == CV_REDUCE_MAX || reduceOp == CV_REDUCE_MIN)
+ if (reduceOp == cv::REDUCE_MAX || reduceOp == cv::REDUCE_MIN)
dst_depth = depth;
- else if (reduceOp == CV_REDUCE_SUM)
+ else if (reduceOp == cv::REDUCE_SUM)
dst_depth = depth == CV_8U ? CV_32S : depth < CV_64F ? CV_32F : depth;
else
dst_depth = depth < CV_32F ? CV_32F : depth;
MatDepth(CV_32F),
MatDepth(CV_64F)),
ALL_CHANNELS,
- ALL_REDUCE_CODES,
+ ReduceCode::all(),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////////////////////////////
ASSERT_FALSE(bgr.empty());
cv::Mat gray;
- cv::cvtColor(bgr, gray, CV_BGR2GRAY);
+ cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
GpuMat dbgr, dgray;
cv::gpu::nonLocalMeans(GpuMat(bgr), dbgr, 20);
ASSERT_FALSE(bgr.empty());
cv::Mat gray;
- cv::cvtColor(bgr, gray, CV_BGR2GRAY);
+ cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
GpuMat dbgr, dgray;
cv::gpu::FastNonLocalMeansDenoising fnlmd;
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
drawCircles(src, circles_gold, true);
cv::gpu::GpuMat d_circles;
- cv::gpu::HoughCircles(loadMat(src, useRoi), d_circles, CV_HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
+ cv::gpu::HoughCircles(loadMat(src, useRoi), d_circles, cv::HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
std::vector<cv::Vec3f> circles;
cv::gpu::HoughCirclesDownload(d_circles, circles);
ASSERT_FALSE(img.empty());
cv::Mat hsv;
- cv::cvtColor(img, hsv, CV_BGR2HSV);
+ cv::cvtColor(img, hsv, cv::COLOR_BGR2HSV);
int hbins = 30;
float hranges[] = {0.0f, 180.0f};
}
catch (const cv::Exception& e)
{
- ASSERT_EQ(CV_StsNotImplemented, e.code);
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
cv::Mat dst(d_dst);
cv::Mat result;
- cv::cvtColor(dst, result, CV_BGRA2BGR);
+ cv::cvtColor(dst, result, cv::COLOR_BGRA2BGR);
EXPECT_MAT_NEAR(img_template, result, 0.0);
}
cv::gpu::meanShiftSegmentation(loadMat(img), dst, 10, 10, minsize);
cv::Mat dst_rgb;
- cv::cvtColor(dst, dst_rgb, CV_BGRA2BGR);
+ cv::cvtColor(dst, dst_rgb, cv::COLOR_BGRA2BGR);
EXPECT_MAT_SIMILAR(dst_gold, dst_rgb, 1e-3);
}
ASSERT_FALSE(pattern.empty());
cv::gpu::GpuMat d_dst;
- cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, CV_TM_CCOEFF_NORMED);
+ cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, cv::TM_CCOEFF_NORMED);
cv::Mat dst(d_dst);
cv::minMaxLoc(dst, &minVal, &maxVal, &minLoc, &maxLoc);
cv::Mat dstGold;
- cv::matchTemplate(image, pattern, dstGold, CV_TM_CCOEFF_NORMED);
+ cv::matchTemplate(image, pattern, dstGold, cv::TM_CCOEFF_NORMED);
double minValGold, maxValGold;
cv::Point minLocGold, maxLocGold;
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
- cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF_NORMED);
+ cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, cv::TM_SQDIFF_NORMED);
cv::Mat result(d_result);
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
- cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF);
+ cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, cv::TM_SQDIFF);
cv::Mat result(d_result);
GPU_TEST_P(Labeling, DISABLED_ConnectedComponents)
{
cv::Mat image;
- cvtColor(loat_image(), image, CV_BGR2GRAY);
+ cvtColor(loat_image(), image, cv::COLOR_BGR2GRAY);
- cv::threshold(image, image, 150, 255, CV_THRESH_BINARY);
+ cv::threshold(image, image, 150, 255, cv::THRESH_BINARY);
ASSERT_TRUE(image.type() == CV_8UC1);
// Test on color image
cv::Mat img;
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
testDetect(img);
// Test on gray image
- cv::cvtColor(img_rgb, img, CV_BGR2GRAY);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2GRAY);
testDetect(img);
f.close();
// Convert to C4
cv::Mat img;
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
cv::gpu::GpuMat d_img(img);
img_rgb = readImage("hog/positive1.png");
ASSERT_TRUE(!img_rgb.empty());
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
// Everything is fine with interpolation for left top subimage
ASSERT_EQ(0.0, cv::norm((cv::Mat)block_hists, (cv::Mat)descriptors.rowRange(0, 1)));
img_rgb = readImage("hog/positive2.png");
ASSERT_TRUE(!img_rgb.empty());
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(1, 2)));
img_rgb = readImage("hog/negative1.png");
ASSERT_TRUE(!img_rgb.empty());
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(2, 3)));
img_rgb = readImage("hog/negative2.png");
ASSERT_TRUE(!img_rgb.empty());
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(3, 4)));
img_rgb = readImage("hog/positive3.png");
ASSERT_TRUE(!img_rgb.empty());
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(4, 5)));
img_rgb = readImage("hog/negative3.png");
ASSERT_TRUE(!img_rgb.empty());
- cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
}
cv::Mat image = cv::imread(imagePath);
image = image.colRange(0, image.cols/2);
cv::Mat grey;
- cvtColor(image, grey, CV_BGR2GRAY);
+ cvtColor(image, grey, cv::COLOR_BGR2GRAY);
ASSERT_FALSE(image.empty());
std::vector<cv::Rect> rects;
std::vector<cv::Rect>::iterator it = rects.begin();
for (; it != rects.end(); ++it)
- cv::rectangle(markedImage, *it, CV_RGB(0, 0, 255));
+ cv::rectangle(markedImage, *it, cv::Scalar(255, 0, 0));
cv::gpu::CascadeClassifier_GPU gpuClassifier;
ASSERT_TRUE(gpuClassifier.load(classifierXmlPath));
//M*/
#include "test_precomp.hpp"
+#include "opencv2/legacy.hpp"
#ifdef HAVE_CUDA
void cv::ocl::BruteForceMatcher_OCL_base::match(const oclMat &query, const oclMat &train, std::vector<DMatch> &matches, const oclMat &mask)
{
- assert(mask.empty()); // mask is not supported at the moment
+ CV_Assert(mask.empty()); // mask is not supported at the moment
oclMat trainIdx, distance;
matchSingle(query, train, trainIdx, distance, mask);
matchDownload(trainIdx, distance, matches);
//M*/
#include "opencv2/core/cuda_devptrs.hpp"
-
-namespace cv { namespace softcascade { namespace internal {
-void error(const char *error_string, const char *file, const int line, const char *func);
-}}}
-#if defined(__GNUC__)
- #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
-#else /* defined(__CUDACC__) || defined(__MSVC__) */
- #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
-#endif
-
-static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
-{
- if (cudaSuccess != err) cv::softcascade::internal::error(cudaGetErrorString(err), file, line, func);
-}
-
-__host__ __device__ __forceinline__ int divUp(int total, int grain)
-{
- return (total + grain - 1) / grain;
-}
+#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace softcascade { namespace cudev
{
{
const dim3 block(32, 8);
- const dim3 grid(divUp(integral.cols, block.x), 1);
+ const dim3 grid(cv::gpu::cudev::divUp(integral.cols, block.x), 1);
shfl_integral_vertical<<<grid, block, 0, stream>>>(integral);
cudaSafeCall( cudaGetLastError() );
{
const dim3 block(32, 8);
- const dim3 grid(divUp(integral.cols, block.x), 1);
+ const dim3 grid(cv::gpu::cudev::divUp(integral.cols, block.x), 1);
shfl_integral_vertical<<<grid, block, 0, stream>>>((cv::gpu::PtrStepSz<unsigned int>)buffer, integral);
cudaSafeCall( cudaGetLastError() );
void transform(const cv::gpu::PtrStepSz<uchar3>& bgr, cv::gpu::PtrStepSzb gray)
{
const dim3 block(32, 8);
- const dim3 grid(divUp(bgr.cols, block.x), divUp(bgr.rows, block.y));
+ const dim3 grid(cv::gpu::cudev::divUp(bgr.cols, block.x), cv::gpu::cudev::divUp(bgr.rows, block.y));
device_transform<<<grid, block>>>(bgr, gray);
cudaSafeCall(cudaDeviceSynchronize());
}
-}}}
\ No newline at end of file
+}}}
#include <cuda_invoker.hpp>
#include <float.h>
#include <stdio.h>
-
-namespace cv { namespace softcascade { namespace internal {
-void error(const char *error_string, const char *file, const int line, const char *func);
-}}}
-#if defined(__GNUC__)
- #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
-#else /* defined(__CUDACC__) || defined(__MSVC__) */
- #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
-#endif
-
-static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
-{
- if (cudaSuccess != err) cv::softcascade::internal::error(cudaGetErrorString(err), file, line, func);
-}
-
-#ifndef CV_PI
- #define CV_PI 3.1415926535897932384626433832795
-#endif
+#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace softcascade { namespace cudev {
else
res.copyTo(inner);
}
- else {CV_Error(CV_GpuNotSupported, ": CC 3.x required.");}
+ else {CV_Error(cv::Error::GpuNotSupported, ": CC 3.x required.");}
}
}
if (i < 0 || i >= getCudaEnabledDeviceCount())
{
msg << "Incorrect device number - " << i;
- CV_Error(CV_StsBadArg, msg.str());
+ CV_Error(cv::Error::StsBadArg, msg.str());
}
DeviceInfo info(i);
if (!info.isCompatible())
{
msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current GPU module build";
- CV_Error(CV_StsBadArg, msg.str());
+ CV_Error(cv::Error::StsBadArg, msg.str());
}
devices_.push_back(info);
Ptr<FrameSource> cv::superres::createFrameSource_Video(const String& fileName)
{
(void) fileName;
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
{
(void) deviceId;
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
Ptr<FrameSource> cv::superres::createFrameSource_Video_GPU(const String& fileName)
{
(void) fileName;
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
#ifdef HAVE_OPENCV_GPU
gpu::cvtColor(src.getGpuMat(), dst.getGpuMatRef(), code, cn);
#else
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
#endif
break;
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_GPU()
{
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_GPU()
{
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_GPU()
{
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_GPU()
{
- CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
+ CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
{
metrics.terminationReason = performance_metrics::TERM_EXCEPTION;
#ifdef HAVE_CUDA
- if (e.code == CV_GpuApiCallError)
+ if (e.code == cv::Error::GpuApiCallError)
cv::gpu::resetDevice();
#endif
FAIL() << "Expected: PerfTestBody() doesn't throw an exception.\n Actual: it throws cv::Exception:\n " << e.what();
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/legacy/compat.hpp>
+#include <opencv2/calib3d/calib3d_c.h>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <cstdio>
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/objdetect/objdetect_c.h"
#ifdef HAVE_CUDA
#include "NCVHaarObjectDetection.hpp"
Point org;
org.x = 1;
org.y = 3 * fontSize.height * (lineOffsY + 1) / 2;
- putText(img, ss, org, fontFace, fontScale, CV_RGB(0,0,0), 5*fontThickness/2, 16);
+ putText(img, ss, org, fontFace, fontScale, Scalar(0,0,0), 5*fontThickness/2, 16);
putText(img, ss, org, fontFace, fontScale, fontColor, fontThickness, 16);
}
static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)
{
- Scalar fontColorRed = CV_RGB(255,0,0);
- Scalar fontColorNV = CV_RGB(118,185,0);
+ Scalar fontColorRed(0,0,255);
+ Scalar fontColorNV(0,185,118);
ostringstream ss;
ss << "FPS = " << setprecision(1) << fixed << fps;