double cv::gpu::norm(const GpuMat& src1, int normType) { throw_nogpu(); return 0.0; }\r
double cv::gpu::norm(const GpuMat& src1, const GpuMat& src2, int normType) { throw_nogpu(); return 0.0; }\r
\r
+void cv::gpu::flip(const GpuMat& a, GpuMat& b, int flipCode) { throw_nogpu(); }\r
+\r
+void cv::gpu::resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx, double fy, int interpolation) { throw_nogpu(); }\r
+\r
+Scalar cv::gpu::sum(const GpuMat& m) { throw_nogpu(); return Scalar(); }\r
+\r
+void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal) { throw_nogpu(); }\r
+\r
+void cv::gpu::copyConstBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value) { throw_nogpu(); }\r
+\r
#else /* !defined (HAVE_CUDA) */\r
\r
namespace\r
sz.height = src1.rows;\r
\r
int funcIdx = normType >> 1;\r
- Npp64f retVal[3];\r
+ Scalar retVal;\r
\r
npp_norm_diff_func[funcIdx]((const Npp8u*)src1.ptr<char>(), src1.step, \r
(const Npp8u*)src2.ptr<char>(), src2.step, \r
- sz, retVal);\r
+ sz, retVal.val);\r
\r
return retVal[0];\r
}\r
\r
+void cv::gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode)\r
+{\r
+ CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);\r
+\r
+ dst.create( src.size(), src.type() );\r
+\r
+ NppiSize sz;\r
+ sz.width = src.cols;\r
+ sz.height = src.rows;\r
+\r
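+    // Map OpenCV's flipCode onto NPP mirror axes: 0 flips around the x-axis, a positive code around the y-axis, a negative one around both.\r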
+ if (src.channels() == 1)\r
+ {\r
+ nppiMirror_8u_C1R((const Npp8u*)src.ptr<char>(), src.step, \r
+ (Npp8u*)dst.ptr<char>(), dst.step, sz, \r
+ (flipCode == 0 ? NPP_HORIZONTAL_AXIS : (flipCode > 0 ? NPP_VERTICAL_AXIS : NPP_BOTH_AXIS)));\r
+ }\r
+ else\r
+ {\r
+ nppiMirror_8u_C4R((const Npp8u*)src.ptr<char>(), src.step, \r
+ (Npp8u*)dst.ptr<char>(), dst.step, sz, \r
+ (flipCode == 0 ? NPP_HORIZONTAL_AXIS : (flipCode > 0 ? NPP_VERTICAL_AXIS : NPP_BOTH_AXIS)));\r
+ }\r
+}\r
+\r
+void cv::gpu::resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx, double fy, int interpolation)\r
+{\r
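+    // Lookup table from OpenCV interpolation flags to NPP codes; INTER_AREA (index 3) has no NPP counterpart, hence the 0 placeholder.\r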
+ static const int npp_inter[] = {NPPI_INTER_NN, NPPI_INTER_LINEAR, NPPI_INTER_CUBIC, 0, NPPI_INTER_LANCZOS};\r
+\r
+ CV_Assert((src.type() == CV_8UC1 || src.type() == CV_8UC4) && \r
+ (interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC || interpolation == INTER_LANCZOS4));\r
+\r
+ CV_Assert( src.size().area() > 0 );\r
+ CV_Assert( !(dsize == Size()) || (fx > 0 && fy > 0) );\r
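+    // As in cv::resize: derive dsize from the scale factors, or recompute fx/fy from an explicit dsize.\r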
+ if( dsize == Size() )\r
+ {\r
+ dsize = Size(saturate_cast<int>(src.cols * fx), saturate_cast<int>(src.rows * fy));\r
+ }\r
+ else\r
+ {\r
+ fx = (double)dsize.width / src.cols;\r
+ fy = (double)dsize.height / src.rows;\r
+ }\r
+ dst.create(dsize, src.type());\r
+\r
+ NppiSize srcsz;\r
+ srcsz.width = src.cols;\r
+ srcsz.height = src.rows;\r
+ NppiRect srcrect;\r
+ srcrect.x = srcrect.y = 0;\r
+ srcrect.width = src.cols;\r
+ srcrect.height = src.rows;\r
+ NppiSize dstsz;\r
+ dstsz.width = dst.cols;\r
+ dstsz.height = dst.rows;\r
+\r
+ if (src.channels() == 1)\r
+ {\r
+ nppiResize_8u_C1R((const Npp8u*)src.ptr<char>(), srcsz, src.step, srcrect,\r
+ (Npp8u*)dst.ptr<char>(), dst.step, dstsz, fx, fy, npp_inter[interpolation]);\r
+ }\r
+ else\r
+ {\r
+ nppiResize_8u_C4R((const Npp8u*)src.ptr<char>(), srcsz, src.step, srcrect,\r
+ (Npp8u*)dst.ptr<char>(), dst.step, dstsz, fx, fy, npp_inter[interpolation]);\r
+ }\r
+}\r
+\r
+Scalar cv::gpu::sum(const GpuMat& src)\r
+{\r
+ CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);\r
+ \r
+ Scalar res;\r
+\r
+ NppiSize sz;\r
+ sz.width = src.cols;\r
+ sz.height = src.rows;\r
+\r
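+    // nppiSum writes the per-channel totals straight into res.val (Npp64f is double, matching Scalar's storage).\r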
+ if (src.channels() == 1)\r
+ {\r
+ nppiSum_8u_C1R((const Npp8u*)src.ptr<char>(), src.step, sz, res.val);\r
+ }\r
+ else\r
+ {\r
+ nppiSum_8u_C4R((const Npp8u*)src.ptr<char>(), src.step, sz, res.val);\r
+ }\r
+\r
+ return res;\r
+}\r
+\r
+void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal) \r
+{\r
+ CV_Assert(src.type() == CV_8UC1);\r
+\r
+ NppiSize sz;\r
+ sz.width = src.cols;\r
+ sz.height = src.rows;\r
+\r
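+    // NPP reports 8-bit extrema; widen them to double for the OpenCV-style out-parameters.\r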
+ Npp8u min_res, max_res;\r
+\r
+ nppiMinMax_8u_C1R((const Npp8u*)src.ptr<char>(), src.step, sz, &min_res, &max_res);\r
+\r
+ if (minVal)\r
+ *minVal = min_res;\r
+\r
+ if (maxVal)\r
+ *maxVal = max_res;\r
+}\r
+\r
+void cv::gpu::copyConstBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value) \r
+{\r
+ CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4 || src.type() == CV_32SC1);\r
+\r
+ dst.create(src.rows + top + bottom, src.cols + left + right, src.type());\r
+\r
+ NppiSize srcsz;\r
+ srcsz.width = src.cols;\r
+ srcsz.height = src.rows;\r
+ NppiSize dstsz;\r
+ dstsz.width = dst.cols;\r
+ dstsz.height = dst.rows;\r
+\r
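+    // NPP anchors the source ROI at offset (top, left) inside dst and fills the remaining border with nVal.\r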
+ if (src.depth() == CV_8U)\r
+ {\r
+ if (src.channels() == 1)\r
+ {\r
+ Npp8u nVal = (Npp8u)value[0];\r
+ nppiCopyConstBorder_8u_C1R((const Npp8u*)src.ptr<char>(), src.step, srcsz, \r
+ (Npp8u*)dst.ptr<char>(), dst.step, dstsz, top, left, nVal);\r
+ }\r
+ else\r
+ {\r
+ Npp8u nVal[] = {(Npp8u)value[0], (Npp8u)value[1], (Npp8u)value[2], (Npp8u)value[3]};\r
+ nppiCopyConstBorder_8u_C4R((const Npp8u*)src.ptr<char>(), src.step, srcsz, \r
+ (Npp8u*)dst.ptr<char>(), dst.step, dstsz, top, left, nVal);\r
+ } \r
+ }\r
+ else //if (src.depth() == CV_32S)\r
+ {\r
+ Npp32s nVal = (Npp32s)value[0];\r
+ nppiCopyConstBorder_32s_C1R((const Npp32s*)src.ptr<char>(), src.step, srcsz, \r
+ (Npp32s*)dst.ptr<char>(), dst.step, dstsz, top, left, nVal);\r
+ }\r
+}\r
+\r
#endif /* !defined (HAVE_CUDA) */
\ No newline at end of file
//M*/
#include <iostream>
+#include <cmath>
+#include <limits>
#include "gputest.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
int test8UC1(const Mat& cpu1, const Mat& cpu2);
int test8UC4(const Mat& cpu1, const Mat& cpu2);
+ int test32SC1(const Mat& cpu1, const Mat& cpu2);
int test32FC1(const Mat& cpu1, const Mat& cpu2);
virtual int test(const Mat& cpu1, const Mat& cpu2) = 0;
int CheckNorm(const Mat& m1, const Mat& m2);
+ int CheckNorm(const Scalar& s1, const Scalar& s2);
+ int CheckNorm(double d1, double d2);
};
CV_GpuNppImageArithmTest::CV_GpuNppImageArithmTest(const char* test_name, const char* test_funcs): CvTest(test_name, test_funcs)
return test(imgL_C4, imgR_C4);
}
+int CV_GpuNppImageArithmTest::test32SC1( const Mat& cpu1, const Mat& cpu2 )
+{
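+    // Grayscale the inputs, then widen to 32S so the CV_32SC1 NPP paths (e.g. copyConstBorder) are exercised.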
+ cv::Mat imgL_C1;
+ cv::Mat imgR_C1;
+ cvtColor(cpu1, imgL_C1, CV_BGR2GRAY);
+ cvtColor(cpu2, imgR_C1, CV_BGR2GRAY);
+
+ imgL_C1.convertTo(imgL_C1, CV_32S);
+ imgR_C1.convertTo(imgR_C1, CV_32S);
+
+ return test(imgL_C1, imgR_C1);
+}
+
int CV_GpuNppImageArithmTest::test32FC1( const Mat& cpu1, const Mat& cpu2 )
{
cv::Mat imgL_C1;
int CV_GpuNppImageArithmTest::CheckNorm(const Mat& m1, const Mat& m2)
{
- double ret = norm(m1, m2);
+ double ret = norm(m1, m2, NORM_INF);
+
+ if (ret < std::numeric_limits<double>::epsilon())
+ {
+ return CvTS::OK;
+ }
+ else
+ {
+ ts->printf(CvTS::LOG, "\nNorm: %f\n", ret);
+ return CvTS::FAIL_GENERIC;
+ }
+}
+
+int CV_GpuNppImageArithmTest::CheckNorm(const Scalar& s1, const Scalar& s2)
+{
+    int ret0 = CheckNorm(s1[0], s2[0]);
+    int ret1 = CheckNorm(s1[1], s2[1]);
+    int ret2 = CheckNorm(s1[2], s2[2]);
+    int ret3 = CheckNorm(s1[3], s2[3]);
+
+    return (ret0 == CvTS::OK && ret1 == CvTS::OK && ret2 == CvTS::OK && ret3 == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+int CV_GpuNppImageArithmTest::CheckNorm(double d1, double d2)
+{
+ double ret = ::fabs(d1 - d2);
+
-    if (ret < 1.0)
+    if (ret < std::numeric_limits<double>::epsilon())
{
return CvTS::OK;
}
void CV_GpuNppImageArithmTest::run( int )
{
//load images
-    cv::Mat img_l = cv::imread(std::string(ts->get_data_path()) + "stereobm/aloe-L.png");
-    cv::Mat img_r = cv::imread(std::string(ts->get_data_path()) + "stereobm/aloe-R.png");
+    cv::Mat img_l = cv::imread(std::string(ts->get_data_path()) + "stereobp/aloe-L.png");
+    cv::Mat img_r = cv::imread(std::string(ts->get_data_path()) + "stereobp/aloe-R.png");
if (img_l.empty() || img_r.empty())
{
return;
}
+ testResult = test32SC1(img_l, img_r);
+ if (testResult != CvTS::OK)
+ {
+ ts->set_failed_test_info(testResult);
+ return;
+ }
+
testResult = test32FC1(img_l, img_r);
if (testResult != CvTS::OK)
{
int CV_GpuNppImageAddTest::test( const Mat& cpu1, const Mat& cpu2 )
{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+ return CvTS::OK;
+
cv::Mat cpuRes;
cv::add(cpu1, cpu2, cpuRes);
int CV_GpuNppImageSubtractTest::test( const Mat& cpu1, const Mat& cpu2 )
{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+ return CvTS::OK;
+
cv::Mat cpuRes;
cv::subtract(cpu1, cpu2, cpuRes);
int CV_GpuNppImageMultiplyTest::test( const Mat& cpu1, const Mat& cpu2 )
{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+ return CvTS::OK;
+
cv::Mat cpuRes;
cv::multiply(cpu1, cpu2, cpuRes);
int CV_GpuNppImageDivideTest::test( const Mat& cpu1, const Mat& cpu2 )
{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+ return CvTS::OK;
+
cv::Mat cpuRes;
cv::divide(cpu1, cpu2, cpuRes);
int CV_GpuNppImageTransposeTest::test( const Mat& cpu1, const Mat& )
{
- if (!((cpu1.depth() == CV_8U) && cpu1.channels() == 1))
+ if (cpu1.type() != CV_8UC1)
return CvTS::OK;
cv::Mat cpuRes;
int CV_GpuNppImageAbsdiffTest::test( const Mat& cpu1, const Mat& cpu2 )
{
- if (!((cpu1.depth() == CV_8U || cpu1.depth() == CV_32F) && cpu1.channels() == 1))
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_32FC1)
return CvTS::OK;
cv::Mat cpuRes;
int CV_GpuNppImageThresholdTest::test( const Mat& cpu1, const Mat& )
{
- if (!((cpu1.depth() == CV_32F) && cpu1.channels() == 1))
+ if (cpu1.type() != CV_32FC1)
return CvTS::OK;
const double thresh = 0.5;
Scalar gpustddev;
cv::gpu::meanStdDev(gpu1, gpumean, gpustddev);
- return (cpumean == gpumean && cpustddev == gpustddev) ? CvTS::OK : CvTS::FAIL_GENERIC;
+ return (CheckNorm(cpumean, gpumean) == CvTS::OK && CheckNorm(cpustddev, gpustddev) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
}
CV_GpuNppImageMeanStdDevTest CV_GpuNppImageMeanStdDev_test;
double gpu_norm_L1 = cv::gpu::norm(gpu1, gpu2, NORM_L1);
double gpu_norm_L2 = cv::gpu::norm(gpu1, gpu2, NORM_L2);
- return (cpu_norm_inf == gpu_norm_inf && cpu_norm_L1 == gpu_norm_L1 && cpu_norm_L2 == gpu_norm_L2) ? CvTS::OK : CvTS::FAIL_GENERIC;
+ return (CheckNorm(cpu_norm_inf, gpu_norm_inf) == CvTS::OK
+ && CheckNorm(cpu_norm_L1, gpu_norm_L1) == CvTS::OK
+ && CheckNorm(cpu_norm_L2, gpu_norm_L2) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+CV_GpuNppImageNormTest CV_GpuNppImageNorm_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// flip
+class CV_GpuNppImageFlipTest : public CV_GpuNppImageArithmTest
+{
+public:
+ CV_GpuNppImageFlipTest();
+
+protected:
+ virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageFlipTest::CV_GpuNppImageFlipTest(): CV_GpuNppImageArithmTest( "GPU-NppImageFlip", "flip" )
+{
+}
+
+int CV_GpuNppImageFlipTest::test( const Mat& cpu1, const Mat& )
+{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4)
+ return CvTS::OK;
+
+ Mat cpux, cpuy, cpub;
+ cv::flip(cpu1, cpux, 0);
+ cv::flip(cpu1, cpuy, 1);
+ cv::flip(cpu1, cpub, -1);
+
+ GpuMat gpu1(cpu1);
+ GpuMat gpux, gpuy, gpub;
+ cv::gpu::flip(gpu1, gpux, 0);
+ cv::gpu::flip(gpu1, gpuy, 1);
+ cv::gpu::flip(gpu1, gpub, -1);
+
+ return (CheckNorm(cpux, gpux) == CvTS::OK &&
+ CheckNorm(cpuy, gpuy) == CvTS::OK &&
+ CheckNorm(cpub, gpub) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+CV_GpuNppImageFlipTest CV_GpuNppImageFlip_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// resize
+class CV_GpuNppImageResizeTest : public CV_GpuNppImageArithmTest
+{
+public:
+ CV_GpuNppImageResizeTest();
+
+protected:
+ virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageResizeTest::CV_GpuNppImageResizeTest(): CV_GpuNppImageArithmTest( "GPU-NppImageResize", "resize" )
+{
+}
+
+int CV_GpuNppImageResizeTest::test( const Mat& cpu1, const Mat& )
+{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4)
+ return CvTS::OK;
+
+ Mat cpunn, cpulin, cpucub, cpulanc;
+ cv::resize(cpu1, cpunn, Size(), 0.5, 0.5, INTER_NEAREST);
+ cv::resize(cpu1, cpulin, Size(), 0.5, 0.5, INTER_LINEAR);
+ cv::resize(cpu1, cpucub, Size(), 0.5, 0.5, INTER_CUBIC);
+ cv::resize(cpu1, cpulanc, Size(), 0.5, 0.5, INTER_LANCZOS4);
+
+ GpuMat gpu1(cpu1);
+ GpuMat gpunn, gpulin, gpucub, gpulanc;
+ cv::gpu::resize(gpu1, gpunn, Size(), 0.5, 0.5, INTER_NEAREST);
+ cv::gpu::resize(gpu1, gpulin, Size(), 0.5, 0.5, INTER_LINEAR);
+ cv::gpu::resize(gpu1, gpucub, Size(), 0.5, 0.5, INTER_CUBIC);
+ cv::gpu::resize(gpu1, gpulanc, Size(), 0.5, 0.5, INTER_LANCZOS4);
+
+    int nnres = CheckNorm(cpunn, gpunn);
+ int linres = CheckNorm(cpulin, gpulin);
+ int cubres = CheckNorm(cpucub, gpucub);
+ int lancres = CheckNorm(cpulanc, gpulanc);
+
+ return (nnres == CvTS::OK && linres == CvTS::OK && cubres == CvTS::OK && lancres == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+CV_GpuNppImageResizeTest CV_GpuNppImageResize_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// sum
+class CV_GpuNppImageSumTest : public CV_GpuNppImageArithmTest
+{
+public:
+ CV_GpuNppImageSumTest();
+
+protected:
+ virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageSumTest::CV_GpuNppImageSumTest(): CV_GpuNppImageArithmTest( "GPU-NppImageSum", "sum" )
+{
+}
+
+int CV_GpuNppImageSumTest::test( const Mat& cpu1, const Mat& )
+{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4)
+ return CvTS::OK;
+
+ Scalar cpures = cv::sum(cpu1);
+
+ GpuMat gpu1(cpu1);
+ Scalar gpures = cv::gpu::sum(gpu1);
+
+ return CheckNorm(cpures, gpures);
+}
+
+CV_GpuNppImageSumTest CV_GpuNppImageSum_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// minMax
+class CV_GpuNppImageMinMaxTest : public CV_GpuNppImageArithmTest
+{
+public:
+    CV_GpuNppImageMinMaxTest();
+
+protected:
+ virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageMinMaxTest::CV_GpuNppImageMinMaxTest(): CV_GpuNppImageArithmTest( "GPU-NppImageMinMax", "minMax" )
+{
+}
+
+int CV_GpuNppImageMinMaxTest::test( const Mat& cpu1, const Mat& )
+{
+ if (cpu1.type() != CV_8UC1)
+ return CvTS::OK;
+
+ double cpumin, cpumax;
+ cv::minMaxLoc(cpu1, &cpumin, &cpumax);
+
+ GpuMat gpu1(cpu1);
+ double gpumin, gpumax;
+ cv::gpu::minMax(gpu1, &gpumin, &gpumax);
+
+ return (CheckNorm(cpumin, gpumin) == CvTS::OK && CheckNorm(cpumax, gpumax) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+CV_GpuNppImageMinMaxTest CV_GpuNppImageMinMax_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// copyConstBorder
+class CV_GpuNppImageCopyConstBorderTest : public CV_GpuNppImageArithmTest
+{
+public:
+ CV_GpuNppImageCopyConstBorderTest();
+
+protected:
+ virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageCopyConstBorderTest::CV_GpuNppImageCopyConstBorderTest(): CV_GpuNppImageArithmTest( "GPU-NppImageCopyConstBorder", "copyConstBorder" )
+{
+}
+
+int CV_GpuNppImageCopyConstBorderTest::test( const Mat& cpu1, const Mat& )
+{
+ if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32SC1)
+ return CvTS::OK;
+
+ Mat cpudst;
+ cv::copyMakeBorder(cpu1, cpudst, 5, 5, 5, 5, BORDER_CONSTANT);
+
+ GpuMat gpu1(cpu1);
+ GpuMat gpudst;
+ cv::gpu::copyConstBorder(gpu1, gpudst, 5, 5, 5, 5);
+
+ return CheckNorm(cpudst, gpudst);
}
-CV_GpuNppImageNormTest CV_GpuNppImageNorm_test;
\ No newline at end of file
+CV_GpuNppImageCopyConstBorderTest CV_GpuNppImageCopyConstBorder_test;
\ No newline at end of file