--- /dev/null
- EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+#ifdef HAVE_CUDA
+
+using namespace cvtest;
+
+////////////////////////////////////////////////////////////////////////////////
+// SetTo
+
+PARAM_TEST_CASE(SetTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(SetTo, Zero)
+{
+ cv::Scalar zero = cv::Scalar::all(0);
+
+ cv::cuda::GpuMat mat = createMat(size, type, useRoi);
+ mat.setTo(zero);
+
+ EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
+}
+
+CUDA_TEST_P(SetTo, SameVal)
+{
+ cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
+
+ if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat mat = createMat(size, type, useRoi);
+ mat.setTo(val);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat mat = createMat(size, type, useRoi);
+ mat.setTo(val);
+
+ EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
+ }
+}
+
+CUDA_TEST_P(SetTo, DifferentVal)
+{
+ cv::Scalar val = randomScalar(0.0, 255.0);
+
+ if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat mat = createMat(size, type, useRoi);
+ mat.setTo(val);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat mat = createMat(size, type, useRoi);
+ mat.setTo(val);
+
+ EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
+ }
+}
+
+CUDA_TEST_P(SetTo, Masked)
+{
+ cv::Scalar val = randomScalar(0.0, 255.0);
+ cv::Mat mat_gold = randomMat(size, type);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat mat = createMat(size, type, useRoi);
+ mat.setTo(val, loadMat(mask));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat mat = loadMat(mat_gold, useRoi);
+ mat.setTo(val, loadMat(mask, useRoi));
+
+ mat_gold.setTo(val, mask);
+
+ EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, SetTo, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_TYPES,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// CopyTo
+
+PARAM_TEST_CASE(CopyTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ bool useRoi;
+
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(CopyTo, WithOutMask)
+{
+ cv::Mat src = randomMat(size, type);
+
+ cv::cuda::GpuMat d_src = loadMat(src, useRoi);
+ cv::cuda::GpuMat dst = createMat(size, type, useRoi);
+ d_src.copyTo(dst);
+
+ EXPECT_MAT_NEAR(src, dst, 0.0);
+}
+
+CUDA_TEST_P(CopyTo, Masked)
+{
+ cv::Mat src = randomMat(size, type);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat d_src = loadMat(src);
+ cv::cuda::GpuMat dst;
+ d_src.copyTo(dst, loadMat(mask, useRoi));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat d_src = loadMat(src, useRoi);
+ cv::cuda::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
+ d_src.copyTo(dst, loadMat(mask, useRoi));
+
+ cv::Mat dst_gold = cv::Mat::zeros(size, type);
+ src.copyTo(dst_gold, mask);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, CopyTo, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_TYPES,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// ConvertTo
+
+PARAM_TEST_CASE(ConvertTo, cv::cuda::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth1;
+ int depth2;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth1 = GET_PARAM(2);
+ depth2 = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(ConvertTo, WithOutScaling)
+{
+ cv::Mat src = randomMat(size, depth1);
+
+ if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat d_src = loadMat(src);
+ cv::cuda::GpuMat dst;
+ d_src.convertTo(dst, depth2);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat d_src = loadMat(src, useRoi);
+ cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
+ d_src.convertTo(dst, depth2);
+
+ cv::Mat dst_gold;
+ src.convertTo(dst_gold, depth2);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+ }
+}
+
+CUDA_TEST_P(ConvertTo, WithScaling)
+{
+ cv::Mat src = randomMat(size, depth1);
+ double a = randomDouble(0.0, 1.0);
+ double b = randomDouble(-10.0, 10.0);
+
+ if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat d_src = loadMat(src);
+ cv::cuda::GpuMat dst;
+ d_src.convertTo(dst, depth2, a, b);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat d_src = loadMat(src, useRoi);
+ cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
+ d_src.convertTo(dst, depth2, a, b);
+
+ cv::Mat dst_gold;
+ src.convertTo(dst_gold, depth2, a, b);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 1.0 : 1e-4);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, ConvertTo, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// ensureSizeIsEnough
+
+struct EnsureSizeIsEnough : testing::TestWithParam<cv::cuda::DeviceInfo>
+{
+ virtual void SetUp()
+ {
+ cv::cuda::DeviceInfo devInfo = GetParam();
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(EnsureSizeIsEnough, BufferReuse)
+{
+ cv::cuda::GpuMat buffer(100, 100, CV_8U);
+ cv::cuda::GpuMat old = buffer;
+
+ // don't reallocate memory
+ cv::cuda::ensureSizeIsEnough(10, 20, CV_8U, buffer);
+ EXPECT_EQ(10, buffer.rows);
+ EXPECT_EQ(20, buffer.cols);
+ EXPECT_EQ(CV_8UC1, buffer.type());
+ EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
+
+ // don't reallocate memory
+ cv::cuda::ensureSizeIsEnough(20, 30, CV_8U, buffer);
+ EXPECT_EQ(20, buffer.rows);
+ EXPECT_EQ(30, buffer.cols);
+ EXPECT_EQ(CV_8UC1, buffer.type());
+ EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, EnsureSizeIsEnough, ALL_DEVICES);
+
+#endif // HAVE_CUDA
--- /dev/null
- CUDA_TEST_P(HOG, Detect)
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+#ifdef HAVE_CUDA
+
+using namespace cvtest;
+
+//#define DUMP
+
+struct HOG : testing::TestWithParam<cv::cuda::DeviceInfo>, cv::cuda::HOGDescriptor
+{
+ cv::cuda::DeviceInfo devInfo;
+
+#ifdef DUMP
+ std::ofstream f;
+#else
+ std::ifstream f;
+#endif
+
+ int wins_per_img_x;
+ int wins_per_img_y;
+ int blocks_per_win_x;
+ int blocks_per_win_y;
+ int block_hist_size;
+
+ virtual void SetUp()
+ {
+ devInfo = GetParam();
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+
+#ifdef DUMP
+ void dump(const cv::Mat& blockHists, const std::vector<cv::Point>& locations)
+ {
+ f.write((char*)&blockHists.rows, sizeof(blockHists.rows));
+ f.write((char*)&blockHists.cols, sizeof(blockHists.cols));
+
+ for (int i = 0; i < blockHists.rows; ++i)
+ {
+ for (int j = 0; j < blockHists.cols; ++j)
+ {
+ float val = blockHists.at<float>(i, j);
+ f.write((char*)&val, sizeof(val));
+ }
+ }
+
+ int nlocations = locations.size();
+ f.write((char*)&nlocations, sizeof(nlocations));
+
+ for (int i = 0; i < locations.size(); ++i)
+ f.write((char*)&locations[i], sizeof(locations[i]));
+ }
+#else
+ void compare(const cv::Mat& blockHists, const std::vector<cv::Point>& locations)
+ {
+ int rows, cols;
+ f.read((char*)&rows, sizeof(rows));
+ f.read((char*)&cols, sizeof(cols));
+ ASSERT_EQ(rows, blockHists.rows);
+ ASSERT_EQ(cols, blockHists.cols);
+
+ for (int i = 0; i < blockHists.rows; ++i)
+ {
+ for (int j = 0; j < blockHists.cols; ++j)
+ {
+ float val;
+ f.read((char*)&val, sizeof(val));
+ ASSERT_NEAR(val, blockHists.at<float>(i, j), 1e-3);
+ }
+ }
+
+ int nlocations;
+ f.read((char*)&nlocations, sizeof(nlocations));
+ ASSERT_EQ(nlocations, static_cast<int>(locations.size()));
+
+ for (int i = 0; i < nlocations; ++i)
+ {
+ cv::Point location;
+ f.read((char*)&location, sizeof(location));
+ ASSERT_EQ(location, locations[i]);
+ }
+ }
+#endif
+
+ void testDetect(const cv::Mat& img)
+ {
+ gamma_correction = false;
+ setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
+
+ std::vector<cv::Point> locations;
+
+ // Test detect
+ detect(loadMat(img), locations, 0);
+
+#ifdef DUMP
+ dump(cv::Mat(block_hists), locations);
+#else
+ compare(cv::Mat(block_hists), locations);
+#endif
+
+ // Test detect on smaller image
+ cv::Mat img2;
+ cv::resize(img, img2, cv::Size(img.cols / 2, img.rows / 2));
+ detect(loadMat(img2), locations, 0);
+
+#ifdef DUMP
+ dump(cv::Mat(block_hists), locations);
+#else
+ compare(cv::Mat(block_hists), locations);
+#endif
+
+ // Test detect on greater image
+ cv::resize(img, img2, cv::Size(img.cols * 2, img.rows * 2));
+ detect(loadMat(img2), locations, 0);
+
+#ifdef DUMP
+ dump(cv::Mat(block_hists), locations);
+#else
+ compare(cv::Mat(block_hists), locations);
+#endif
+ }
+
+ // Does not compare border value, as interpolation leads to delta
+ void compare_inner_parts(cv::Mat d1, cv::Mat d2)
+ {
+ for (int i = 1; i < blocks_per_win_y - 1; ++i)
+ for (int j = 1; j < blocks_per_win_x - 1; ++j)
+ for (int k = 0; k < block_hist_size; ++k)
+ {
+ float a = d1.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size);
+ float b = d2.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size);
+ ASSERT_FLOAT_EQ(a, b);
+ }
+ }
+};
+
+// disabled while resize is not fixed
+CUDA_TEST_P(HOG, DISABLED_Detect)
+{
+ cv::Mat img_rgb = readImage("hog/road.png");
+ ASSERT_FALSE(img_rgb.empty());
+
+#ifdef DUMP
+ f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
+ ASSERT_TRUE(f.is_open());
+#else
+ f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
+ ASSERT_TRUE(f.is_open());
+#endif
+
+ // Test on color image
+ cv::Mat img;
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+ testDetect(img);
+
+ // Test on gray image
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2GRAY);
+ testDetect(img);
+
+ f.close();
+}
+
+CUDA_TEST_P(HOG, GetDescriptors)
+{
+ // Load image (e.g. train data, composed from windows)
+ cv::Mat img_rgb = readImage("hog/train_data.png");
+ ASSERT_FALSE(img_rgb.empty());
+
+ // Convert to C4
+ cv::Mat img;
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat d_img(img);
+
+ // Convert train images into feature vectors (train table)
+ cv::cuda::GpuMat descriptors, descriptors_by_cols;
+ getDescriptors(d_img, win_size, descriptors, DESCR_FORMAT_ROW_BY_ROW);
+ getDescriptors(d_img, win_size, descriptors_by_cols, DESCR_FORMAT_COL_BY_COL);
+
+ // Check size of the result train table
+ wins_per_img_x = 3;
+ wins_per_img_y = 2;
+ blocks_per_win_x = 7;
+ blocks_per_win_y = 15;
+ block_hist_size = 36;
+ cv::Size descr_size_expected = cv::Size(blocks_per_win_x * blocks_per_win_y * block_hist_size,
+ wins_per_img_x * wins_per_img_y);
+ ASSERT_EQ(descr_size_expected, descriptors.size());
+
+ // Check both formats of output descriptors are handled correctly
+ cv::Mat dr(descriptors);
+ cv::Mat dc(descriptors_by_cols);
+ for (int i = 0; i < wins_per_img_x * wins_per_img_y; ++i)
+ {
+ const float* l = dr.rowRange(i, i + 1).ptr<float>();
+ const float* r = dc.rowRange(i, i + 1).ptr<float>();
+ for (int y = 0; y < blocks_per_win_y; ++y)
+ for (int x = 0; x < blocks_per_win_x; ++x)
+ for (int k = 0; k < block_hist_size; ++k)
+ ASSERT_EQ(l[(y * blocks_per_win_x + x) * block_hist_size + k],
+ r[(x * blocks_per_win_y + y) * block_hist_size + k]);
+ }
+
+ /* Now we want to extract the same feature vectors, but from single images. NOTE: results will
+ be different, due to border value interpolation. Using many small images is slower; however, we
+ won't call getDescriptors and will use computeBlockHistograms instead. computeBlockHistograms
+ works well, as can be checked in the gpu_hog sample */
+
+ img_rgb = readImage("hog/positive1.png");
+ ASSERT_TRUE(!img_rgb.empty());
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+ computeBlockHistograms(cv::cuda::GpuMat(img));
+ // Everything is fine with interpolation for left top subimage
+ ASSERT_EQ(0.0, cv::norm((cv::Mat)block_hists, (cv::Mat)descriptors.rowRange(0, 1)));
+
+ img_rgb = readImage("hog/positive2.png");
+ ASSERT_TRUE(!img_rgb.empty());
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+ computeBlockHistograms(cv::cuda::GpuMat(img));
+ compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(1, 2)));
+
+ img_rgb = readImage("hog/negative1.png");
+ ASSERT_TRUE(!img_rgb.empty());
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+ computeBlockHistograms(cv::cuda::GpuMat(img));
+ compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(2, 3)));
+
+ img_rgb = readImage("hog/negative2.png");
+ ASSERT_TRUE(!img_rgb.empty());
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+ computeBlockHistograms(cv::cuda::GpuMat(img));
+ compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(3, 4)));
+
+ img_rgb = readImage("hog/positive3.png");
+ ASSERT_TRUE(!img_rgb.empty());
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+ computeBlockHistograms(cv::cuda::GpuMat(img));
+ compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(4, 5)));
+
+ img_rgb = readImage("hog/negative3.png");
+ ASSERT_TRUE(!img_rgb.empty());
+ cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
+ computeBlockHistograms(cv::cuda::GpuMat(img));
+ compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_ObjDetect, HOG, ALL_DEVICES);
+
+//============== caltech hog tests =====================//
+
+struct CalTech : public ::testing::TestWithParam<std::tr1::tuple<cv::cuda::DeviceInfo, std::string> >
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Mat img;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ img = readImage(GET_PARAM(1), cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(img.empty());
+ }
+};
+
+CUDA_TEST_P(CalTech, HOG)
+{
+ cv::cuda::GpuMat d_img(img);
+ cv::Mat markedImage(img.clone());
+
+ cv::cuda::HOGDescriptor d_hog;
+ d_hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
+ d_hog.nlevels = d_hog.nlevels + 32;
+
+ std::vector<cv::Rect> found_locations;
+ d_hog.detectMultiScale(d_img, found_locations);
+
+#if defined (LOG_CASCADE_STATISTIC)
+ for (int i = 0; i < (int)found_locations.size(); i++)
+ {
+ cv::Rect r = found_locations[i];
+
+ std::cout << r.x << " " << r.y << " " << r.width << " " << r.height << std::endl;
+ cv::rectangle(markedImage, r , CV_RGB(255, 0, 0));
+ }
+
+ cv::imshow("Res", markedImage); cv::waitKey();
+#endif
+}
+
+INSTANTIATE_TEST_CASE_P(detect, CalTech, testing::Combine(ALL_DEVICES,
+ ::testing::Values<std::string>("caltech/image_00000009_0.png", "caltech/image_00000032_0.png",
+ "caltech/image_00000165_0.png", "caltech/image_00000261_0.png", "caltech/image_00000469_0.png",
+ "caltech/image_00000527_0.png", "caltech/image_00000574_0.png")));
+
+
+
+
+//////////////////////////////////////////////////////////////////////////////////////////
+/// LBP classifier
+
+PARAM_TEST_CASE(LBP_Read_classifier, cv::cuda::DeviceInfo, int)
+{
+ cv::cuda::DeviceInfo devInfo;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(LBP_Read_classifier, Accuracy)
+{
+ cv::cuda::CascadeClassifier_CUDA classifier;
+ std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
+ ASSERT_TRUE(classifier.load(classifierXmlPath));
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_ObjDetect, LBP_Read_classifier,
+ testing::Combine(ALL_DEVICES, testing::Values<int>(0)));
+
+
+PARAM_TEST_CASE(LBP_classify, cv::cuda::DeviceInfo, int)
+{
+ cv::cuda::DeviceInfo devInfo;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(LBP_classify, Accuracy)
+{
+ std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
+ std::string imagePath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/er.png";
+
+ cv::CascadeClassifier cpuClassifier(classifierXmlPath);
+ ASSERT_FALSE(cpuClassifier.empty());
+
+ cv::Mat image = cv::imread(imagePath);
+ image = image.colRange(0, image.cols/2);
+ cv::Mat grey;
+ cvtColor(image, grey, cv::COLOR_BGR2GRAY);
+ ASSERT_FALSE(image.empty());
+
+ std::vector<cv::Rect> rects;
+ cpuClassifier.detectMultiScale(grey, rects);
+ cv::Mat markedImage = image.clone();
+
+ std::vector<cv::Rect>::iterator it = rects.begin();
+ for (; it != rects.end(); ++it)
+ cv::rectangle(markedImage, *it, cv::Scalar(255, 0, 0));
+
+ cv::cuda::CascadeClassifier_CUDA gpuClassifier;
+ ASSERT_TRUE(gpuClassifier.load(classifierXmlPath));
+
+ cv::cuda::GpuMat gpu_rects;
+ cv::cuda::GpuMat tested(grey);
+ int count = gpuClassifier.detectMultiScale(tested, gpu_rects);
+
+#if defined (LOG_CASCADE_STATISTIC)
+ cv::Mat downloaded(gpu_rects);
+ const cv::Rect* faces = downloaded.ptr<cv::Rect>();
+ for (int i = 0; i < count; i++)
+ {
+ cv::Rect r = faces[i];
+
+ std::cout << r.x << " " << r.y << " " << r.width << " " << r.height << std::endl;
+ cv::rectangle(markedImage, r , CV_RGB(255, 0, 0));
+ }
+#endif
+
+#if defined (LOG_CASCADE_STATISTIC)
+ cv::imshow("Res", markedImage); cv::waitKey();
+#endif
+ (void)count;
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_ObjDetect, LBP_classify,
+ testing::Combine(ALL_DEVICES, testing::Values<int>(0)));
+
+#endif // HAVE_CUDA
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "perf_precomp.hpp"
+
+using namespace std;
+using namespace testing;
+using namespace perf;
+
+//////////////////////////////////////////////////////////////////////
+// GEMM
+
+#ifdef HAVE_CUBLAS
+
+CV_FLAGS(GemmFlags, 0, cv::GEMM_1_T, cv::GEMM_2_T, cv::GEMM_3_T)
+#define ALL_GEMM_FLAGS Values(GemmFlags(0), GemmFlags(cv::GEMM_1_T), GemmFlags(cv::GEMM_2_T), GemmFlags(cv::GEMM_3_T), \
+ GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T | cv::GEMM_3_T))
+
+DEF_PARAM_TEST(Sz_Type_Flags, cv::Size, MatType, GemmFlags);
+
+PERF_TEST_P(Sz_Type_Flags, GEMM,
+ Combine(Values(cv::Size(512, 512), cv::Size(1024, 1024)),
+ Values(CV_32FC1, CV_32FC2, CV_64FC1),
+ ALL_GEMM_FLAGS))
+{
+ const cv::Size size = GET_PARAM(0);
+ const int type = GET_PARAM(1);
+ const int flags = GET_PARAM(2);
+
+ cv::Mat src1(size, type);
+ declare.in(src1, WARMUP_RNG);
+
+ cv::Mat src2(size, type);
+ declare.in(src2, WARMUP_RNG);
+
+ cv::Mat src3(size, type);
+ declare.in(src3, WARMUP_RNG);
+
+ if (PERF_RUN_CUDA())
+ {
+ declare.time(5.0);
+
+ const cv::cuda::GpuMat d_src1(src1);
+ const cv::cuda::GpuMat d_src2(src2);
+ const cv::cuda::GpuMat d_src3(src3);
+ cv::cuda::GpuMat dst;
+
+ TEST_CYCLE() cv::cuda::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, dst, flags);
+
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ }
+ else
+ {
+ declare.time(50.0);
+
+ cv::Mat dst;
+
+ TEST_CYCLE() cv::gemm(src1, src2, 1.0, src3, 1.0, dst, flags);
+
+ CPU_SANITY_CHECK(dst);
+ }
+}
+
+#endif
+
+//////////////////////////////////////////////////////////////////////
+// MulSpectrums
+
+CV_FLAGS(DftFlags, 0, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX_OUTPUT, cv::DFT_REAL_OUTPUT)
+
+DEF_PARAM_TEST(Sz_Flags, cv::Size, DftFlags);
+
+PERF_TEST_P(Sz_Flags, MulSpectrums,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
+ Values(0, DftFlags(cv::DFT_ROWS))))
+{
+ const cv::Size size = GET_PARAM(0);
+ const int flag = GET_PARAM(1);
+
+ cv::Mat a(size, CV_32FC2);
+ cv::Mat b(size, CV_32FC2);
+ declare.in(a, b, WARMUP_RNG);
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_a(a);
+ const cv::cuda::GpuMat d_b(b);
+ cv::cuda::GpuMat dst;
+
+ TEST_CYCLE() cv::cuda::mulSpectrums(d_a, d_b, dst, flag);
+
+ CUDA_SANITY_CHECK(dst);
+ }
+ else
+ {
+ cv::Mat dst;
+
+ TEST_CYCLE() cv::mulSpectrums(a, b, dst, flag);
+
+ CPU_SANITY_CHECK(dst);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////
+// MulAndScaleSpectrums
+
+PERF_TEST_P(Sz, MulAndScaleSpectrums,
+ CUDA_TYPICAL_MAT_SIZES)
+{
+ const cv::Size size = GetParam();
+
+ const float scale = 1.f / size.area();
+
+ cv::Mat src1(size, CV_32FC2);
+ cv::Mat src2(size, CV_32FC2);
+ declare.in(src1,src2, WARMUP_RNG);
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_src1(src1);
+ const cv::cuda::GpuMat d_src2(src2);
+ cv::cuda::GpuMat dst;
+
+ TEST_CYCLE() cv::cuda::mulAndScaleSpectrums(d_src1, d_src2, dst, cv::DFT_ROWS, scale, false);
+
+ CUDA_SANITY_CHECK(dst);
+ }
+ else
+ {
+ FAIL_NO_CPU();
+ }
+}
+
+//////////////////////////////////////////////////////////////////////
+// Dft
+
+PERF_TEST_P(Sz_Flags, Dft,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
+ Values(0, DftFlags(cv::DFT_ROWS), DftFlags(cv::DFT_INVERSE))))
+{
+ declare.time(10.0);
+
+ const cv::Size size = GET_PARAM(0);
+ const int flag = GET_PARAM(1);
+
+ cv::Mat src(size, CV_32FC2);
+ declare.in(src, WARMUP_RNG);
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_src(src);
+ cv::cuda::GpuMat dst;
+
+ TEST_CYCLE() cv::cuda::dft(d_src, dst, size, flag);
+
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ }
+ else
+ {
+ cv::Mat dst;
+
+ TEST_CYCLE() cv::dft(src, dst, flag);
+
+ CPU_SANITY_CHECK(dst);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////
+// Convolve
+
+DEF_PARAM_TEST(Sz_KernelSz_Ccorr, cv::Size, int, bool);
+
+PERF_TEST_P(Sz_KernelSz_Ccorr, Convolve,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
+ Values(17, 27, 32, 64),
+ Bool()))
+{
+ declare.time(10.0);
+
+ const cv::Size size = GET_PARAM(0);
+ const int templ_size = GET_PARAM(1);
+ const bool ccorr = GET_PARAM(2);
+
+ const cv::Mat image(size, CV_32FC1);
+ const cv::Mat templ(templ_size, templ_size, CV_32FC1);
+ declare.in(image, templ, WARMUP_RNG);
+
+ if (PERF_RUN_CUDA())
+ {
+ cv::cuda::GpuMat d_image = cv::cuda::createContinuous(size, CV_32FC1);
+ d_image.upload(image);
+
+ cv::cuda::GpuMat d_templ = cv::cuda::createContinuous(templ_size, templ_size, CV_32FC1);
+ d_templ.upload(templ);
+
+ cv::Ptr<cv::cuda::Convolution> convolution = cv::cuda::createConvolution();
+
+ cv::cuda::GpuMat dst;
+
+ TEST_CYCLE() convolution->convolve(d_image, d_templ, dst, ccorr);
+
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ }
+ else
+ {
+ if (ccorr)
+ FAIL_NO_CPU();
+
+ cv::Mat dst;
+
+ TEST_CYCLE() cv::filter2D(image, dst, image.depth(), templ);
+
+ CPU_SANITY_CHECK(dst);
+ }
+}
--- /dev/null
- EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 1.0 : 1e-3);
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+#ifdef HAVE_CUDA
+
+using namespace cvtest;
+
+////////////////////////////////////////////////////////////////////////////////
+// Add_Array
+
+// Fixture for cv::cuda::add with two array operands. Parameterized over
+// device, matrix size, (source depth, destination depth) pair, channel
+// count and whole-matrix vs. sub-matrix (ROI) processing.
+PARAM_TEST_CASE(Add_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ int channels;
+ bool useRoi;
+
+ int stype; // full source type (depth.first + channels)
+ int dtype; // full destination type (depth.second + channels)
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ stype = CV_MAKE_TYPE(depth.first, channels);
+ dtype = CV_MAKE_TYPE(depth.second, channels);
+ }
+};
+
+// Compares GPU cv::cuda::add against CPU cv::add on random inputs.
+CUDA_TEST_P(Add_Array, Accuracy)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ // Double precision unsupported on this device: the call must raise
+ // StsUnsupportedFormat.
+ // NOTE(review): if no exception is thrown the test silently passes —
+ // confirm this is intended.
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::add(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::add(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, cv::cuda::GpuMat(), depth.second);
+
+ cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
+ cv::add(mat1, mat2, dst_gold, cv::noArray(), depth.second);
+
+ // Exact match for integer depths; small tolerance for float paths.
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Array, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ ALL_CHANNELS,
+ WHOLE_SUBMAT));
+
+// Fixture for cv::cuda::add with two single-channel arrays and a mask.
+PARAM_TEST_CASE(Add_Array_Mask, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ int stype; // single-channel source type
+ int dtype; // single-channel destination type
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ stype = CV_MAKE_TYPE(depth.first, 1);
+ dtype = CV_MAKE_TYPE(depth.second, 1);
+ }
+};
+
+// Masked add: only elements where mask != 0 are written; the rest keep
+// the destination's initial zeros, matching the CPU gold computation.
+CUDA_TEST_P(Add_Array_Mask, Accuracy)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0, 2); // values in {0, 1}
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ // Unsupported-double path only checks the error code; the mask is
+ // intentionally not passed here.
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::add(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::add(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, loadMat(mask, useRoi), depth.second);
+
+ cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
+ cv::add(mat1, mat2, dst_gold, mask, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Array_Mask, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Add_Scalar
+
+// Fixture for cv::cuda::add with a matrix (first) and scalar (second) operand.
+PARAM_TEST_CASE(Add_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// mat + scalar, no mask, compared against CPU cv::add.
+CUDA_TEST_P(Add_Scalar, WithOutMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::add(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::add(loadMat(mat, useRoi), val, dst, cv::cuda::GpuMat(), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::add(mat, val, dst_gold, cv::noArray(), depth.second);
+
+ // Integer paths allow 1 ULP difference due to scalar rounding on GPU.
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
+ }
+}
+
+// mat + scalar with a {0,1} mask.
+CUDA_TEST_P(Add_Scalar, WithMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::add(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::add(loadMat(mat, useRoi), val, dst, loadMat(mask, useRoi), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::add(mat, val, dst_gold, mask, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Scalar, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Add_Scalar_First
+
+// Fixture for cv::cuda::add with the scalar as the FIRST operand
+// (scalar + mat), exercising the reversed-argument overload.
+PARAM_TEST_CASE(Add_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// scalar + mat, no mask.
+CUDA_TEST_P(Add_Scalar_First, WithOutMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::add(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::add(val, loadMat(mat, useRoi), dst, cv::cuda::GpuMat(), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::add(val, mat, dst_gold, cv::noArray(), depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+// scalar + mat with a {0,1} mask.
+CUDA_TEST_P(Add_Scalar_First, WithMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::add(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::add(val, loadMat(mat, useRoi), dst, loadMat(mask, useRoi), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::add(val, mat, dst_gold, mask, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Scalar_First, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Subtract_Array
+
+// Fixture for cv::cuda::subtract with two array operands; mirrors Add_Array.
+PARAM_TEST_CASE(Subtract_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ int channels;
+ bool useRoi;
+
+ int stype; // full source type
+ int dtype; // full destination type
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ stype = CV_MAKE_TYPE(depth.first, channels);
+ dtype = CV_MAKE_TYPE(depth.second, channels);
+ }
+};
+
+// GPU subtract vs. CPU cv::subtract on random inputs.
+CUDA_TEST_P(Subtract_Array, Accuracy)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::subtract(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::subtract(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, cv::cuda::GpuMat(), depth.second);
+
+ cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
+ cv::subtract(mat1, mat2, dst_gold, cv::noArray(), depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Array, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ ALL_CHANNELS,
+ WHOLE_SUBMAT));
+
+// Fixture for masked cv::cuda::subtract on single-channel arrays.
+PARAM_TEST_CASE(Subtract_Array_Mask, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ int stype; // single-channel source type
+ int dtype; // single-channel destination type
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ stype = CV_MAKE_TYPE(depth.first, 1);
+ dtype = CV_MAKE_TYPE(depth.second, 1);
+ }
+};
+
+// Masked subtract compared against the CPU gold result.
+CUDA_TEST_P(Subtract_Array_Mask, Accuracy)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0); // values in {0, 1}
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::subtract(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::subtract(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, loadMat(mask, useRoi), depth.second);
+
+ cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
+ cv::subtract(mat1, mat2, dst_gold, mask, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Array_Mask, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Subtract_Scalar
+
+// Fixture for cv::cuda::subtract with a matrix (first) and scalar (second).
+PARAM_TEST_CASE(Subtract_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// mat - scalar, no mask.
+CUDA_TEST_P(Subtract_Scalar, WithOutMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::subtract(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::subtract(loadMat(mat, useRoi), val, dst, cv::cuda::GpuMat(), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::subtract(mat, val, dst_gold, cv::noArray(), depth.second);
+
+ // Integer paths allow 1 ULP difference due to scalar rounding on GPU.
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
+ }
+}
+
+// mat - scalar with a {0,1} mask.
+CUDA_TEST_P(Subtract_Scalar, WithMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::subtract(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::subtract(loadMat(mat, useRoi), val, dst, loadMat(mask, useRoi), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::subtract(mat, val, dst_gold, mask, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Scalar, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Subtract_Scalar_First
+
+// Fixture for cv::cuda::subtract with the scalar as the FIRST operand
+// (scalar - mat).
+PARAM_TEST_CASE(Subtract_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// scalar - mat, no mask.
+CUDA_TEST_P(Subtract_Scalar_First, WithOutMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::subtract(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::subtract(val, loadMat(mat, useRoi), dst, cv::cuda::GpuMat(), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::subtract(val, mat, dst_gold, cv::noArray(), depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+// scalar - mat with a {0,1} mask.
+CUDA_TEST_P(Subtract_Scalar_First, WithMask)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::subtract(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::subtract(val, loadMat(mat, useRoi), dst, loadMat(mask, useRoi), depth.second);
+
+ cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
+ cv::subtract(val, mat, dst_gold, mask, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Scalar_First, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Multiply_Array
+
+// Fixture for cv::cuda::multiply with two array operands and an optional
+// scale factor.
+PARAM_TEST_CASE(Multiply_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ int channels;
+ bool useRoi;
+
+ int stype; // full source type
+ int dtype; // full destination type
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ stype = CV_MAKE_TYPE(depth.first, channels);
+ dtype = CV_MAKE_TYPE(depth.second, channels);
+ }
+};
+
+// mat1 * mat2 with unit scale, vs. CPU cv::multiply.
+CUDA_TEST_P(Multiply_Array, WithOutScale)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::multiply(loadMat(mat1), loadMat(mat2), dst, 1, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, 1, depth.second);
+
+ cv::Mat dst_gold;
+ cv::multiply(mat1, mat2, dst_gold, 1, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 0.0);
+ }
+}
+
+// mat1 * mat2 * scale with a random scale; tolerance is looser because
+// products can be large before saturation.
+CUDA_TEST_P(Multiply_Array, WithScale)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype);
+ double scale = randomDouble(0.0, 255.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::multiply(loadMat(mat1), loadMat(mat2), dst, scale, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, scale, depth.second);
+
+ cv::Mat dst_gold;
+ cv::multiply(mat1, mat2, dst_gold, scale, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 2.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Array, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ ALL_CHANNELS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Multiply_Array_Special
+
+// Fixture for the special mixed-type multiply overloads (NC4 x 32FC1),
+// where the scalar-per-pixel second operand scales all four channels.
+PARAM_TEST_CASE(Multiply_Array_Special, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// Multiplies an 8UC4 matrix by a 32FC1 matrix element-wise on the GPU and
+// checks every channel of every pixel against a CPU-computed gold value
+// (each float scales all four channels of the corresponding pixel).
+CUDA_TEST_P(Multiply_Array_Special, Case_8UC4x_32FC1)
+{
+ cv::Mat mat1 = randomMat(size, CV_8UC4);
+ cv::Mat mat2 = randomMat(size, CV_32FC1);
+
+ cv::cuda::GpuMat dst = createMat(size, CV_8UC4, useRoi);
+ cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
+
+ cv::Mat h_dst(dst); // download result to host
+
+ for (int y = 0; y < h_dst.rows; ++y)
+ {
+ const cv::Vec4b* mat1_row = mat1.ptr<cv::Vec4b>(y);
+ const float* mat2_row = mat2.ptr<float>(y);
+ const cv::Vec4b* dst_row = h_dst.ptr<cv::Vec4b>(y);
+
+ for (int x = 0; x < h_dst.cols; ++x)
+ {
+ cv::Vec4b val1 = mat1_row[x];
+ float val2 = mat2_row[x];
+ cv::Vec4b actual = dst_row[x];
+
+ cv::Vec4b gold;
+
+ gold[0] = cv::saturate_cast<uchar>(val1[0] * val2);
+ gold[1] = cv::saturate_cast<uchar>(val1[1] * val2);
+ gold[2] = cv::saturate_cast<uchar>(val1[2] * val2);
+ gold[3] = cv::saturate_cast<uchar>(val1[3] * val2);
+
+ // Fixed copy-paste bug: channels 2 and 3 were previously compared
+ // with index 1, so half of each pixel went unchecked.
+ ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
+ ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
+ ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
+ ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
+ }
+ }
+}
+
+// Multiplies a 16SC4 matrix by a 32FC1 matrix element-wise on the GPU and
+// checks every channel of every pixel against a CPU-computed gold value.
+CUDA_TEST_P(Multiply_Array_Special, Case_16SC4x_32FC1)
+{
+ cv::Mat mat1 = randomMat(size, CV_16SC4);
+ cv::Mat mat2 = randomMat(size, CV_32FC1);
+
+ cv::cuda::GpuMat dst = createMat(size, CV_16SC4, useRoi);
+ cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
+
+ cv::Mat h_dst(dst); // download result to host
+
+ for (int y = 0; y < h_dst.rows; ++y)
+ {
+ const cv::Vec4s* mat1_row = mat1.ptr<cv::Vec4s>(y);
+ const float* mat2_row = mat2.ptr<float>(y);
+ const cv::Vec4s* dst_row = h_dst.ptr<cv::Vec4s>(y);
+
+ for (int x = 0; x < h_dst.cols; ++x)
+ {
+ cv::Vec4s val1 = mat1_row[x];
+ float val2 = mat2_row[x];
+ cv::Vec4s actual = dst_row[x];
+
+ cv::Vec4s gold;
+
+ gold[0] = cv::saturate_cast<short>(val1[0] * val2);
+ gold[1] = cv::saturate_cast<short>(val1[1] * val2);
+ gold[2] = cv::saturate_cast<short>(val1[2] * val2);
+ gold[3] = cv::saturate_cast<short>(val1[3] * val2);
+
+ // Fixed copy-paste bug: channels 2 and 3 were previously compared
+ // with index 1, so half of each pixel went unchecked.
+ ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
+ ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
+ ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
+ ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
+ }
+ }
+}
+
+// Instantiate the special mixed-type multiply cases over all devices/sizes.
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Array_Special, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Multiply_Scalar
+
+// Fixture for cv::cuda::multiply with a matrix and a scalar operand.
+PARAM_TEST_CASE(Multiply_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// mat * scalar with unit scale.
+CUDA_TEST_P(Multiply_Scalar, WithOutScale)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::multiply(loadMat(mat), val, dst, 1, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ cv::cuda::multiply(loadMat(mat, useRoi), val, dst, 1, depth.second);
+
+ cv::Mat dst_gold;
+ cv::multiply(mat, val, dst_gold, 1, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+ }
+}
+
+
+// mat * scalar with an additional random scale factor.
+CUDA_TEST_P(Multiply_Scalar, WithScale)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+ double scale = randomDouble(0.0, 255.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::multiply(loadMat(mat), val, dst, scale, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ cv::cuda::multiply(loadMat(mat, useRoi), val, dst, scale, depth.second);
+
+ cv::Mat dst_gold;
+ cv::multiply(mat, val, dst_gold, scale, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Scalar, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Multiply_Scalar_First
+
+// Fixture for cv::cuda::multiply with the scalar as the FIRST operand.
+PARAM_TEST_CASE(Multiply_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// scalar * mat with unit scale.
+CUDA_TEST_P(Multiply_Scalar_First, WithOutScale)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::multiply(val, loadMat(mat), dst, 1, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ cv::cuda::multiply(val, loadMat(mat, useRoi), dst, 1, depth.second);
+
+ cv::Mat dst_gold;
+ cv::multiply(val, mat, dst_gold, 1, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+ }
+}
+
+
+// scalar * mat with an additional random scale factor.
+CUDA_TEST_P(Multiply_Scalar_First, WithScale)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(0, 255);
+ double scale = randomDouble(0.0, 255.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::multiply(val, loadMat(mat), dst, scale, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ cv::cuda::multiply(val, loadMat(mat, useRoi), dst, scale, depth.second);
+
+ cv::Mat dst_gold;
+ cv::multiply(val, mat, dst_gold, scale, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Scalar_First, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Divide_Array
+
+// Fixture for cv::cuda::divide with two array operands; the divisor is
+// generated in [1, 255] to avoid division by zero.
+PARAM_TEST_CASE(Divide_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ int channels;
+ bool useRoi;
+
+ int stype; // full source type
+ int dtype; // full destination type
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ stype = CV_MAKE_TYPE(depth.first, channels);
+ dtype = CV_MAKE_TYPE(depth.second, channels);
+ }
+};
+
+// mat1 / mat2 with unit scale, vs. CPU cv::divide.
+CUDA_TEST_P(Divide_Array, WithOutScale)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0); // non-zero divisor
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::divide(loadMat(mat1), loadMat(mat2), dst, 1, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, 1, depth.second);
+
+ cv::Mat dst_gold;
+ cv::divide(mat1, mat2, dst_gold, 1, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
+ }
+}
+
+// mat1 / mat2 * scale with a random scale.
+CUDA_TEST_P(Divide_Array, WithScale)
+{
+ cv::Mat mat1 = randomMat(size, stype);
+ cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0); // non-zero divisor
+ double scale = randomDouble(0.0, 255.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::divide(loadMat(mat1), loadMat(mat2), dst, scale, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
+ cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, scale, depth.second);
+
+ cv::Mat dst_gold;
+ cv::divide(mat1, mat2, dst_gold, scale, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 1.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Array, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ ALL_CHANNELS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Divide_Array_Special
+
+// Fixture for the special mixed-type divide overloads (NC4 / 32FC1),
+// where each float divides all four channels of the corresponding pixel.
+PARAM_TEST_CASE(Divide_Array_Special, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// Divides an 8UC4 matrix by a 32FC1 matrix (non-zero values) on the GPU
+// and checks every channel of every pixel against a CPU-computed gold value.
+CUDA_TEST_P(Divide_Array_Special, Case_8UC4x_32FC1)
+{
+ cv::Mat mat1 = randomMat(size, CV_8UC4);
+ cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0); // non-zero divisor
+
+ cv::cuda::GpuMat dst = createMat(size, CV_8UC4, useRoi);
+ cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
+
+ cv::Mat h_dst(dst); // download result to host
+
+ for (int y = 0; y < h_dst.rows; ++y)
+ {
+ const cv::Vec4b* mat1_row = mat1.ptr<cv::Vec4b>(y);
+ const float* mat2_row = mat2.ptr<float>(y);
+ const cv::Vec4b* dst_row = h_dst.ptr<cv::Vec4b>(y);
+
+ for (int x = 0; x < h_dst.cols; ++x)
+ {
+ cv::Vec4b val1 = mat1_row[x];
+ float val2 = mat2_row[x];
+ cv::Vec4b actual = dst_row[x];
+
+ cv::Vec4b gold;
+
+ gold[0] = cv::saturate_cast<uchar>(val1[0] / val2);
+ gold[1] = cv::saturate_cast<uchar>(val1[1] / val2);
+ gold[2] = cv::saturate_cast<uchar>(val1[2] / val2);
+ gold[3] = cv::saturate_cast<uchar>(val1[3] / val2);
+
+ // Fixed copy-paste bug: channels 2 and 3 were previously compared
+ // with index 1, so half of each pixel went unchecked.
+ ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
+ ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
+ ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
+ ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
+ }
+ }
+}
+
+// Divides a 16SC4 matrix by a 32FC1 matrix (non-zero values) on the GPU
+// and checks every channel of every pixel against a CPU-computed gold value.
+CUDA_TEST_P(Divide_Array_Special, Case_16SC4x_32FC1)
+{
+ cv::Mat mat1 = randomMat(size, CV_16SC4);
+ cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0); // non-zero divisor
+
+ cv::cuda::GpuMat dst = createMat(size, CV_16SC4, useRoi);
+ cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
+
+ cv::Mat h_dst(dst); // download result to host
+
+ for (int y = 0; y < h_dst.rows; ++y)
+ {
+ const cv::Vec4s* mat1_row = mat1.ptr<cv::Vec4s>(y);
+ const float* mat2_row = mat2.ptr<float>(y);
+ const cv::Vec4s* dst_row = h_dst.ptr<cv::Vec4s>(y);
+
+ for (int x = 0; x < h_dst.cols; ++x)
+ {
+ cv::Vec4s val1 = mat1_row[x];
+ float val2 = mat2_row[x];
+ cv::Vec4s actual = dst_row[x];
+
+ cv::Vec4s gold;
+
+ gold[0] = cv::saturate_cast<short>(val1[0] / val2);
+ gold[1] = cv::saturate_cast<short>(val1[1] / val2);
+ gold[2] = cv::saturate_cast<short>(val1[2] / val2);
+ gold[3] = cv::saturate_cast<short>(val1[3] / val2);
+
+ // Fixed copy-paste bug: channels 2 and 3 were previously compared
+ // with index 1, so half of each pixel went unchecked.
+ ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
+ ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
+ ASSERT_LE(std::abs(gold[2] - actual[2]), 1.0);
+ ASSERT_LE(std::abs(gold[3] - actual[3]), 1.0);
+ }
+ }
+}
+
+// Instantiate the special mixed-type divide cases over all devices/sizes.
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Array_Special, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Divide_Scalar
+
+// Fixture for cv::cuda::divide with a matrix numerator and scalar divisor
+// (divisor drawn from [1, 255] to avoid division by zero).
+PARAM_TEST_CASE(Divide_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth; // {source depth, destination depth}
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// mat / scalar with unit scale.
+CUDA_TEST_P(Divide_Scalar, WithOutScale)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(1.0, 255.0); // non-zero divisor
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::divide(loadMat(mat), val, dst, 1, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ cv::cuda::divide(loadMat(mat, useRoi), val, dst, 1, depth.second);
+
+ cv::Mat dst_gold;
+ cv::divide(mat, val, dst_gold, 1, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
+ }
+}
+
+// mat / scalar with an additional random scale factor.
+CUDA_TEST_P(Divide_Scalar, WithScale)
+{
+ cv::Mat mat = randomMat(size, depth.first);
+ cv::Scalar val = randomScalar(1.0, 255.0); // non-zero divisor
+ double scale = randomDouble(0.0, 255.0);
+
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::divide(loadMat(mat), val, dst, scale, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ cv::cuda::divide(loadMat(mat, useRoi), val, dst, scale, depth.second);
+
+ cv::Mat dst_gold;
+ cv::divide(mat, val, dst_gold, scale, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 1.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Scalar, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Divide_Scalar_First
+
+// Fixture for cv::cuda::divide with a scalar numerator and a matrix
+// denominator (scalar / mat).
+PARAM_TEST_CASE(Divide_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ std::pair<MatDepth, MatDepth> depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Divide_Scalar_First, Accuracy)
+{
+ double scale = randomDouble(0.0, 255.0);
+ // Denominator matrix kept >= 1 to avoid division by zero.
+ cv::Mat mat = randomMat(size, depth.first, 1.0, 255.0);
+
+ // CV_64F requires native double support; otherwise expect StsUnsupportedFormat.
+ if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::divide(scale, loadMat(mat), dst, depth.second);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
+ cv::cuda::divide(scale, loadMat(mat, useRoi), dst, depth.second);
+
+ cv::Mat dst_gold;
+ cv::divide(scale, mat, dst_gold, depth.second);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Scalar_First, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ DEPTH_PAIRS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// AbsDiff
+
+// Fixture for cv::cuda::absdiff, covering mat-mat, mat-scalar and
+// scalar-mat variants below.
+PARAM_TEST_CASE(AbsDiff, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(AbsDiff, Array)
+{
+ cv::Mat src1 = randomMat(size, depth);
+ cv::Mat src2 = randomMat(size, depth);
+
+ // CV_64F requires native double support; otherwise expect StsUnsupportedFormat.
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::absdiff(loadMat(src1), loadMat(src2), dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::absdiff(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
+
+ cv::Mat dst_gold;
+ cv::absdiff(src1, src2, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+ }
+}
+
+CUDA_TEST_P(AbsDiff, Scalar)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Scalar val = randomScalar(0.0, 255.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::absdiff(loadMat(src), val, dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::absdiff(loadMat(src, useRoi), val, dst);
+
+ cv::Mat dst_gold;
+ cv::absdiff(src, val, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth <= CV_32F ? 1.0 : 1e-5);
+ }
+}
+
+// Same as Scalar, but with the scalar as the first operand.
+CUDA_TEST_P(AbsDiff, Scalar_First)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Scalar val = randomScalar(0.0, 255.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::absdiff(val, loadMat(src), dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::absdiff(val, loadMat(src, useRoi), dst);
+
+ cv::Mat dst_gold;
+ cv::absdiff(val, src, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth <= CV_32F ? 1.0 : 1e-5);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, AbsDiff, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Abs
+
+// Fixture for cv::cuda::abs; only CV_16S and CV_32F depths are
+// instantiated (see INSTANTIATE below).
+PARAM_TEST_CASE(Abs, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Abs, Accuracy)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::abs(loadMat(src, useRoi), dst);
+
+ // cv::abs as CPU reference; exact match expected.
+ cv::Mat dst_gold = cv::abs(src);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Abs, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_16S), MatDepth(CV_32F)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Sqr
+
+// Fixture for cv::cuda::sqr (element-wise square).
+PARAM_TEST_CASE(Sqr, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Sqr, Accuracy)
+{
+ // Input range limited to 16 for CV_8U so the squared values fit
+ // without saturating.
+ cv::Mat src = randomMat(size, depth, 0, depth == CV_8U ? 16 : 255);
+
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::sqr(loadMat(src, useRoi), dst);
+
+ // Reference: element-wise src * src via cv::multiply.
+ cv::Mat dst_gold;
+ cv::multiply(src, src, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sqr, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32F)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Sqrt
+
+namespace
+{
+ // CPU reference: per-element sqrt computed in float, cast back to T.
+ template <typename T> void sqrtImpl(const cv::Mat& src, cv::Mat& dst)
+ {
+ dst.create(src.size(), src.type());
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ dst.at<T>(y, x) = static_cast<T>(std::sqrt(static_cast<float>(src.at<T>(y, x))));
+ }
+ }
+
+ void sqrtGold(const cv::Mat& src, cv::Mat& dst)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
+
+ // Indexed by src.depth(); covers CV_8U..CV_32F only — no CV_64F entry,
+ // which is fine as long as callers never pass double mats (the test
+ // below instantiates depths up to CV_32F).
+ const func_t funcs[] =
+ {
+ sqrtImpl<uchar>, sqrtImpl<schar>, sqrtImpl<ushort>, sqrtImpl<short>,
+ sqrtImpl<int>, sqrtImpl<float>
+ };
+
+ funcs[src.depth()](src, dst);
+ }
+}
+
+PARAM_TEST_CASE(Sqrt, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Sqrt, Accuracy)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::sqrt(loadMat(src, useRoi), dst);
+
+ cv::Mat dst_gold;
+ sqrtGold(src, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sqrt, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32F)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Log
+
+namespace
+{
+ // CPU reference: per-element natural log computed in float, cast back to T.
+ template <typename T> void logImpl(const cv::Mat& src, cv::Mat& dst)
+ {
+ dst.create(src.size(), src.type());
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ dst.at<T>(y, x) = static_cast<T>(std::log(static_cast<float>(src.at<T>(y, x))));
+ }
+ }
+
+ void logGold(const cv::Mat& src, cv::Mat& dst)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
+
+ // Indexed by src.depth(); depths CV_8U..CV_32F (no CV_64F entry —
+ // the test instantiates up to CV_32F only).
+ const func_t funcs[] =
+ {
+ logImpl<uchar>, logImpl<schar>, logImpl<ushort>, logImpl<short>,
+ logImpl<int>, logImpl<float>
+ };
+
+ funcs[src.depth()](src, dst);
+ }
+}
+
+PARAM_TEST_CASE(Log, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Log, Accuracy)
+{
+ // Values kept >= 1 so log() stays finite and non-negative.
+ cv::Mat src = randomMat(size, depth, 1.0, 255.0);
+
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::log(loadMat(src, useRoi), dst);
+
+ cv::Mat dst_gold;
+ logGold(src, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-6);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Log, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32F)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Exp
+
+namespace
+{
+ // CPU reference for integer depths: exp() in float, truncated to int,
+ // then saturated to T.
+ template <typename T> void expImpl(const cv::Mat& src, cv::Mat& dst)
+ {
+ dst.create(src.size(), src.type());
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ dst.at<T>(y, x) = cv::saturate_cast<T>(static_cast<int>(std::exp(static_cast<float>(src.at<T>(y, x)))));
+ }
+ }
+ // CPU reference for CV_32F: plain float exp, no rounding/saturation.
+ void expImpl_float(const cv::Mat& src, cv::Mat& dst)
+ {
+ dst.create(src.size(), src.type());
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ dst.at<float>(y, x) = std::exp(static_cast<float>(src.at<float>(y, x)));
+ }
+ }
+
+ void expGold(const cv::Mat& src, cv::Mat& dst)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
+
+ // Indexed by src.depth(); CV_32F uses the dedicated float path.
+ const func_t funcs[] =
+ {
+ expImpl<uchar>, expImpl<schar>, expImpl<ushort>, expImpl<short>,
+ expImpl<int>, expImpl_float
+ };
+
+ funcs[src.depth()](src, dst);
+ }
+}
+
+PARAM_TEST_CASE(Exp, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Exp, Accuracy)
+{
+ // Input limited to [0, 10] to keep exp() within a representable range.
+ cv::Mat src = randomMat(size, depth, 0.0, 10.0);
+
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::exp(loadMat(src, useRoi), dst);
+
+ cv::Mat dst_gold;
+ expGold(src, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-2);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Exp, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32F)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Pow
+
+// Fixture for cv::cuda::pow.
+PARAM_TEST_CASE(Pow, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Pow, Accuracy)
+{
+ cv::Mat src = randomMat(size, depth, 0.0, 10.0);
+ double power = randomDouble(2.0, 4.0);
+
+ // Integer inputs get an integer exponent so the expected result is exact.
+ if (src.depth() < CV_32F)
+ power = static_cast<int>(power);
+
+ // CV_64F requires native double support; otherwise expect StsUnsupportedFormat.
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::pow(loadMat(src), power, dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::pow(loadMat(src, useRoi), power, dst);
+
+ cv::Mat dst_gold;
+ cv::pow(src, power, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 0.0 : 1e-1);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Pow, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Compare_Array
+
+// Enumerates all cv::compare operation codes for parameterization.
+CV_ENUM(CmpCode, cv::CMP_EQ, cv::CMP_GT, cv::CMP_GE, cv::CMP_LT, cv::CMP_LE, cv::CMP_NE)
+#define ALL_CMP_CODES testing::Values(CmpCode(cv::CMP_EQ), CmpCode(cv::CMP_NE), CmpCode(cv::CMP_GT), CmpCode(cv::CMP_GE), CmpCode(cv::CMP_LT), CmpCode(cv::CMP_LE))
+
+PARAM_TEST_CASE(Compare_Array, cv::cuda::DeviceInfo, cv::Size, MatDepth, CmpCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int cmp_code;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ cmp_code = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Compare_Array, Accuracy)
+{
+ cv::Mat src1 = randomMat(size, depth);
+ cv::Mat src2 = randomMat(size, depth);
+
+ // CV_64F requires native double support; otherwise expect StsUnsupportedFormat.
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::compare(loadMat(src1), loadMat(src2), dst, cmp_code);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ // compare always produces an 8-bit mask.
+ cv::cuda::GpuMat dst = createMat(size, CV_8UC1, useRoi);
+ cv::cuda::compare(loadMat(src1, useRoi), loadMat(src2, useRoi), dst, cmp_code);
+
+ cv::Mat dst_gold;
+ cv::compare(src1, src2, dst_gold, cmp_code);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Compare_Array, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ ALL_CMP_CODES,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Compare_Scalar
+
+namespace
+{
+ // CPU reference: compare each channel of src against the matching scalar
+ // component with predicate Op; output is 255 where true, 0 where false.
+ template <template <typename> class Op, typename T>
+ void compareScalarImpl(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst)
+ {
+ Op<T> op;
+
+ const int cn = src.channels();
+
+ dst.create(src.size(), CV_MAKE_TYPE(CV_8U, cn));
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ for (int c = 0; c < cn; ++c)
+ {
+ T src_val = src.at<T>(y, x * cn + c);
+ T sc_val = cv::saturate_cast<T>(sc.val[c]);
+ dst.at<uchar>(y, x * cn + c) = static_cast<uchar>(static_cast<int>(op(src_val, sc_val)) * 255);
+ }
+ }
+ }
+ }
+
+ // Dispatch table: rows indexed by depth (CV_8U..CV_64F), columns by
+ // cmpop (CMP_EQ=0, CMP_GT, CMP_GE, CMP_LT, CMP_LE, CMP_NE) — the column
+ // order matches the cv::CMP_* enum values.
+ void compareScalarGold(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst, int cmpop)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst);
+ static const func_t funcs[7][6] =
+ {
+ {compareScalarImpl<std::equal_to, unsigned char> , compareScalarImpl<std::greater, unsigned char> , compareScalarImpl<std::greater_equal, unsigned char> , compareScalarImpl<std::less, unsigned char> , compareScalarImpl<std::less_equal, unsigned char> , compareScalarImpl<std::not_equal_to, unsigned char> },
+ {compareScalarImpl<std::equal_to, signed char> , compareScalarImpl<std::greater, signed char> , compareScalarImpl<std::greater_equal, signed char> , compareScalarImpl<std::less, signed char> , compareScalarImpl<std::less_equal, signed char> , compareScalarImpl<std::not_equal_to, signed char> },
+ {compareScalarImpl<std::equal_to, unsigned short>, compareScalarImpl<std::greater, unsigned short>, compareScalarImpl<std::greater_equal, unsigned short>, compareScalarImpl<std::less, unsigned short>, compareScalarImpl<std::less_equal, unsigned short>, compareScalarImpl<std::not_equal_to, unsigned short>},
+ {compareScalarImpl<std::equal_to, short> , compareScalarImpl<std::greater, short> , compareScalarImpl<std::greater_equal, short> , compareScalarImpl<std::less, short> , compareScalarImpl<std::less_equal, short> , compareScalarImpl<std::not_equal_to, short> },
+ {compareScalarImpl<std::equal_to, int> , compareScalarImpl<std::greater, int> , compareScalarImpl<std::greater_equal, int> , compareScalarImpl<std::less, int> , compareScalarImpl<std::less_equal, int> , compareScalarImpl<std::not_equal_to, int> },
+ {compareScalarImpl<std::equal_to, float> , compareScalarImpl<std::greater, float> , compareScalarImpl<std::greater_equal, float> , compareScalarImpl<std::less, float> , compareScalarImpl<std::less_equal, float> , compareScalarImpl<std::not_equal_to, float> },
+ {compareScalarImpl<std::equal_to, double> , compareScalarImpl<std::greater, double> , compareScalarImpl<std::greater_equal, double> , compareScalarImpl<std::less, double> , compareScalarImpl<std::less_equal, double> , compareScalarImpl<std::not_equal_to, double> }
+ };
+
+ funcs[src.depth()][cmpop](src, sc, dst);
+ }
+}
+
+PARAM_TEST_CASE(Compare_Scalar, cv::cuda::DeviceInfo, cv::Size, MatType, CmpCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ int cmp_code;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ cmp_code = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Compare_Scalar, Accuracy)
+{
+ cv::Mat src = randomMat(size, type);
+ cv::Scalar sc = randomScalar(0.0, 255.0);
+
+ // Round the scalar for integer types so CPU and GPU saturate identically.
+ if (src.depth() < CV_32F)
+ {
+ sc.val[0] = cvRound(sc.val[0]);
+ sc.val[1] = cvRound(sc.val[1]);
+ sc.val[2] = cvRound(sc.val[2]);
+ sc.val[3] = cvRound(sc.val[3]);
+ }
+
+ if (src.depth() == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::compare(loadMat(src), sc, dst, cmp_code);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, CV_MAKE_TYPE(CV_8U, src.channels()), useRoi);
+
+ cv::cuda::compare(loadMat(src, useRoi), sc, dst, cmp_code);
+
+ cv::Mat dst_gold;
+ compareScalarGold(src, sc, dst_gold, cmp_code);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Compare_Scalar, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ TYPES(CV_8U, CV_64F, 1, 4),
+ ALL_CMP_CODES,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Bitwise_Array
+
+// Fixture for mat-mat bitwise ops; inputs are generated once in SetUp and
+// shared by the Not/Or/And/Xor tests below.
+PARAM_TEST_CASE(Bitwise_Array, cv::cuda::DeviceInfo, cv::Size, MatType)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+
+ cv::Mat src1;
+ cv::Mat src2;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ src1 = randomMat(size, type, 0.0, std::numeric_limits<int>::max());
+ src2 = randomMat(size, type, 0.0, std::numeric_limits<int>::max());
+ }
+};
+
+CUDA_TEST_P(Bitwise_Array, Not)
+{
+ cv::cuda::GpuMat dst;
+ cv::cuda::bitwise_not(loadMat(src1), dst);
+
+ cv::Mat dst_gold = ~src1;
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(Bitwise_Array, Or)
+{
+ cv::cuda::GpuMat dst;
+ cv::cuda::bitwise_or(loadMat(src1), loadMat(src2), dst);
+
+ cv::Mat dst_gold = src1 | src2;
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(Bitwise_Array, And)
+{
+ cv::cuda::GpuMat dst;
+ cv::cuda::bitwise_and(loadMat(src1), loadMat(src2), dst);
+
+ cv::Mat dst_gold = src1 & src2;
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(Bitwise_Array, Xor)
+{
+ cv::cuda::GpuMat dst;
+ cv::cuda::bitwise_xor(loadMat(src1), loadMat(src2), dst);
+
+ cv::Mat dst_gold = src1 ^ src2;
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Bitwise_Array, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ TYPES(CV_8U, CV_32S, 1, 4)));
+
+//////////////////////////////////////////////////////////////////////////////
+// Bitwise_Scalar
+
+// Fixture for mat-scalar bitwise ops; the scalar is generated with integer
+// components so bit patterns are well defined.
+PARAM_TEST_CASE(Bitwise_Scalar, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int channels;
+
+ cv::Mat src;
+ cv::Scalar val;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ src = randomMat(size, CV_MAKE_TYPE(depth, channels));
+ cv::Scalar_<int> ival = randomScalar(0.0, std::numeric_limits<int>::max());
+ val = ival;
+ }
+};
+
+CUDA_TEST_P(Bitwise_Scalar, Or)
+{
+ cv::cuda::GpuMat dst;
+ cv::cuda::bitwise_or(loadMat(src), val, dst);
+
+ cv::Mat dst_gold;
+ cv::bitwise_or(src, val, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(Bitwise_Scalar, And)
+{
+ cv::cuda::GpuMat dst;
+ cv::cuda::bitwise_and(loadMat(src), val, dst);
+
+ cv::Mat dst_gold;
+ cv::bitwise_and(src, val, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(Bitwise_Scalar, Xor)
+{
+ cv::cuda::GpuMat dst;
+ cv::cuda::bitwise_xor(loadMat(src), val, dst);
+
+ cv::Mat dst_gold;
+ cv::bitwise_xor(src, val, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Bitwise_Scalar, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
+ IMAGE_CHANNELS));
+
+//////////////////////////////////////////////////////////////////////////////
+// RShift
+
+namespace
+{
+ // CPU reference: per-channel right shift by the matching scalar component.
+ // NOTE(review): "rhift" looks like a misspelling of "rshift"; kept as-is
+ // because the name is referenced by the test below.
+ template <typename T> void rhiftImpl(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
+ {
+ const int cn = src.channels();
+
+ dst.create(src.size(), src.type());
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ for (int c = 0; c < cn; ++c)
+ dst.at<T>(y, x * cn + c) = src.at<T>(y, x * cn + c) >> val.val[c];
+ }
+ }
+ }
+
+ // Dispatch over integer depths CV_8U..CV_32S (shift is undefined for floats).
+ void rhiftGold(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst);
+
+ const func_t funcs[] =
+ {
+ rhiftImpl<uchar>, rhiftImpl<schar>, rhiftImpl<ushort>, rhiftImpl<short>, rhiftImpl<int>
+ };
+
+ funcs[src.depth()](src, val, dst);
+ }
+}
+
+PARAM_TEST_CASE(RShift, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int channels;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(RShift, Accuracy)
+{
+ int type = CV_MAKE_TYPE(depth, channels);
+ cv::Mat src = randomMat(size, type);
+ // Shift amounts limited to [0, 8].
+ cv::Scalar_<int> val = randomScalar(0.0, 8.0);
+
+ cv::cuda::GpuMat dst = createMat(size, type, useRoi);
+ cv::cuda::rshift(loadMat(src, useRoi), val, dst);
+
+ cv::Mat dst_gold;
+ rhiftGold(src, val, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, RShift, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_8S),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32S)),
+ IMAGE_CHANNELS,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// LShift
+
+namespace
+{
+ // CPU reference: per-channel left shift by the matching scalar component.
+ // NOTE(review): "lhift" mirrors the "rhift" misspelling above; kept as-is
+ // because the name is referenced by the test below.
+ template <typename T> void lhiftImpl(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
+ {
+ const int cn = src.channels();
+
+ dst.create(src.size(), src.type());
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ for (int c = 0; c < cn; ++c)
+ dst.at<T>(y, x * cn + c) = src.at<T>(y, x * cn + c) << val.val[c];
+ }
+ }
+ }
+
+ // Dispatch over integer depths CV_8U..CV_32S.
+ void lhiftGold(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst);
+
+ const func_t funcs[] =
+ {
+ lhiftImpl<uchar>, lhiftImpl<schar>, lhiftImpl<ushort>, lhiftImpl<short>, lhiftImpl<int>
+ };
+
+ funcs[src.depth()](src, val, dst);
+ }
+}
+
+PARAM_TEST_CASE(LShift, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int channels;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(LShift, Accuracy)
+{
+ int type = CV_MAKE_TYPE(depth, channels);
+ cv::Mat src = randomMat(size, type);
+ // Shift amounts limited to [0, 8].
+ cv::Scalar_<int> val = randomScalar(0.0, 8.0);
+
+ cv::cuda::GpuMat dst = createMat(size, type, useRoi);
+ cv::cuda::lshift(loadMat(src, useRoi), val, dst);
+
+ cv::Mat dst_gold;
+ lhiftGold(src, val, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, LShift, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
+ IMAGE_CHANNELS,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Min
+
+// Fixture for cv::cuda::min (mat-mat and mat-scalar variants).
+PARAM_TEST_CASE(Min, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Min, Array)
+{
+ cv::Mat src1 = randomMat(size, depth);
+ cv::Mat src2 = randomMat(size, depth);
+
+ // CV_64F requires native double support; otherwise expect StsUnsupportedFormat.
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::min(loadMat(src1), loadMat(src2), dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::min(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
+
+ cv::Mat dst_gold = cv::min(src1, src2);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+ }
+}
+
+CUDA_TEST_P(Min, Scalar)
+{
+ cv::Mat src = randomMat(size, depth);
+ double val = randomDouble(0.0, 255.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::min(loadMat(src), val, dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::min(loadMat(src, useRoi), val, dst);
+
+ cv::Mat dst_gold = cv::min(src, val);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Min, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Max
+
+// Fixture for cv::cuda::max (mirror of the Min tests above).
+PARAM_TEST_CASE(Max, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Max, Array)
+{
+ cv::Mat src1 = randomMat(size, depth);
+ cv::Mat src2 = randomMat(size, depth);
+
+ // CV_64F requires native double support; otherwise expect StsUnsupportedFormat.
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::max(loadMat(src1), loadMat(src2), dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::max(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
+
+ cv::Mat dst_gold = cv::max(src1, src2);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+ }
+}
+
+CUDA_TEST_P(Max, Scalar)
+{
+ cv::Mat src = randomMat(size, depth);
+ double val = randomDouble(0.0, 255.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::max(loadMat(src), val, dst);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
+ cv::cuda::max(loadMat(src, useRoi), val, dst);
+
+ cv::Mat dst_gold = cv::max(src, val);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Max, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// AddWeighted
+
+// Fixture for cv::cuda::addWeighted; parameterized over both source depths
+// and the destination depth independently.
+PARAM_TEST_CASE(AddWeighted, cv::cuda::DeviceInfo, cv::Size, MatDepth, MatDepth, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth1;
+ int depth2;
+ int dst_depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth1 = GET_PARAM(2);
+ depth2 = GET_PARAM(3);
+ dst_depth = GET_PARAM(4);
+ useRoi = GET_PARAM(5);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// Checks cv::cuda::addWeighted (dst = src1*alpha + src2*beta + gamma)
+// against the CPU cv::addWeighted for every depth combination.
+CUDA_TEST_P(AddWeighted, Accuracy)
+{
+ cv::Mat src1 = randomMat(size, depth1);
+ cv::Mat src2 = randomMat(size, depth2);
+ double alpha = randomDouble(-10.0, 10.0);
+ double beta = randomDouble(-10.0, 10.0);
+ double gamma = randomDouble(-10.0, 10.0);
+
+ // Any CV_64F operand requires native double support; otherwise the call
+ // is expected to throw StsUnsupportedFormat.
+ if ((depth1 == CV_64F || depth2 == CV_64F || dst_depth == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::GpuMat dst;
+ cv::cuda::addWeighted(loadMat(src1), alpha, loadMat(src2), beta, gamma, dst, dst_depth);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ cv::cuda::GpuMat dst = createMat(size, dst_depth, useRoi);
+ cv::cuda::addWeighted(loadMat(src1, useRoi), alpha, loadMat(src2, useRoi), beta, gamma, dst, dst_depth);
+
+ cv::Mat dst_gold;
+ cv::addWeighted(src1, alpha, src2, beta, gamma, dst_gold, dst_depth);
+
+ // BUGFIX: this line carried a doubled '+' diff marker ("++ EXPECT_...")
+ // which would leave a stray '+' in the patched file; restored to a
+ // single added-line marker. Integer destinations allow a rounding
+ // tolerance of 2.
+ EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 2.0 : 1e-3);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, AddWeighted, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ ALL_DEPTH,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// Threshold
+
+// Enumerates the supported cv::threshold operation codes.
+CV_ENUM(ThreshOp, cv::THRESH_BINARY, cv::THRESH_BINARY_INV, cv::THRESH_TRUNC, cv::THRESH_TOZERO, cv::THRESH_TOZERO_INV)
+#define ALL_THRESH_OPS testing::Values(ThreshOp(cv::THRESH_BINARY), ThreshOp(cv::THRESH_BINARY_INV), ThreshOp(cv::THRESH_TRUNC), ThreshOp(cv::THRESH_TOZERO), ThreshOp(cv::THRESH_TOZERO_INV))
+
+PARAM_TEST_CASE(Threshold, cv::cuda::DeviceInfo, cv::Size, MatType, ThreshOp, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ int threshOp;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ threshOp = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Threshold, Accuracy)
+{
+ cv::Mat src = randomMat(size, type);
+ // thresh is drawn below maxVal so both branches of each op are exercised.
+ double maxVal = randomDouble(20.0, 127.0);
+ double thresh = randomDouble(0.0, maxVal);
+
+ cv::cuda::GpuMat dst = createMat(src.size(), src.type(), useRoi);
+ cv::cuda::threshold(loadMat(src, useRoi), dst, thresh, maxVal, threshOp);
+
+ cv::Mat dst_gold;
+ cv::threshold(src, dst_gold, thresh, maxVal, threshOp);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Threshold, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatType(CV_8UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
+ ALL_THRESH_OPS,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Magnitude
+
+PARAM_TEST_CASE(Magnitude, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Magnitude, NPP)
+{
+ cv::Mat src = randomMat(size, CV_32FC2);
+
+ cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::magnitude(loadMat(src, useRoi), dst);
+
+ cv::Mat arr[2];
+ cv::split(src, arr);
+ cv::Mat dst_gold;
+ cv::magnitude(arr[0], arr[1], dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
+}
+
+CUDA_TEST_P(Magnitude, Sqr_NPP)
+{
+ cv::Mat src = randomMat(size, CV_32FC2);
+
+ cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::magnitudeSqr(loadMat(src, useRoi), dst);
+
+ cv::Mat arr[2];
+ cv::split(src, arr);
+ cv::Mat dst_gold;
+ cv::magnitude(arr[0], arr[1], dst_gold);
+ cv::multiply(dst_gold, dst_gold, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
+}
+
+CUDA_TEST_P(Magnitude, Accuracy)
+{
+ cv::Mat x = randomMat(size, CV_32FC1);
+ cv::Mat y = randomMat(size, CV_32FC1);
+
+ cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::magnitude(loadMat(x, useRoi), loadMat(y, useRoi), dst);
+
+ cv::Mat dst_gold;
+ cv::magnitude(x, y, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
+}
+
+CUDA_TEST_P(Magnitude, Sqr_Accuracy)
+{
+ cv::Mat x = randomMat(size, CV_32FC1);
+ cv::Mat y = randomMat(size, CV_32FC1);
+
+ cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::magnitudeSqr(loadMat(x, useRoi), loadMat(y, useRoi), dst);
+
+ cv::Mat dst_gold;
+ cv::magnitude(x, y, dst_gold);
+ cv::multiply(dst_gold, dst_gold, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Magnitude, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// Phase
+
+namespace
+{
+ IMPLEMENT_PARAM_CLASS(AngleInDegrees, bool)
+}
+
+PARAM_TEST_CASE(Phase, cv::cuda::DeviceInfo, cv::Size, AngleInDegrees, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool angleInDegrees;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ angleInDegrees = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Phase, Accuracy)
+{
+ cv::Mat x = randomMat(size, CV_32FC1);
+ cv::Mat y = randomMat(size, CV_32FC1);
+
+ cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::phase(loadMat(x, useRoi), loadMat(y, useRoi), dst, angleInDegrees);
+
+ cv::Mat dst_gold;
+ cv::phase(x, y, dst_gold, angleInDegrees);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, angleInDegrees ? 1e-2 : 1e-3);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Phase, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// CartToPolar
+
+PARAM_TEST_CASE(CartToPolar, cv::cuda::DeviceInfo, cv::Size, AngleInDegrees, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool angleInDegrees;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ angleInDegrees = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(CartToPolar, Accuracy)
+{
+ cv::Mat x = randomMat(size, CV_32FC1);
+ cv::Mat y = randomMat(size, CV_32FC1);
+
+ cv::cuda::GpuMat mag = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::GpuMat angle = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::cartToPolar(loadMat(x, useRoi), loadMat(y, useRoi), mag, angle, angleInDegrees);
+
+ cv::Mat mag_gold;
+ cv::Mat angle_gold;
+ cv::cartToPolar(x, y, mag_gold, angle_gold, angleInDegrees);
+
+ EXPECT_MAT_NEAR(mag_gold, mag, 1e-4);
+ EXPECT_MAT_NEAR(angle_gold, angle, angleInDegrees ? 1e-2 : 1e-3);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CartToPolar, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// polarToCart
+
+PARAM_TEST_CASE(PolarToCart, cv::cuda::DeviceInfo, cv::Size, AngleInDegrees, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool angleInDegrees;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ angleInDegrees = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(PolarToCart, Accuracy)
+{
+ cv::Mat magnitude = randomMat(size, CV_32FC1);
+ cv::Mat angle = randomMat(size, CV_32FC1);
+
+ cv::cuda::GpuMat x = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::GpuMat y = createMat(size, CV_32FC1, useRoi);
+ cv::cuda::polarToCart(loadMat(magnitude, useRoi), loadMat(angle, useRoi), x, y, angleInDegrees);
+
+ cv::Mat x_gold;
+ cv::Mat y_gold;
+ cv::polarToCart(magnitude, angle, x_gold, y_gold, angleInDegrees);
+
+ EXPECT_MAT_NEAR(x_gold, x, 1e-4);
+ EXPECT_MAT_NEAR(y_gold, y, 1e-4);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, PolarToCart, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
+ WHOLE_SUBMAT));
+
+#endif // HAVE_CUDA
--- /dev/null
+++ b/modules/cudaarithm/test/test_reductions.cpp
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+#ifdef HAVE_CUDA
+
+using namespace cvtest;
+
+////////////////////////////////////////////////////////////////////////////////
+// Norm
+
+PARAM_TEST_CASE(Norm, cv::cuda::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int normCode;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ normCode = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Norm, Accuracy)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
+
+ cv::cuda::GpuMat d_buf;
+ double val = cv::cuda::norm(loadMat(src, useRoi), normCode, loadMat(mask, useRoi), d_buf);
+
+ double val_gold = cv::norm(src, normCode, mask);
+
+ EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Norm, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_8S),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32S),
+ MatDepth(CV_32F)),
+ testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// normDiff
+
+PARAM_TEST_CASE(NormDiff, cv::cuda::DeviceInfo, cv::Size, NormCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int normCode;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ normCode = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(NormDiff, Accuracy)
+{
+ cv::Mat src1 = randomMat(size, CV_8UC1);
+ cv::Mat src2 = randomMat(size, CV_8UC1);
+
+ double val = cv::cuda::norm(loadMat(src1, useRoi), loadMat(src2, useRoi), normCode);
+
+ double val_gold = cv::norm(src1, src2, normCode);
+
+ EXPECT_NEAR(val_gold, val, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, NormDiff, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Sum
+
+namespace
+{
+ template <typename T>
+ cv::Scalar absSumImpl(const cv::Mat& src)
+ {
+ const int cn = src.channels();
+
+ cv::Scalar sum = cv::Scalar::all(0);
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ for (int c = 0; c < cn; ++c)
+ sum[c] += std::abs(src.at<T>(y, x * cn + c));
+ }
+ }
+
+ return sum;
+ }
+
+ cv::Scalar absSumGold(const cv::Mat& src)
+ {
+ typedef cv::Scalar (*func_t)(const cv::Mat& src);
+
+ static const func_t funcs[] =
+ {
+ absSumImpl<uchar>,
+ absSumImpl<schar>,
+ absSumImpl<ushort>,
+ absSumImpl<short>,
+ absSumImpl<int>,
+ absSumImpl<float>,
+ absSumImpl<double>
+ };
+
+ return funcs[src.depth()](src);
+ }
+
+ template <typename T>
+ cv::Scalar sqrSumImpl(const cv::Mat& src)
+ {
+ const int cn = src.channels();
+
+ cv::Scalar sum = cv::Scalar::all(0);
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ for (int c = 0; c < cn; ++c)
+ {
+ const T val = src.at<T>(y, x * cn + c);
+ sum[c] += val * val;
+ }
+ }
+ }
+
+ return sum;
+ }
+
+ cv::Scalar sqrSumGold(const cv::Mat& src)
+ {
+ typedef cv::Scalar (*func_t)(const cv::Mat& src);
+
+ static const func_t funcs[] =
+ {
+ sqrSumImpl<uchar>,
+ sqrSumImpl<schar>,
+ sqrSumImpl<ushort>,
+ sqrSumImpl<short>,
+ sqrSumImpl<int>,
+ sqrSumImpl<float>,
+ sqrSumImpl<double>
+ };
+
+ return funcs[src.depth()](src);
+ }
+}
+
+PARAM_TEST_CASE(Sum, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ bool useRoi;
+
+ cv::Mat src;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ src = randomMat(size, type, -128.0, 128.0);
+ }
+};
+
+CUDA_TEST_P(Sum, Simple)
+{
+ cv::Scalar val = cv::cuda::sum(loadMat(src, useRoi));
+
+ cv::Scalar val_gold = cv::sum(src);
+
+ EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+CUDA_TEST_P(Sum, Abs)
+{
+ cv::Scalar val = cv::cuda::absSum(loadMat(src, useRoi));
+
+ cv::Scalar val_gold = absSumGold(src);
+
+ EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+CUDA_TEST_P(Sum, Sqr)
+{
+ cv::Scalar val = cv::cuda::sqrSum(loadMat(src, useRoi));
+
+ cv::Scalar val_gold = sqrSumGold(src);
+
+ EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sum, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ TYPES(CV_8U, CV_64F, 1, 4),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// MinMax
+
+PARAM_TEST_CASE(MinMax, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(MinMax, WithoutMask)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src), &minVal, &maxVal);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src, useRoi), &minVal, &maxVal);
+
+ double minVal_gold, maxVal_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+ }
+}
+
+CUDA_TEST_P(MinMax, WithMask)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src), &minVal, &maxVal, loadMat(mask));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src, useRoi), &minVal, &maxVal, loadMat(mask, useRoi));
+
+ double minVal_gold, maxVal_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, 0, 0, mask);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+ }
+}
+
+CUDA_TEST_P(MinMax, NullPtr)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src), &minVal, 0);
+ cv::cuda::minMax(loadMat(src), 0, &maxVal);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src, useRoi), &minVal, 0);
+ cv::cuda::minMax(loadMat(src, useRoi), 0, &maxVal);
+
+ double minVal_gold, maxVal_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, 0, 0);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMax, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// MinMaxLoc
+
+namespace
+{
+ template <typename T>
+ void expectEqualImpl(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
+ {
+ EXPECT_EQ(src.at<T>(loc_gold.y, loc_gold.x), src.at<T>(loc.y, loc.x));
+ }
+
+ void expectEqual(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Point loc_gold, cv::Point loc);
+
+ static const func_t funcs[] =
+ {
+ expectEqualImpl<uchar>,
+ expectEqualImpl<schar>,
+ expectEqualImpl<ushort>,
+ expectEqualImpl<short>,
+ expectEqualImpl<int>,
+ expectEqualImpl<float>,
+ expectEqualImpl<double>
+ };
+
+ funcs[src.depth()](src, loc_gold, loc);
+ }
+}
+
+PARAM_TEST_CASE(MinMaxLoc, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(MinMaxLoc, WithoutMask)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src), &minVal, &maxVal, &minLoc, &maxLoc);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc);
+
+ double minVal_gold, maxVal_gold;
+ cv::Point minLoc_gold, maxLoc_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+
+ expectEqual(src, minLoc_gold, minLoc);
+ expectEqual(src, maxLoc_gold, maxLoc);
+ }
+}
+
+CUDA_TEST_P(MinMaxLoc, WithMask)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask, useRoi));
+
+ double minVal_gold, maxVal_gold;
+ cv::Point minLoc_gold, maxLoc_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold, mask);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+
+ expectEqual(src, minLoc_gold, minLoc);
+ expectEqual(src, maxLoc_gold, maxLoc);
+ }
+}
+
+CUDA_TEST_P(MinMaxLoc, NullPtr)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, 0, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, &maxVal, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, &minLoc, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, 0, &maxLoc);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, 0, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, &maxVal, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, &minLoc, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, 0, &maxLoc);
+
+ double minVal_gold, maxVal_gold;
+ cv::Point minLoc_gold, maxLoc_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+
+ expectEqual(src, minLoc_gold, minLoc);
+ expectEqual(src, maxLoc_gold, maxLoc);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMaxLoc, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////
+// CountNonZero
+
+PARAM_TEST_CASE(CountNonZero, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(CountNonZero, Accuracy)
+{
+ cv::Mat srcBase = randomMat(size, CV_8U, 0.0, 1.5);
+ cv::Mat src;
+ srcBase.convertTo(src, depth);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::countNonZero(loadMat(src));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ int val = cv::cuda::countNonZero(loadMat(src, useRoi));
+
+ int val_gold = cv::countNonZero(src);
+
+ ASSERT_EQ(val_gold, val);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CountNonZero, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Reduce
+
+CV_ENUM(ReduceCode, cv::REDUCE_SUM, cv::REDUCE_AVG, cv::REDUCE_MAX, cv::REDUCE_MIN)
+#define ALL_REDUCE_CODES testing::Values(ReduceCode(cv::REDUCE_SUM), ReduceCode(cv::REDUCE_AVG), ReduceCode(cv::REDUCE_MAX), ReduceCode(cv::REDUCE_MIN))
+
+PARAM_TEST_CASE(Reduce, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, ReduceCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int channels;
+ int reduceOp;
+ bool useRoi;
+
+ int type;
+ int dst_depth;
+ int dst_type;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ reduceOp = GET_PARAM(4);
+ useRoi = GET_PARAM(5);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ type = CV_MAKE_TYPE(depth, channels);
+
+ if (reduceOp == cv::REDUCE_MAX || reduceOp == cv::REDUCE_MIN)
+ dst_depth = depth;
+ else if (reduceOp == cv::REDUCE_SUM)
+ dst_depth = depth == CV_8U ? CV_32S : depth < CV_64F ? CV_32F : depth;
+ else
+ dst_depth = depth < CV_32F ? CV_32F : depth;
+
+ dst_type = CV_MAKE_TYPE(dst_depth, channels);
+ }
+
+};
+
+CUDA_TEST_P(Reduce, Rows)
+{
+ cv::Mat src = randomMat(size, type);
+
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.cols, 1), dst_type, useRoi);
+ cv::cuda::reduce(loadMat(src, useRoi), dst, 0, reduceOp, dst_depth);
+
+ cv::Mat dst_gold;
+ cv::reduce(src, dst_gold, 0, reduceOp, dst_depth);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
+}
+
+CUDA_TEST_P(Reduce, Cols)
+{
+ cv::Mat src = randomMat(size, type);
+
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.rows, 1), dst_type, useRoi);
+ cv::cuda::reduce(loadMat(src, useRoi), dst, 1, reduceOp, dst_depth);
+
+ cv::Mat dst_gold;
+ cv::reduce(src, dst_gold, 1, reduceOp, dst_depth);
+ dst_gold.cols = dst_gold.rows;
+ dst_gold.rows = 1;
+ dst_gold.step = dst_gold.cols * dst_gold.elemSize();
+
+ EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Reduce, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32F),
+ MatDepth(CV_64F)),
+ ALL_CHANNELS,
+ ALL_REDUCE_CODES,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Normalize
+
+PARAM_TEST_CASE(Normalize, cv::cuda::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ int norm_type;
+ bool useRoi;
+
+ double alpha;
+ double beta;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ norm_type = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ alpha = 1;
+ beta = 0;
+ }
+
+};
+
+CUDA_TEST_P(Normalize, WithOutMask)
+{
+ cv::Mat src = randomMat(size, type);
+
+ cv::cuda::GpuMat dst = createMat(size, type, useRoi);
+ cv::cuda::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type);
+
+ cv::Mat dst_gold;
+ cv::normalize(src, dst_gold, alpha, beta, norm_type, type);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+}
+
+CUDA_TEST_P(Normalize, WithMask)
+{
+ cv::Mat src = randomMat(size, type);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
+
+ cv::cuda::GpuMat dst = createMat(size, type, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type, loadMat(mask, useRoi));
+
+ cv::Mat dst_gold(size, type);
+ dst_gold.setTo(cv::Scalar::all(0));
+ cv::normalize(src, dst_gold, alpha, beta, norm_type, type, mask);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Normalize, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF), NormCode(cv::NORM_MINMAX)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// MeanStdDev
+
+PARAM_TEST_CASE(MeanStdDev, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(MeanStdDev, Accuracy)
+{
+ cv::Mat src = randomMat(size, CV_8UC1);
+
+ if (!supportFeature(devInfo, cv::cuda::FEATURE_SET_COMPUTE_13))
+ {
+ try
+ {
+ cv::Scalar mean;
+ cv::Scalar stddev;
+ cv::cuda::meanStdDev(loadMat(src, useRoi), mean, stddev);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
+ }
+ }
+ else
+ {
+ cv::Scalar mean;
+ cv::Scalar stddev;
+ cv::cuda::meanStdDev(loadMat(src, useRoi), mean, stddev);
+
+ cv::Scalar mean_gold;
+ cv::Scalar stddev_gold;
+ cv::meanStdDev(src, mean_gold, stddev_gold);
+
+ EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
+ EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MeanStdDev, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// Integral
+
+PARAM_TEST_CASE(Integral, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Integral, Accuracy)
+{
+ cv::Mat src = randomMat(size, CV_8UC1);
+
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_32SC1, useRoi);
+ cv::cuda::integral(loadMat(src, useRoi), dst);
+
+ cv::Mat dst_gold;
+ cv::integral(src, dst_gold, CV_32S);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Integral, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// IntegralSqr
+
+PARAM_TEST_CASE(IntegralSqr, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(IntegralSqr, Accuracy)
+{
+ cv::Mat src = randomMat(size, CV_8UC1);
+
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_64FC1, useRoi);
+ cv::cuda::sqrIntegral(loadMat(src, useRoi), dst);
+
+ cv::Mat dst_gold, temp;
+ cv::integral(src, temp, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, IntegralSqr, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+#endif // HAVE_CUDA
--- /dev/null
+++ b/modules/cudaarithm/test/test_color.cpp
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+#ifdef HAVE_CUDA
+
+using namespace cvtest;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// cvtColor
+
+PARAM_TEST_CASE(CvtColor, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ cv::Mat img;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ img = randomMat(size, CV_MAKE_TYPE(depth, 3), 0.0, depth == CV_32F ? 1.0 : 255.0);
+ }
+};
+
+CUDA_TEST_P(CvtColor, BGR2RGB)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2RGBA)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2RGBA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2RGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2BGRA)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2BGRA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2BGRA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2RGBA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2RGBA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2RGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2GRAY)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, RGB2GRAY)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGRA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGRA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGRA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2GRAY)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2GRAY)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGB2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652BGR)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652RGB)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652BGRA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652BGRA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652BGRA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652RGBA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652RGBA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652RGBA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652GRAY)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGB2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552BGR)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552RGB)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552BGRA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552BGRA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552BGRA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552RGBA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552RGBA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552RGBA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552GRAY)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2XYZ)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2XYZ);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, RGB2XYZ)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2XYZ);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2XYZ4)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2XYZ, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2XYZ);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2XYZ4)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2XYZ, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2XYZ);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ2RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ42BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2BGR);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ42BGRA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2BGR, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2BGR, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2YCrCb)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YCrCb);
+
- EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
++ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+}
+
+CUDA_TEST_P(CvtColor, RGB2YCrCb)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2YCrCb);
+
- EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
++ EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2YCrCb4)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YCrCb, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
- EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
++ EXPECT_MAT_NEAR(dst_gold, h_dst, 1.0);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2YCrCb4)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YCrCb, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
++ EXPECT_MAT_NEAR(dst_gold, h_dst, 1.0);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb2RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb42RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2RGB);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb42RGBA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2RGB, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2RGB, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2HSV)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HSV);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HSV)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HSV4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2HSV4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, BGR2HLS)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HLS);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HLS)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HLS4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2HLS4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV2BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV2RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV42BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV42BGRA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS2BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS2RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS42RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS42RGBA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, BGR2HSV_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HSV_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HSV_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HSV_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HSV4_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV_FULL, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV_FULL);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2HSV4_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV_FULL, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV_FULL);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, BGR2HLS_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HLS_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HLS_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HLS_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HLS4_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS_FULL, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS_FULL);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2HLS4_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS_FULL, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS_FULL);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV2BGR_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV2RGB_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV42RGB_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB_FULL);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV42RGBA_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB_FULL, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB_FULL, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS2BGR_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2BGR_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2BGR_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS2RGB_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS42RGB_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB_FULL);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB_FULL);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS42RGBA_FULL)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB_FULL, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB_FULL, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, BGR2YUV)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YUV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YUV);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, RGB2YUV)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2YUV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2YUV);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YUV2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YUV);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YUV2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YUV42BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YUV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YUV2BGR);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YUV42BGRA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YUV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YUV2BGR, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2BGR, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YUV2RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_RGB2YUV);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YUV2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+// CUDA BGR->YUV with dcn = 4; the 4th output channel is dropped before
+// comparing against the 3-channel CPU gold result.
+CUDA_TEST_P(CvtColor, BGR2YUV4)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YUV, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YUV);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
+}
+
+// 4-channel RGBA input with dcn = 4: CUDA RGB->YUV must ignore alpha; the 4th
+// output channel is dropped before comparison with the CPU gold result.
+CUDA_TEST_P(CvtColor, RGBA2YUV4)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2YUV, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2YUV);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
+}
+
+// BGR->Lab vs. CPU gold. CV_16U is skipped (Lab conversion is not defined for
+// 16-bit inputs); tolerance is 1 LSB for 8U, 1e-3 for float.
+CUDA_TEST_P(CvtColor, BGR2Lab)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Lab);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Lab);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// RGB->Lab vs. CPU gold; CV_16U skipped (unsupported for Lab).
+CUDA_TEST_P(CvtColor, RGB2Lab)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2Lab);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2Lab);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// 4-channel input + dcn = 4 variant of BGR->Lab; alpha must be ignored and the
+// 4th output channel is dropped before comparison. CV_16U skipped.
+CUDA_TEST_P(CvtColor, BGRA2Lab4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Lab, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Lab);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// Linear-BGR (no gamma) -> Lab vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, LBGR2Lab)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Lab);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Lab);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// Linear-RGB (no gamma) -> Lab vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, LRGB2Lab)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LRGB2Lab);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_LRGB2Lab);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// 4-channel input + dcn = 4 variant of linear-BGR -> Lab; alpha ignored, 4th
+// output channel dropped before comparison. CV_16U skipped.
+CUDA_TEST_P(CvtColor, LBGRA2Lab4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Lab, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Lab);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// Inverse conversion Lab->BGR vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Lab2BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Lab2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
+}
+
+// Inverse conversion Lab->RGB vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Lab2RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Lab2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
+}
+
+// Lab->BGR with dcn = 4 on both paths; verifies the GPU result really has 4
+// channels before comparing. CV_16U skipped.
+CUDA_TEST_P(CvtColor, Lab2BGRA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2BGR, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Lab2BGR, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
+}
+
+// Lab -> linear-BGR (no gamma) vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Lab2LBGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LBGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LBGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
+}
+
+// Lab -> linear-RGB (no gamma) vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Lab2LRGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LRGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LRGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
+}
+
+// Lab -> linear-RGB with dcn = 4 on both paths; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Lab2LRGBA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LRGB, 4);
+
+ // Verify the requested 4-channel output was produced, as the other
+ // dcn = 4 tests (e.g. Lab2BGRA) do.
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LRGB, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
+}
+
+// BGR->Luv vs. CPU gold; CV_16U skipped (Luv conversion is not defined for
+// 16-bit inputs); tolerance is 1 LSB for 8U, 1e-3 for float.
+CUDA_TEST_P(CvtColor, BGR2Luv)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Luv);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Luv);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// RGB->Luv vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, RGB2Luv)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2Luv);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2Luv);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// 4-channel input + dcn = 4 variant of BGR->Luv; alpha ignored, 4th output
+// channel dropped before comparison. CV_16U skipped.
+CUDA_TEST_P(CvtColor, BGRA2Luv4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Luv, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Luv);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// Linear-BGR (no gamma) -> Luv vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, LBGR2Luv)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Luv);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Luv);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// Linear-RGB (no gamma) -> Luv vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, LRGB2Luv)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LRGB2Luv);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_LRGB2Luv);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// 4-channel input + dcn = 4 variant of linear-BGR -> Luv; alpha ignored, 4th
+// output channel dropped before comparison. CV_16U skipped.
+CUDA_TEST_P(CvtColor, LBGRA2Luv4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Luv, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Luv);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
+}
+
+// Inverse conversion Luv->BGR vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Luv2BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Luv2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
+}
+
+// Inverse conversion Luv->RGB vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Luv2RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Luv2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
+}
+
+// Luv->BGR with dcn = 4 on both paths; verifies the GPU result really has 4
+// channels before comparing. CV_16U skipped.
+CUDA_TEST_P(CvtColor, Luv2BGRA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2BGR, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Luv2BGR, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
+}
+
+// Luv -> linear-BGR (no gamma) vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Luv2LBGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LBGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LBGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
+}
+
+// Luv -> linear-RGB (no gamma) vs. CPU gold; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Luv2LRGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LRGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LRGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
+}
+
+// Luv -> linear-RGB with dcn = 4 on both paths; CV_16U skipped.
+CUDA_TEST_P(CvtColor, Luv2LRGBA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LRGB, 4);
+
+ // Verify the requested 4-channel output was produced, as the other
+ // dcn = 4 tests (e.g. Luv2BGRA) do.
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LRGB, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
+}
+
+#if defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
+
+// Alpha-premultiplication (RGBA -> mRGBA) on random 8U data vs. CPU gold;
+// the conversion is only exercised for CV_8U.
+CUDA_TEST_P(CvtColor, RGBA2mRGBA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = randomMat(size, CV_MAKE_TYPE(depth, 4));
+
+ cv::cuda::GpuMat dst = createMat(src.size(), src.type(), useRoi);
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2mRGBA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2mRGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1);
+}
+
+#endif // defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
+
+// Bayer BG demosaic on random data vs. CPU gold. Only 8U/16U are supported
+// and ROI inputs are skipped; a 1-pixel border is excluded from the
+// comparison because border handling differs between implementations.
+CUDA_TEST_P(CvtColor, BayerBG2BGR)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// dcn = 4 variant of Bayer BG demosaic: GPU output is converted BGRA->BGR
+// before comparing against the 3-channel CPU gold; 1-pixel border excluded.
+CUDA_TEST_P(CvtColor, BayerBG2BGR4)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2BGR, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2BGR);
+
+ cv::Mat dst4(dst);
+ cv::Mat dst3;
+ cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// Bayer GB demosaic vs. CPU gold; 8U/16U only, no ROI, 1-pixel border excluded.
+CUDA_TEST_P(CvtColor, BayerGB2BGR)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// dcn = 4 variant of Bayer GB demosaic; GPU BGRA output reduced to BGR before
+// comparison; 1-pixel border excluded.
+CUDA_TEST_P(CvtColor, BayerGB2BGR4)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2BGR, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2BGR);
+
+ cv::Mat dst4(dst);
+ cv::Mat dst3;
+ cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// Bayer RG demosaic vs. CPU gold; 8U/16U only, no ROI, 1-pixel border excluded.
+CUDA_TEST_P(CvtColor, BayerRG2BGR)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// dcn = 4 variant of Bayer RG demosaic; GPU BGRA output reduced to BGR before
+// comparison; 1-pixel border excluded.
+CUDA_TEST_P(CvtColor, BayerRG2BGR4)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2BGR, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2BGR);
+
+ cv::Mat dst4(dst);
+ cv::Mat dst3;
+ cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// Bayer GR demosaic vs. CPU gold; 8U/16U only, no ROI, 1-pixel border excluded.
+CUDA_TEST_P(CvtColor, BayerGR2BGR)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// dcn = 4 variant of Bayer GR demosaic; GPU BGRA output reduced to BGR before
+// comparison; 1-pixel border excluded.
+CUDA_TEST_P(CvtColor, BayerGR2BGR4)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2BGR, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2BGR);
+
+ cv::Mat dst4(dst);
+ cv::Mat dst3;
+ cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
+}
+
+// Bayer BG -> grayscale vs. CPU gold; 8U/16U only, no ROI, 1-pixel border
+// excluded; tolerance 2 allows rounding differences in the gray weighting.
+CUDA_TEST_P(CvtColor, BayerBG2Gray)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
+}
+
+// Bayer GB -> grayscale vs. CPU gold; same constraints as BayerBG2Gray.
+CUDA_TEST_P(CvtColor, BayerGB2Gray)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
+}
+
+// Bayer RG -> grayscale vs. CPU gold; same constraints as BayerBG2Gray.
+CUDA_TEST_P(CvtColor, BayerRG2Gray)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
+}
+
+// Bayer GR -> grayscale vs. CPU gold; same constraints as BayerBG2Gray.
+CUDA_TEST_P(CvtColor, BayerGR2Gray)
+{
+ if ((depth != CV_8U && depth != CV_16U) || useRoi)
+ return;
+
+ cv::Mat src = randomMat(size, depth);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
+}
+
+// Instantiate CvtColor over all devices, several sizes, three depths
+// (8U/16U/32F) and both whole-matrix and submatrix (ROI) inputs.
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, CvtColor, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32F)),
+ WHOLE_SUBMAT));
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// Demosaicing
+
+// Fixture for cv::cuda::demosaicing tests; parameterized only by device.
+struct Demosaicing : testing::TestWithParam<cv::cuda::DeviceInfo>
+{
+ cv::cuda::DeviceInfo devInfo;
+
+ virtual void SetUp()
+ {
+ devInfo = GetParam();
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+
+ // Build a single-channel Bayer mosaic from a BGR image by sampling, per
+ // pixel, the channel dictated by the 2x2 Bayer pattern. firstRed is the
+ // position of the first red sample and selects among the four patterns.
+ static void mosaic(const cv::Mat_<cv::Vec3b>& src, cv::Mat_<uchar>& dst, cv::Point firstRed)
+ {
+ dst.create(src.size());
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ cv::Vec3b pix = src(y, x);
+
+ // Position of (x, y) within the 2x2 pattern, shifted by firstRed.
+ cv::Point alternate;
+ alternate.x = (x + firstRed.x) % 2;
+ alternate.y = (y + firstRed.y) % 2;
+
+ if (alternate.y == 0)
+ {
+ if (alternate.x == 0)
+ {
+ // RG
+ // GB
+ dst(y, x) = pix[2];
+ }
+ else
+ {
+ // GR
+ // BG
+ dst(y, x) = pix[1];
+ }
+ }
+ else
+ {
+ if (alternate.x == 0)
+ {
+ // GB
+ // RG
+ dst(y, x) = pix[1];
+ }
+ else
+ {
+ // BG
+ // GR
+ dst(y, x) = pix[0];
+ }
+ }
+ }
+ }
+ }
+};
+
+// Mosaic a real image as Bayer BG (firstRed at (1,1)), demosaic on the GPU,
+// and require >=98% similarity to the original image.
+CUDA_TEST_P(Demosaicing, BayerBG2BGR)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(1, 1));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerBG2BGR);
+
+ EXPECT_MAT_SIMILAR(img, dst, 2e-2);
+}
+
+// Bayer GB pattern (firstRed at (0,1)); GPU demosaic vs. original image.
+CUDA_TEST_P(Demosaicing, BayerGB2BGR)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(0, 1));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerGB2BGR);
+
+ EXPECT_MAT_SIMILAR(img, dst, 2e-2);
+}
+
+// Bayer RG pattern (firstRed at (0,0)); GPU demosaic vs. original image.
+CUDA_TEST_P(Demosaicing, BayerRG2BGR)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(0, 0));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerRG2BGR);
+
+ EXPECT_MAT_SIMILAR(img, dst, 2e-2);
+}
+
+// Bayer GR pattern (firstRed at (1,0)); GPU demosaic vs. original image.
+CUDA_TEST_P(Demosaicing, BayerGR2BGR)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(1, 0));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerGR2BGR);
+
+ EXPECT_MAT_SIMILAR(img, dst, 2e-2);
+}
+
+// Malvar-He-Cutler (MHT) demosaic, Bayer BG; the tighter 5e-3 threshold
+// reflects the higher quality of the MHT algorithm.
+CUDA_TEST_P(Demosaicing, BayerBG2BGR_MHT)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(1, 1));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerBG2BGR_MHT);
+
+ EXPECT_MAT_SIMILAR(img, dst, 5e-3);
+}
+
+// MHT demosaic, Bayer GB pattern.
+CUDA_TEST_P(Demosaicing, BayerGB2BGR_MHT)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(0, 1));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerGB2BGR_MHT);
+
+ EXPECT_MAT_SIMILAR(img, dst, 5e-3);
+}
+
+// MHT demosaic, Bayer RG pattern.
+CUDA_TEST_P(Demosaicing, BayerRG2BGR_MHT)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(0, 0));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerRG2BGR_MHT);
+
+ EXPECT_MAT_SIMILAR(img, dst, 5e-3);
+}
+
+// MHT demosaic, Bayer GR pattern.
+CUDA_TEST_P(Demosaicing, BayerGR2BGR_MHT)
+{
+ cv::Mat img = readImage("stereobm/aloe-L.png");
+ ASSERT_FALSE(img.empty()) << "Can't load input image";
+
+ cv::Mat_<uchar> src;
+ mosaic(img, src, cv::Point(1, 0));
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerGR2BGR_MHT);
+
+ EXPECT_MAT_SIMILAR(img, dst, 5e-3);
+}
+
+// Demosaicing is parameterized by device only.
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, Demosaicing, ALL_DEVICES);
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// swapChannels
+
+// Fixture for cv::cuda::swapChannels tests: device x size x ROI usage.
+PARAM_TEST_CASE(SwapChannels, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// In-place channel reorder {2,1,0,3} on a BGRA image must equal the CPU
+// BGRA->RGBA conversion exactly (zero tolerance).
+CUDA_TEST_P(SwapChannels, Accuracy)
+{
+ cv::Mat src = readImageType("stereobm/aloe-L.png", CV_8UC4);
+ ASSERT_FALSE(src.empty());
+
+ cv::cuda::GpuMat d_src = loadMat(src, useRoi);
+
+ const int dstOrder[] = {2, 1, 0, 3};
+ cv::cuda::swapChannels(d_src, dstOrder);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2RGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, d_src, 0.0);
+}
+
+// Instantiate SwapChannels over all devices, sizes and ROI modes.
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, SwapChannels, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+#endif // HAVE_CUDA
--- /dev/null
- PERF_TEST_P(ImagePair, FastOpticalFlowBM,
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "perf_precomp.hpp"
+#include "opencv2/legacy.hpp"
+
+using namespace std;
+using namespace testing;
+using namespace perf;
+
+//////////////////////////////////////////////////////
+// InterpolateFrames
+
+// Parameter type: a pair of image paths (frame 0, frame 1).
+typedef pair<string, string> pair_string;
+
+DEF_PARAM_TEST_1(ImagePair, pair_string);
+
+// Measures cv::cuda::interpolateFrames: Brox flow is computed in both
+// directions outside the timed loop; only the interpolation is timed.
+// CUDA-only — there is no CPU counterpart, so the CPU branch fails.
+PERF_TEST_P(ImagePair, InterpolateFrames,
+ Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+ cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
+ frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat d_fu, d_fv;
+ cv::cuda::GpuMat d_bu, d_bv;
+
+ cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
+ 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
+
+ d_flow(d_frame0, d_frame1, d_fu, d_fv);
+ d_flow(d_frame1, d_frame0, d_bu, d_bv);
+
+ cv::cuda::GpuMat newFrame;
+ cv::cuda::GpuMat d_buf;
+
+ TEST_CYCLE() cv::cuda::interpolateFrames(d_frame0, d_frame1, d_fu, d_fv, d_bu, d_bv, 0.5f, newFrame, d_buf);
+
+ CUDA_SANITY_CHECK(newFrame, 1e-4);
+ }
+ else
+ {
+ FAIL_NO_CPU();
+ }
+}
+
+//////////////////////////////////////////////////////
+// CreateOpticalFlowNeedleMap
+
+// Measures cv::cuda::createOpticalFlowNeedleMap; the Brox flow feeding it is
+// computed outside the timed loop. CUDA-only.
+PERF_TEST_P(ImagePair, CreateOpticalFlowNeedleMap,
+ Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+ cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
+ frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat u;
+ cv::cuda::GpuMat v;
+
+ cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
+ 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
+
+ d_flow(d_frame0, d_frame1, u, v);
+
+ cv::cuda::GpuMat vertex, colors;
+
+ TEST_CYCLE() cv::cuda::createOpticalFlowNeedleMap(u, v, vertex, colors);
+
+ CUDA_SANITY_CHECK(vertex, 1e-6);
+ CUDA_SANITY_CHECK(colors);
+ }
+ else
+ {
+ FAIL_NO_CPU();
+ }
+}
+
+//////////////////////////////////////////////////////
+// BroxOpticalFlow
+
+// Measures the full Brox dense optical flow on the GPU. CUDA-only; generous
+// time budget (300 s) because Brox flow is expensive.
+PERF_TEST_P(ImagePair, BroxOpticalFlow,
+ Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+ declare.time(300);
+
+ cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
+ frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat u;
+ cv::cuda::GpuMat v;
+
+ cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
+ 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
+
+ TEST_CYCLE() d_flow(d_frame0, d_frame1, u, v);
+
+ CUDA_SANITY_CHECK(u, 1e-1);
+ CUDA_SANITY_CHECK(v, 1e-1);
+ }
+ else
+ {
+ FAIL_NO_CPU();
+ }
+}
+
+//////////////////////////////////////////////////////
+// PyrLKOpticalFlowSparse
+
+// Parameters: image pair, gray-vs-color input, #points, window size,
+// pyramid levels, iteration count.
+DEF_PARAM_TEST(ImagePair_Gray_NPts_WinSz_Levels_Iters, pair_string, bool, int, int, int, int);
+
+// Sparse pyramidal Lucas-Kanade: GPU PyrLKOpticalFlow::sparse vs. CPU
+// cv::calcOpticalFlowPyrLK, tracking goodFeaturesToTrack corners.
+PERF_TEST_P(ImagePair_Gray_NPts_WinSz_Levels_Iters, PyrLKOpticalFlowSparse,
+ Combine(Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")),
+ Bool(),
+ Values(8000),
+ Values(21),
+ Values(1, 3),
+ Values(1, 30)))
+{
+ declare.time(20.0);
+
+ const pair_string imagePair = GET_PARAM(0);
+ const bool useGray = GET_PARAM(1);
+ const int points = GET_PARAM(2);
+ const int winSize = GET_PARAM(3);
+ const int levels = GET_PARAM(4);
+ const int iters = GET_PARAM(5);
+
+ const cv::Mat frame0 = readImage(imagePair.first, useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
+ ASSERT_FALSE(frame0.empty());
+
+ const cv::Mat frame1 = readImage(imagePair.second, useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
+ ASSERT_FALSE(frame1.empty());
+
+ // Feature detection always runs on a grayscale view of frame0.
+ cv::Mat gray_frame;
+ if (useGray)
+ gray_frame = frame0;
+ else
+ cv::cvtColor(frame0, gray_frame, cv::COLOR_BGR2GRAY);
+
+ cv::Mat pts;
+ cv::goodFeaturesToTrack(gray_frame, pts, points, 0.01, 0.0);
+
+ if (PERF_RUN_CUDA())
+ {
+ // GPU API expects the points as a single-row, 2-channel matrix.
+ const cv::cuda::GpuMat d_pts(pts.reshape(2, 1));
+
+ cv::cuda::PyrLKOpticalFlow d_pyrLK;
+ d_pyrLK.winSize = cv::Size(winSize, winSize);
+ d_pyrLK.maxLevel = levels - 1;
+ d_pyrLK.iters = iters;
+
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat nextPts;
+ cv::cuda::GpuMat status;
+
+ TEST_CYCLE() d_pyrLK.sparse(d_frame0, d_frame1, d_pts, nextPts, status);
+
+ CUDA_SANITY_CHECK(nextPts);
+ CUDA_SANITY_CHECK(status);
+ }
+ else
+ {
+ cv::Mat nextPts;
+ cv::Mat status;
+
+ TEST_CYCLE()
+ {
+ cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, cv::noArray(),
+ cv::Size(winSize, winSize), levels - 1,
+ cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, iters, 0.01));
+ }
+
+ CPU_SANITY_CHECK(nextPts);
+ CPU_SANITY_CHECK(status);
+ }
+}
+
+//////////////////////////////////////////////////////
+// PyrLKOpticalFlowDense
+
+// Parameters: image pair, window size, pyramid levels, iteration count.
+DEF_PARAM_TEST(ImagePair_WinSz_Levels_Iters, pair_string, int, int, int);
+
+// Dense pyramidal Lucas-Kanade on the GPU. CUDA-only — OpenCV has no dense
+// PyrLK CPU counterpart.
+PERF_TEST_P(ImagePair_WinSz_Levels_Iters, PyrLKOpticalFlowDense,
+ Combine(Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")),
+ Values(3, 5, 7, 9, 13, 17, 21),
+ Values(1, 3),
+ Values(1, 10)))
+{
+ declare.time(30);
+
+ const pair_string imagePair = GET_PARAM(0);
+ const int winSize = GET_PARAM(1);
+ const int levels = GET_PARAM(2);
+ const int iters = GET_PARAM(3);
+
+ const cv::Mat frame0 = readImage(imagePair.first, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ const cv::Mat frame1 = readImage(imagePair.second, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat u;
+ cv::cuda::GpuMat v;
+
+ cv::cuda::PyrLKOpticalFlow d_pyrLK;
+ d_pyrLK.winSize = cv::Size(winSize, winSize);
+ d_pyrLK.maxLevel = levels - 1;
+ d_pyrLK.iters = iters;
+
+ TEST_CYCLE() d_pyrLK.dense(d_frame0, d_frame1, u, v);
+
+ CUDA_SANITY_CHECK(u);
+ CUDA_SANITY_CHECK(v);
+ }
+ else
+ {
+ FAIL_NO_CPU();
+ }
+}
+
+//////////////////////////////////////////////////////
+// FarnebackOpticalFlow
+
+// Farneback dense flow: GPU FarnebackOpticalFlow vs. CPU
+// cv::calcOpticalFlowFarneback, with identical parameters on both paths.
+PERF_TEST_P(ImagePair, FarnebackOpticalFlow,
+ Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+ declare.time(10);
+
+ const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ const int numLevels = 5;
+ const double pyrScale = 0.5;
+ const int winSize = 13;
+ const int numIters = 10;
+ const int polyN = 5;
+ const double polySigma = 1.1;
+ const int flags = 0;
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat u;
+ cv::cuda::GpuMat v;
+
+ cv::cuda::FarnebackOpticalFlow d_farneback;
+ d_farneback.numLevels = numLevels;
+ d_farneback.pyrScale = pyrScale;
+ d_farneback.winSize = winSize;
+ d_farneback.numIters = numIters;
+ d_farneback.polyN = polyN;
+ d_farneback.polySigma = polySigma;
+ d_farneback.flags = flags;
+
+ TEST_CYCLE() d_farneback(d_frame0, d_frame1, u, v);
+
+ CUDA_SANITY_CHECK(u, 1e-4);
+ CUDA_SANITY_CHECK(v, 1e-4);
+ }
+ else
+ {
+ cv::Mat flow;
+
+ TEST_CYCLE() cv::calcOpticalFlowFarneback(frame0, frame1, flow, pyrScale, numLevels, winSize, numIters, polyN, polySigma, flags);
+
+ CPU_SANITY_CHECK(flow);
+ }
+}
+
+//////////////////////////////////////////////////////
+// OpticalFlowDual_TVL1
+
+// Dual TV-L1 dense flow: GPU OpticalFlowDual_TVL1_CUDA vs. the CPU
+// createOptFlow_DualTVL1 algorithm configured to match the GPU defaults
+// (median filtering 1, 1 inner / 300 outer iterations).
+PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
+ Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+ declare.time(20);
+
+ const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat u;
+ cv::cuda::GpuMat v;
+
+ cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg;
+
+ TEST_CYCLE() d_alg(d_frame0, d_frame1, u, v);
+
+ CUDA_SANITY_CHECK(u, 1e-1);
+ CUDA_SANITY_CHECK(v, 1e-1);
+ }
+ else
+ {
+ cv::Mat flow;
+
+ cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
+ alg->set("medianFiltering", 1);
+ alg->set("innerIterations", 1);
+ alg->set("outerIterations", 300);
+
+ TEST_CYCLE() alg->calc(frame0, frame1, flow);
+
+ CPU_SANITY_CHECK(flow);
+ }
+}
+
+//////////////////////////////////////////////////////
+// OpticalFlowBM
+
+// CPU reference for the OpticalFlowBM perf test: a thin wrapper around the
+// legacy C API cvCalcOpticalFlowBM (block-matching optical flow).
+// velx/vely are (re)allocated to hold one CV_32FC1 displacement value per
+// block position of `curr`.
+void calcOpticalFlowBM(const cv::Mat& prev, const cv::Mat& curr,
+                       cv::Size bSize, cv::Size shiftSize, cv::Size maxRange, int usePrevious,
+                       cv::Mat& velx, cv::Mat& vely)
+{
+    // Number of block positions along each axis for the given block/shift sizes.
+    cv::Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);
+
+    velx.create(sz, CV_32FC1);
+    vely.create(sz, CV_32FC1);
+
+    // The legacy call takes CvMat*. The cv::Mat -> CvMat conversions below
+    // create headers over the same pixel buffers (no copy), so the C function
+    // writes its results straight into velx/vely.
+    CvMat cvprev = prev;
+    CvMat cvcurr = curr;
+
+    CvMat cvvelx = velx;
+    CvMat cvvely = vely;
+
+    cvCalcOpticalFlowBM(&cvprev, &cvcurr, bSize, shiftSize, maxRange, usePrevious, &cvvelx, &cvvely);
+}
+
+PERF_TEST_P(ImagePair, OpticalFlowBM,
+            Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+    // Exhaustive block matching is very slow, especially on the CPU path.
+    declare.time(400);
+
+    const cv::Mat prev = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+    ASSERT_FALSE(prev.empty());
+
+    const cv::Mat curr = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+    ASSERT_FALSE(curr.empty());
+
+    const cv::Size blockSize(16, 16);
+    const cv::Size shiftSize(1, 1);
+    const cv::Size maxRange(16, 16);
+
+    if (PERF_RUN_CUDA())
+    {
+        const cv::cuda::GpuMat d_prev(prev);
+        const cv::cuda::GpuMat d_curr(curr);
+        cv::cuda::GpuMat d_velx, d_vely, d_buf;
+
+        TEST_CYCLE() cv::cuda::calcOpticalFlowBM(d_prev, d_curr, blockSize, shiftSize, maxRange, false, d_velx, d_vely, d_buf);
+
+        CUDA_SANITY_CHECK(d_velx);
+        CUDA_SANITY_CHECK(d_vely);
+    }
+    else
+    {
+        cv::Mat velx, vely;
+
+        TEST_CYCLE() calcOpticalFlowBM(prev, curr, blockSize, shiftSize, maxRange, false, velx, vely);
+
+        CPU_SANITY_CHECK(velx);
+        CPU_SANITY_CHECK(vely);
+    }
+}
+
++PERF_TEST_P(ImagePair, DISABLED_FastOpticalFlowBM,
+ Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
+{
+ declare.time(400);
+
+ const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ const cv::Size block_size(16, 16);
+ const cv::Size shift_size(1, 1);
+ const cv::Size max_range(16, 16);
+
+ if (PERF_RUN_CUDA())
+ {
+ const cv::cuda::GpuMat d_frame0(frame0);
+ const cv::cuda::GpuMat d_frame1(frame1);
+ cv::cuda::GpuMat u, v;
+
+ cv::cuda::FastOpticalFlowBM fastBM;
+
+ TEST_CYCLE() fastBM(d_frame0, d_frame1, u, v, max_range.width, block_size.width);
+
+ CUDA_SANITY_CHECK(u, 2);
+ CUDA_SANITY_CHECK(v, 2);
+ }
+ else
+ {
+ FAIL_NO_CPU();
+ }
+}