--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+#ifdef HAVE_CUDA
+
+using namespace cvtest;
+
+////////////////////////////////////////////////////////////////////////////////
+// Norm
+
+// Test fixture for cv::cuda::norm, parameterized over device, matrix size,
+// depth, norm flavour (L1/L2/INF) and whole-matrix vs. sub-matrix (ROI) input.
+PARAM_TEST_CASE(Norm, cv::cuda::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int normCode;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ normCode = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// Compares the masked CUDA norm against cv::norm. Integer depths must match
+// exactly; CV_32F gets a 1.0 absolute tolerance.
+CUDA_TEST_P(Norm, Accuracy)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
+
+ cv::cuda::GpuMat d_buf;
+ double val = cv::cuda::norm(loadMat(src, useRoi), normCode, loadMat(mask, useRoi), d_buf);
+
+ double val_gold = cv::norm(src, normCode, mask);
+
+ EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Norm, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_8S),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32S),
+ MatDepth(CV_32F)),
+ testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// normDiff
+
+// Fixture for the two-matrix cv::cuda::norm overload (norm of the difference).
+PARAM_TEST_CASE(NormDiff, cv::cuda::DeviceInfo, cv::Size, NormCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int normCode;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ normCode = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+// 8-bit inputs only, so the GPU result must match cv::norm exactly (tol 0.0).
+CUDA_TEST_P(NormDiff, Accuracy)
+{
+ cv::Mat src1 = randomMat(size, CV_8UC1);
+ cv::Mat src2 = randomMat(size, CV_8UC1);
+
+ double val = cv::cuda::norm(loadMat(src1, useRoi), loadMat(src2, useRoi), normCode);
+
+ double val_gold = cv::norm(src1, src2, normCode);
+
+ EXPECT_NEAR(val_gold, val, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, NormDiff, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Sum
+
+// CPU reference ("gold") implementations for absSum/sqrSum, per element type.
+namespace
+{
+ // Sum of absolute values, accumulated per channel into a cv::Scalar.
+ template <typename T>
+ cv::Scalar absSumImpl(const cv::Mat& src)
+ {
+ const int cn = src.channels();
+
+ cv::Scalar sum = cv::Scalar::all(0);
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ // Channels are interleaved, so channel c of pixel x lives at
+ // element index x * cn + c when addressed as a single-channel T.
+ for (int c = 0; c < cn; ++c)
+ sum[c] += std::abs(src.at<T>(y, x * cn + c));
+ }
+ }
+
+ return sum;
+ }
+
+ // Dispatches absSumImpl on src.depth() (CV_8U..CV_64F, in enum order).
+ cv::Scalar absSumGold(const cv::Mat& src)
+ {
+ typedef cv::Scalar (*func_t)(const cv::Mat& src);
+
+ static const func_t funcs[] =
+ {
+ absSumImpl<uchar>,
+ absSumImpl<schar>,
+ absSumImpl<ushort>,
+ absSumImpl<short>,
+ absSumImpl<int>,
+ absSumImpl<float>,
+ absSumImpl<double>
+ };
+
+ return funcs[src.depth()](src);
+ }
+
+ // Sum of squared values, accumulated per channel into a cv::Scalar.
+ template <typename T>
+ cv::Scalar sqrSumImpl(const cv::Mat& src)
+ {
+ const int cn = src.channels();
+
+ cv::Scalar sum = cv::Scalar::all(0);
+
+ for (int y = 0; y < src.rows; ++y)
+ {
+ for (int x = 0; x < src.cols; ++x)
+ {
+ for (int c = 0; c < cn; ++c)
+ {
+ const T val = src.at<T>(y, x * cn + c);
+ sum[c] += val * val;
+ }
+ }
+ }
+
+ return sum;
+ }
+
+ // Dispatches sqrSumImpl on src.depth() (CV_8U..CV_64F, in enum order).
+ cv::Scalar sqrSumGold(const cv::Mat& src)
+ {
+ typedef cv::Scalar (*func_t)(const cv::Mat& src);
+
+ static const func_t funcs[] =
+ {
+ sqrSumImpl<uchar>,
+ sqrSumImpl<schar>,
+ sqrSumImpl<ushort>,
+ sqrSumImpl<short>,
+ sqrSumImpl<int>,
+ sqrSumImpl<float>,
+ sqrSumImpl<double>
+ };
+
+ return funcs[src.depth()](src);
+ }
+}
+
+// Fixture for cv::cuda::sum / absSum / sqrSum; src is generated once in SetUp
+// with values in [-128, 128) so abs/sqr sums are non-trivial for signed types.
+PARAM_TEST_CASE(Sum, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ bool useRoi;
+
+ cv::Mat src;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ src = randomMat(size, type, -128.0, 128.0);
+ }
+};
+
+// Plain sum vs cv::sum; 0.5 tolerance only for floating-point depths.
+CUDA_TEST_P(Sum, Simple)
+{
+ cv::Scalar val = cv::cuda::sum(loadMat(src, useRoi));
+
+ cv::Scalar val_gold = cv::sum(src);
+
+ EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+// Absolute-value sum vs the local absSumGold reference.
+CUDA_TEST_P(Sum, Abs)
+{
+ cv::Scalar val = cv::cuda::absSum(loadMat(src, useRoi));
+
+ cv::Scalar val_gold = absSumGold(src);
+
+ EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+// Squared sum vs the local sqrSumGold reference.
+CUDA_TEST_P(Sum, Sqr)
+{
+ cv::Scalar val = cv::cuda::sqrSum(loadMat(src, useRoi));
+
+ cv::Scalar val_gold = sqrSumGold(src);
+
+ EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sum, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ TYPES(CV_8U, CV_64F, 1, 4),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// MinMax
+
+// Fixture for cv::cuda::minMax (values only, no locations).
+PARAM_TEST_CASE(MinMax, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(MinMax, WithoutMask)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ // On devices without native double support, CV_64F input is expected to
+ // throw StsUnsupportedFormat.
+ // NOTE(review): if no exception is thrown the try-branch silently passes;
+ // there is no ADD_FAILURE after the call — confirm this is intentional.
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src), &minVal, &maxVal);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src, useRoi), &minVal, &maxVal);
+
+ double minVal_gold, maxVal_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+ }
+}
+
+// Same as WithoutMask but restricts the reduction with a random 0/1 mask.
+CUDA_TEST_P(MinMax, WithMask)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src), &minVal, &maxVal, loadMat(mask));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src, useRoi), &minVal, &maxVal, loadMat(mask, useRoi));
+
+ double minVal_gold, maxVal_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, 0, 0, mask);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+ }
+}
+
+// Each output pointer may be null; the other value must still be computed.
+CUDA_TEST_P(MinMax, NullPtr)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src), &minVal, 0);
+ cv::cuda::minMax(loadMat(src), 0, &maxVal);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::cuda::minMax(loadMat(src, useRoi), &minVal, 0);
+ cv::cuda::minMax(loadMat(src, useRoi), 0, &maxVal);
+
+ double minVal_gold, maxVal_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, 0, 0);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMax, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// MinMaxLoc
+
+// Location checks compare the *values* at the two locations rather than the
+// coordinates themselves, because ties make the exact argmin/argmax ambiguous.
+namespace
+{
+ template <typename T>
+ void expectEqualImpl(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
+ {
+ EXPECT_EQ(src.at<T>(loc_gold.y, loc_gold.x), src.at<T>(loc.y, loc.x));
+ }
+
+ // Dispatches expectEqualImpl on src.depth() (CV_8U..CV_64F, in enum order).
+ void expectEqual(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
+ {
+ typedef void (*func_t)(const cv::Mat& src, cv::Point loc_gold, cv::Point loc);
+
+ static const func_t funcs[] =
+ {
+ expectEqualImpl<uchar>,
+ expectEqualImpl<schar>,
+ expectEqualImpl<ushort>,
+ expectEqualImpl<short>,
+ expectEqualImpl<int>,
+ expectEqualImpl<float>,
+ expectEqualImpl<double>
+ };
+
+ funcs[src.depth()](src, loc_gold, loc);
+ }
+}
+
+// Fixture for cv::cuda::minMaxLoc (values plus argmin/argmax locations).
+PARAM_TEST_CASE(MinMaxLoc, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(MinMaxLoc, WithoutMask)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ // CV_64F without native double support is expected to throw
+ // StsUnsupportedFormat (see the catch branch).
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src), &minVal, &maxVal, &minLoc, &maxLoc);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc);
+
+ double minVal_gold, maxVal_gold;
+ cv::Point minLoc_gold, maxLoc_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+
+ // Compare values at the reported locations, not the coordinates,
+ // since ties make the exact location ambiguous.
+ expectEqual(src, minLoc_gold, minLoc);
+ expectEqual(src, maxLoc_gold, maxLoc);
+ }
+}
+
+// Same as WithoutMask but restricted by a random 0/1 mask.
+CUDA_TEST_P(MinMaxLoc, WithMask)
+{
+ cv::Mat src = randomMat(size, depth);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask, useRoi));
+
+ double minVal_gold, maxVal_gold;
+ cv::Point minLoc_gold, maxLoc_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold, mask);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+
+ expectEqual(src, minLoc_gold, minLoc);
+ expectEqual(src, maxLoc_gold, maxLoc);
+ }
+}
+
+// Any subset of the four output pointers may be null.
+CUDA_TEST_P(MinMaxLoc, NullPtr)
+{
+ cv::Mat src = randomMat(size, depth);
+
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, 0, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, &maxVal, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, &minLoc, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, 0, &maxLoc);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ double minVal, maxVal;
+ cv::Point minLoc, maxLoc;
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, 0, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, &maxVal, 0, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, &minLoc, 0);
+ cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, 0, &maxLoc);
+
+ double minVal_gold, maxVal_gold;
+ cv::Point minLoc_gold, maxLoc_gold;
+ minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);
+
+ EXPECT_DOUBLE_EQ(minVal_gold, minVal);
+ EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
+
+ expectEqual(src, minLoc_gold, minLoc);
+ expectEqual(src, maxLoc_gold, maxLoc);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMaxLoc, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////
+// CountNonZero
+
+// Fixture for cv::cuda::countNonZero.
+PARAM_TEST_CASE(CountNonZero, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(CountNonZero, Accuracy)
+{
+ // Random 0/1 base converted to the tested depth, so roughly half the
+ // elements are non-zero regardless of depth.
+ cv::Mat srcBase = randomMat(size, CV_8U, 0.0, 1.5);
+ cv::Mat src;
+ srcBase.convertTo(src, depth);
+
+ // CV_64F without native double support is expected to throw.
+ if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
+ {
+ try
+ {
+ cv::cuda::countNonZero(loadMat(src));
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
+ }
+ }
+ else
+ {
+ int val = cv::cuda::countNonZero(loadMat(src, useRoi));
+
+ int val_gold = cv::countNonZero(src);
+
+ ASSERT_EQ(val_gold, val);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CountNonZero, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Reduce
+
+CV_ENUM(ReduceCode, cv::REDUCE_SUM, cv::REDUCE_AVG, cv::REDUCE_MAX, cv::REDUCE_MIN)
+#define ALL_REDUCE_CODES testing::Values(ReduceCode(cv::REDUCE_SUM), ReduceCode(cv::REDUCE_AVG), ReduceCode(cv::REDUCE_MAX), ReduceCode(cv::REDUCE_MIN))
+
+// Fixture for cv::cuda::reduce; the destination depth is derived from the
+// source depth and reduce op in SetUp, mirroring cv::reduce's promotion rules.
+PARAM_TEST_CASE(Reduce, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, ReduceCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ int channels;
+ int reduceOp;
+ bool useRoi;
+
+ int type;
+ int dst_depth;
+ int dst_type;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ channels = GET_PARAM(3);
+ reduceOp = GET_PARAM(4);
+ useRoi = GET_PARAM(5);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ type = CV_MAKE_TYPE(depth, channels);
+
+ // MIN/MAX keep the source depth; SUM promotes CV_8U to CV_32S and
+ // other sub-double depths to CV_32F; AVG promotes everything below
+ // CV_32F to CV_32F.
+ if (reduceOp == cv::REDUCE_MAX || reduceOp == cv::REDUCE_MIN)
+ dst_depth = depth;
+ else if (reduceOp == cv::REDUCE_SUM)
+ dst_depth = depth == CV_8U ? CV_32S : depth < CV_64F ? CV_32F : depth;
+ else
+ dst_depth = depth < CV_32F ? CV_32F : depth;
+
+ dst_type = CV_MAKE_TYPE(dst_depth, channels);
+ }
+
+};
+
+// dim = 0: reduce each column to a single row.
+CUDA_TEST_P(Reduce, Rows)
+{
+ cv::Mat src = randomMat(size, type);
+
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.cols, 1), dst_type, useRoi);
+ cv::cuda::reduce(loadMat(src, useRoi), dst, 0, reduceOp, dst_depth);
+
+ cv::Mat dst_gold;
+ cv::reduce(src, dst_gold, 0, reduceOp, dst_depth);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
+}
+
+// dim = 1: reduce each row; the CUDA result is a row vector while cv::reduce
+// yields a column vector, so the gold Mat header is transposed in place.
+CUDA_TEST_P(Reduce, Cols)
+{
+ cv::Mat src = randomMat(size, type);
+
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.rows, 1), dst_type, useRoi);
+ cv::cuda::reduce(loadMat(src, useRoi), dst, 1, reduceOp, dst_depth);
+
+ cv::Mat dst_gold;
+ cv::reduce(src, dst_gold, 1, reduceOp, dst_depth);
+ dst_gold.cols = dst_gold.rows;
+ dst_gold.rows = 1;
+ dst_gold.step = dst_gold.cols * dst_gold.elemSize();
+
+ EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Reduce, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ testing::Values(MatDepth(CV_8U),
+ MatDepth(CV_16U),
+ MatDepth(CV_16S),
+ MatDepth(CV_32F),
+ MatDepth(CV_64F)),
+ ALL_CHANNELS,
+ ALL_REDUCE_CODES,
+ WHOLE_SUBMAT));
+
+//////////////////////////////////////////////////////////////////////////////
+// Normalize
+
+// Fixture for cv::cuda::normalize; alpha/beta are fixed to 1/0.
+PARAM_TEST_CASE(Normalize, cv::cuda::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int type;
+ int norm_type;
+ bool useRoi;
+
+ double alpha;
+ double beta;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ type = GET_PARAM(2);
+ norm_type = GET_PARAM(3);
+ useRoi = GET_PARAM(4);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ alpha = 1;
+ beta = 0;
+ }
+
+};
+
+CUDA_TEST_P(Normalize, WithOutMask)
+{
+ cv::Mat src = randomMat(size, type);
+
+ cv::cuda::GpuMat dst = createMat(size, type, useRoi);
+ cv::cuda::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type);
+
+ cv::Mat dst_gold;
+ cv::normalize(src, dst_gold, alpha, beta, norm_type, type);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, type < CV_32F ? 1.0 : 1e-4);
+}
+
+// Both dst buffers are zero-initialised so unmasked pixels compare equal.
+CUDA_TEST_P(Normalize, WithMask)
+{
+ cv::Mat src = randomMat(size, type);
+ cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
+
+ cv::cuda::GpuMat dst = createMat(size, type, useRoi);
+ dst.setTo(cv::Scalar::all(0));
+ cv::cuda::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type, loadMat(mask, useRoi));
+
+ cv::Mat dst_gold(size, type);
+ dst_gold.setTo(cv::Scalar::all(0));
+ cv::normalize(src, dst_gold, alpha, beta, norm_type, type, mask);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Normalize, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ ALL_DEPTH,
+ testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF), NormCode(cv::NORM_MINMAX)),
+ WHOLE_SUBMAT));
+
+////////////////////////////////////////////////////////////////////////////////
+// MeanStdDev
+
+// Fixture for cv::cuda::meanStdDev (8-bit single-channel input only).
+PARAM_TEST_CASE(MeanStdDev, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(MeanStdDev, Accuracy)
+{
+ cv::Mat src = randomMat(size, CV_8UC1);
+
+ // Devices below compute capability 1.3 are expected to report
+ // StsNotImplemented rather than compute a result.
+ if (!supportFeature(devInfo, cv::cuda::FEATURE_SET_COMPUTE_13))
+ {
+ try
+ {
+ cv::Scalar mean;
+ cv::Scalar stddev;
+ cv::cuda::meanStdDev(loadMat(src, useRoi), mean, stddev);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
+ }
+ }
+ else
+ {
+ cv::Scalar mean;
+ cv::Scalar stddev;
+ cv::cuda::meanStdDev(loadMat(src, useRoi), mean, stddev);
+
+ cv::Scalar mean_gold;
+ cv::Scalar stddev_gold;
+ cv::meanStdDev(src, mean_gold, stddev_gold);
+
+ EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
+ EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MeanStdDev, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// Integral
+
+// Fixture for cv::cuda::integral (summed-area table, CV_32S output).
+PARAM_TEST_CASE(Integral, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(Integral, Accuracy)
+{
+ cv::Mat src = randomMat(size, CV_8UC1);
+
+ // Integral images are one row/column larger than the source.
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_32SC1, useRoi);
+ cv::cuda::integral(loadMat(src, useRoi), dst);
+
+ cv::Mat dst_gold;
+ cv::integral(src, dst_gold, CV_32S);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Integral, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// IntegralSqr
+
+// Fixture for cv::cuda::sqrIntegral (squared summed-area table, CV_64F output).
+PARAM_TEST_CASE(IntegralSqr, cv::cuda::DeviceInfo, cv::Size, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ bool useRoi;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ useRoi = GET_PARAM(2);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+ }
+};
+
+CUDA_TEST_P(IntegralSqr, Accuracy)
+{
+ cv::Mat src = randomMat(size, CV_8UC1);
+
+ cv::cuda::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_64FC1, useRoi);
+ cv::cuda::sqrIntegral(loadMat(src, useRoi), dst);
+
+ // cv::integral's third output argument is the squared integral image;
+ // the plain integral (temp) is discarded.
+ cv::Mat dst_gold, temp;
+ cv::integral(src, temp, dst_gold);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, IntegralSqr, testing::Combine(
+ ALL_DEVICES,
+ DIFFERENT_SIZES,
+ WHOLE_SUBMAT));
+
+#endif // HAVE_CUDA
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#if !defined CUDA_DISABLER
+
+#include <utility>
+#include <algorithm>
+#include "opencv2/core/cuda/common.hpp"
+#include "opencv2/core/cuda/emulation.hpp"
+#include "opencv2/core/cuda/transform.hpp"
+#include "opencv2/core/cuda/functional.hpp"
+#include "opencv2/core/cuda/utility.hpp"
+
+using namespace cv::cuda;
+using namespace cv::cuda::device;
+
+namespace canny
+{
+ struct L1 : binary_function<int, int, float>
+ {
+ __device__ __forceinline__ float operator ()(int x, int y) const
+ {
+ return ::abs(x) + ::abs(y);
+ }
+
+ __host__ __device__ __forceinline__ L1() {}
+ __host__ __device__ __forceinline__ L1(const L1&) {}
+ };
+ struct L2 : binary_function<int, int, float>
+ {
+ __device__ __forceinline__ float operator ()(int x, int y) const
+ {
+ return ::sqrtf(x * x + y * y);
+ }
+
+ __host__ __device__ __forceinline__ L2() {}
+ __host__ __device__ __forceinline__ L2(const L2&) {}
+ };
+}
+
+// Tuning traits for device::transform when used with the canny norm functors.
+namespace cv { namespace cuda { namespace device
+{
+ template <> struct TransformFunctorTraits<canny::L1> : DefaultTransformFunctorTraits<canny::L1>
+ {
+ enum { smart_shift = 4 };
+ };
+ template <> struct TransformFunctorTraits<canny::L2> : DefaultTransformFunctorTraits<canny::L2>
+ {
+ enum { smart_shift = 4 };
+ };
+}}}
+
+namespace canny
+{
+ // Clamped, point-sampled texture over the full source image; SrcTex adds
+ // the ROI offset so Sobel taps can safely read one pixel outside the ROI.
+ texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_src(false, cudaFilterModePoint, cudaAddressModeClamp);
+ struct SrcTex
+ {
+ int xoff;
+ int yoff;
+ __host__ SrcTex(int _xoff, int _yoff) : xoff(_xoff), yoff(_yoff) {}
+
+ __device__ __forceinline__ int operator ()(int y, int x) const
+ {
+ return tex2D(tex_src, x + xoff, y + yoff);
+ }
+ };
+
+ // Computes 3x3 Sobel derivatives and the gradient magnitude in one pass.
+ template <class Norm> __global__
+ void calcMagnitudeKernel(const SrcTex src, PtrStepi dx, PtrStepi dy, PtrStepSzf mag, const Norm norm)
+ {
+ const int x = blockIdx.x * blockDim.x + threadIdx.x;
+ const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+ if (y >= mag.rows || x >= mag.cols)
+ return;
+
+ int dxVal = (src(y - 1, x + 1) + 2 * src(y, x + 1) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y, x - 1) + src(y + 1, x - 1));
+ int dyVal = (src(y + 1, x - 1) + 2 * src(y + 1, x) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y - 1, x) + src(y - 1, x + 1));
+
+ dx(y, x) = dxVal;
+ dy(y, x) = dyVal;
+
+ mag(y, x) = norm(dxVal, dyVal);
+ }
+
+ // Launcher: binds the source texture and runs calcMagnitudeKernel with the
+ // requested norm (L2Grad selects L2, otherwise L1).
+ void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad)
+ {
+ const dim3 block(16, 16);
+ const dim3 grid(divUp(mag.cols, block.x), divUp(mag.rows, block.y));
+
+ bindTexture(&tex_src, srcWhole);
+ SrcTex src(xoff, yoff);
+
+ if (L2Grad)
+ {
+ L2 norm;
+ calcMagnitudeKernel<<<grid, block>>>(src, dx, dy, mag, norm);
+ }
+ else
+ {
+ L1 norm;
+ calcMagnitudeKernel<<<grid, block>>>(src, dx, dy, mag, norm);
+ }
+
+ cudaSafeCall( cudaGetLastError() );
+
+ // cudaThreadSynchronize() is deprecated in the CUDA runtime API;
+ // use cudaDeviceSynchronize() like the other launchers in this file.
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
+
+ // Variant for precomputed derivatives: magnitude only, via device::transform.
+ void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad)
+ {
+ if (L2Grad)
+ {
+ L2 norm;
+ transform(dx, dy, mag, norm, WithOutMask(), 0);
+ }
+ else
+ {
+ L1 norm;
+ transform(dx, dy, mag, norm, WithOutMask(), 0);
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+namespace canny
+{
+ texture<float, cudaTextureType2D, cudaReadModeElementType> tex_mag(false, cudaFilterModePoint, cudaAddressModeClamp);
+
+ // Non-maximum suppression: classifies each pixel into the edge map using
+ // fixed-point tangent comparisons against tan(22.5deg) and tan(67.5deg).
+ __global__ void calcMapKernel(const PtrStepSzi dx, const PtrStepi dy, PtrStepi map, const float low_thresh, const float high_thresh)
+ {
+ const int CANNY_SHIFT = 15;
+ const int TG22 = (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5);
+
+ const int x = blockIdx.x * blockDim.x + threadIdx.x;
+ const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+ // The one-pixel border is left untouched (map border stays as-is).
+ if (x == 0 || x >= dx.cols - 1 || y == 0 || y >= dx.rows - 1)
+ return;
+
+ int dxVal = dx(y, x);
+ int dyVal = dy(y, x);
+
+ // s encodes the sign of the gradient direction for the diagonal case.
+ const int s = (dxVal ^ dyVal) < 0 ? -1 : 1;
+ const float m = tex2D(tex_mag, x, y);
+
+ dxVal = ::abs(dxVal);
+ dyVal = ::abs(dyVal);
+
+ // 0 - the pixel can not belong to an edge
+ // 1 - the pixel might belong to an edge
+ // 2 - the pixel does belong to an edge
+ int edge_type = 0;
+
+ if (m > low_thresh)
+ {
+ const int tg22x = dxVal * TG22;
+ const int tg67x = tg22x + ((dxVal + dxVal) << CANNY_SHIFT);
+
+ dyVal <<= CANNY_SHIFT;
+
+ if (dyVal < tg22x)
+ {
+ // Near-horizontal gradient: compare with left/right neighbours.
+ if (m > tex2D(tex_mag, x - 1, y) && m >= tex2D(tex_mag, x + 1, y))
+ edge_type = 1 + (int)(m > high_thresh);
+ }
+ else if(dyVal > tg67x)
+ {
+ // Near-vertical gradient: compare with top/bottom neighbours.
+ if (m > tex2D(tex_mag, x, y - 1) && m >= tex2D(tex_mag, x, y + 1))
+ edge_type = 1 + (int)(m > high_thresh);
+ }
+ else
+ {
+ // Diagonal gradient: compare along the signed diagonal.
+ if (m > tex2D(tex_mag, x - s, y - 1) && m >= tex2D(tex_mag, x + s, y + 1))
+ edge_type = 1 + (int)(m > high_thresh);
+ }
+ }
+
+ map(y, x) = edge_type;
+ }
+
+ // Launcher: binds the magnitude texture and runs calcMapKernel.
+ void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh)
+ {
+ const dim3 block(16, 16);
+ const dim3 grid(divUp(dx.cols, block.x), divUp(dx.rows, block.y));
+
+ bindTexture(&tex_mag, mag);
+
+ calcMapKernel<<<grid, block>>>(dx, dy, map, low_thresh, high_thresh);
+ cudaSafeCall( cudaGetLastError() );
+
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+// Block-local hysteresis: promotes "maybe" pixels (1) to edges (2) inside a
+// 16x16 tile using a shared-memory halo, then records edge pixels that still
+// have unresolved "maybe" neighbours into the stack st for the global pass.
+// (Hunk repaired: stale interdiff removal lines dropped and '++' markers
+// normalized to '+'; the retained version uses checkIdx bounds tests and
+// short2 stack entries.)
+namespace canny
+{
+ __device__ int counter = 0;
+
+ __device__ __forceinline__ bool checkIdx(int y, int x, int rows, int cols)
+ {
+ return (y >= 0) && (y < rows) && (x >= 0) && (x < cols);
+ }
+
+ __global__ void edgesHysteresisLocalKernel(PtrStepSzi map, short2* st)
+ {
+ __shared__ volatile int smem[18][18];
+
+ const int x = blockIdx.x * blockDim.x + threadIdx.x;
+ const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+ smem[threadIdx.y + 1][threadIdx.x + 1] = checkIdx(y, x, map.rows, map.cols) ? map(y, x) : 0;
+ if (threadIdx.y == 0)
+ smem[0][threadIdx.x + 1] = checkIdx(y - 1, x, map.rows, map.cols) ? map(y - 1, x) : 0;
+ if (threadIdx.y == blockDim.y - 1)
+ smem[blockDim.y + 1][threadIdx.x + 1] = checkIdx(y + 1, x, map.rows, map.cols) ? map(y + 1, x) : 0;
+ if (threadIdx.x == 0)
+ smem[threadIdx.y + 1][0] = checkIdx(y, x - 1, map.rows, map.cols) ? map(y, x - 1) : 0;
+ if (threadIdx.x == blockDim.x - 1)
+ smem[threadIdx.y + 1][blockDim.x + 1] = checkIdx(y, x + 1, map.rows, map.cols) ? map(y, x + 1) : 0;
+ if (threadIdx.x == 0 && threadIdx.y == 0)
+ smem[0][0] = checkIdx(y - 1, x - 1, map.rows, map.cols) ? map(y - 1, x - 1) : 0;
+ if (threadIdx.x == blockDim.x - 1 && threadIdx.y == 0)
+ smem[0][blockDim.x + 1] = checkIdx(y - 1, x + 1, map.rows, map.cols) ? map(y - 1, x + 1) : 0;
+ if (threadIdx.x == 0 && threadIdx.y == blockDim.y - 1)
+ smem[blockDim.y + 1][0] = checkIdx(y + 1, x - 1, map.rows, map.cols) ? map(y + 1, x - 1) : 0;
+ if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1)
+ smem[blockDim.y + 1][blockDim.x + 1] = checkIdx(y + 1, x + 1, map.rows, map.cols) ? map(y + 1, x + 1) : 0;
+
+ __syncthreads();
+
+ if (x >= map.cols || y >= map.rows)
+ return;
+
+ int n;
+
+ // 16 relaxation sweeps propagate edge labels across the tile.
+ #pragma unroll
+ for (int k = 0; k < 16; ++k)
+ {
+ n = 0;
+
+ if (smem[threadIdx.y + 1][threadIdx.x + 1] == 1)
+ {
+ n += smem[threadIdx.y ][threadIdx.x ] == 2;
+ n += smem[threadIdx.y ][threadIdx.x + 1] == 2;
+ n += smem[threadIdx.y ][threadIdx.x + 2] == 2;
+
+ n += smem[threadIdx.y + 1][threadIdx.x ] == 2;
+ n += smem[threadIdx.y + 1][threadIdx.x + 2] == 2;
+
+ n += smem[threadIdx.y + 2][threadIdx.x ] == 2;
+ n += smem[threadIdx.y + 2][threadIdx.x + 1] == 2;
+ n += smem[threadIdx.y + 2][threadIdx.x + 2] == 2;
+ }
+
+ if (n > 0)
+ smem[threadIdx.y + 1][threadIdx.x + 1] = 2;
+ }
+
+ const int e = smem[threadIdx.y + 1][threadIdx.x + 1];
+
+ map(y, x) = e;
+
+ n = 0;
+
+ // Edge pixels with remaining "maybe" neighbours are pushed to the
+ // stack so the global pass can continue propagation across tiles.
+ if (e == 2)
+ {
+ n += smem[threadIdx.y ][threadIdx.x ] == 1;
+ n += smem[threadIdx.y ][threadIdx.x + 1] == 1;
+ n += smem[threadIdx.y ][threadIdx.x + 2] == 1;
+
+ n += smem[threadIdx.y + 1][threadIdx.x ] == 1;
+ n += smem[threadIdx.y + 1][threadIdx.x + 2] == 1;
+
+ n += smem[threadIdx.y + 2][threadIdx.x ] == 1;
+ n += smem[threadIdx.y + 2][threadIdx.x + 1] == 1;
+ n += smem[threadIdx.y + 2][threadIdx.x + 2] == 1;
+ }
+
+ if (n > 0)
+ {
+ const int ind = ::atomicAdd(&counter, 1);
+ st[ind] = make_short2(x, y);
+ }
+ }
+
+ // Launcher: zeroes the device stack counter, then runs the local pass.
+ void edgesHysteresisLocal(PtrStepSzi map, short2* st1)
+ {
+ void* counter_ptr;
+ cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, counter) );
+
+ cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(int)) );
+
+ const dim3 block(16, 16);
+ const dim3 grid(divUp(map.cols, block.x), divUp(map.rows, block.y));
+
+ edgesHysteresisLocalKernel<<<grid, block>>>(map, st1);
+ cudaSafeCall( cudaGetLastError() );
+
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+namespace canny
+{
+ __constant__ int c_dx[8] = {-1, 0, 1, -1, 1, -1, 0, 1};
+ __constant__ int c_dy[8] = {-1, -1, -1, 0, 0, 1, 1, 1};
+
- __shared__ ushort2 s_st[stack_size];
++ __global__ void edgesHysteresisGlobalKernel(PtrStepSzi map, short2* st1, short2* st2, const int count)
+ {
+ const int stack_size = 512;
+
+ __shared__ int s_counter;
+ __shared__ int s_ind;
- ushort2 pos = st1[ind];
++ __shared__ short2 s_st[stack_size];
+
+ if (threadIdx.x == 0)
+ s_counter = 0;
+
+ __syncthreads();
+
+ int ind = blockIdx.y * gridDim.x + blockIdx.x;
+
+ if (ind >= count)
+ return;
+
- if (pos.x > 0 && pos.x < map.cols && pos.y > 0 && pos.y < map.rows && map(pos.y, pos.x) == 1)
++ short2 pos = st1[ind];
+
+ if (threadIdx.x < 8)
+ {
+ pos.x += c_dx[threadIdx.x];
+ pos.y += c_dy[threadIdx.x];
+
- if (pos.x > 0 && pos.x < map.cols && pos.y > 0 && pos.y < map.rows && map(pos.y, pos.x) == 1)
++ if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1)
+ {
+ map(pos.y, pos.x) = 2;
+
+ ind = Emulation::smem::atomicAdd(&s_counter, 1);
+
+ s_st[ind] = pos;
+ }
+ }
+
+ __syncthreads();
+
+ while (s_counter > 0 && s_counter <= stack_size - blockDim.x)
+ {
+ const int subTaskIdx = threadIdx.x >> 3;
+ const int portion = ::min(s_counter, blockDim.x >> 3);
+
+ if (subTaskIdx < portion)
+ pos = s_st[s_counter - 1 - subTaskIdx];
+
+ __syncthreads();
+
+ if (threadIdx.x == 0)
+ s_counter -= portion;
+
+ __syncthreads();
+
+ if (subTaskIdx < portion)
+ {
+ pos.x += c_dx[threadIdx.x & 7];
+ pos.y += c_dy[threadIdx.x & 7];
+
- ind = ::atomicAdd(&counter, s_counter);
- s_ind = ind - s_counter;
++ if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1)
+ {
+ map(pos.y, pos.x) = 2;
+
+ ind = Emulation::smem::atomicAdd(&s_counter, 1);
+
+ s_st[ind] = pos;
+ }
+ }
+
+ __syncthreads();
+ }
+
+ if (s_counter > 0)
+ {
+ if (threadIdx.x == 0)
+ {
- void edgesHysteresisGlobal(PtrStepSzi map, ushort2* st1, ushort2* st2)
++ s_ind = ::atomicAdd(&counter, s_counter);
++
++ if (s_ind + s_counter > map.cols * map.rows)
++ s_counter = 0;
+ }
+
+ __syncthreads();
+
+ ind = s_ind;
+
+ for (int i = threadIdx.x; i < s_counter; i += blockDim.x)
+ st2[ind + i] = s_st[i];
+ }
+ }
+
++ void edgesHysteresisGlobal(PtrStepSzi map, short2* st1, short2* st2)
+ {
+ void* counter_ptr;
+ cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, canny::counter) );
+
+ int count;
+ cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost) );
+
+ while (count > 0)
+ {
+ cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(int)) );
+
+ const dim3 block(128);
+ const dim3 grid(::min(count, 65535u), divUp(count, 65535), 1);
+
+ edgesHysteresisGlobalKernel<<<grid, block>>>(map, st1, st2, count);
+ cudaSafeCall( cudaGetLastError() );
+
+ cudaSafeCall( cudaDeviceSynchronize() );
+
+ cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost) );
+
++ count = min(count, map.cols * map.rows);
++
+ std::swap(st1, st2);
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+namespace canny
+{
+ struct GetEdges : unary_function<int, uchar>
+ {
+ __device__ __forceinline__ uchar operator ()(int e) const
+ {
+ return (uchar)(-(e >> 1));
+ }
+
+ __host__ __device__ __forceinline__ GetEdges() {}
+ __host__ __device__ __forceinline__ GetEdges(const GetEdges&) {}
+ };
+}
+
+namespace cv { namespace cuda { namespace device
+{
+ template <> struct TransformFunctorTraits<canny::GetEdges> : DefaultTransformFunctorTraits<canny::GetEdges>
+ {
+ enum { smart_shift = 4 };
+ };
+}}}
+
+namespace canny
+{
+ void getEdges(PtrStepSzi map, PtrStepSzb dst)
+ {
+ transform(map, dst, GetEdges(), WithOutMask(), 0);
+ }
+}
+
+#endif /* CUDA_DISABLER */
--- /dev/null
- EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+#ifdef HAVE_CUDA
+
+using namespace cvtest;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// cvtColor
+
+PARAM_TEST_CASE(CvtColor, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
+{
+ cv::cuda::DeviceInfo devInfo;
+ cv::Size size;
+ int depth;
+ bool useRoi;
+
+ cv::Mat img;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ size = GET_PARAM(1);
+ depth = GET_PARAM(2);
+ useRoi = GET_PARAM(3);
+
+ cv::cuda::setDevice(devInfo.deviceID());
+
+ img = randomMat(size, CV_MAKE_TYPE(depth, 3), 0.0, depth == CV_32F ? 1.0 : 255.0);
+ }
+};
+
+CUDA_TEST_P(CvtColor, BGR2RGB)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2RGBA)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2RGBA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2RGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2BGRA)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2BGRA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2BGRA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2RGBA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2RGBA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2RGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2GRAY)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, RGB2GRAY)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGRA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGRA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGRA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2GRAY)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2GRAY)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGB2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652BGR)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652RGB)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652BGRA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652BGRA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652BGRA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652RGBA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652RGBA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652RGBA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGR565)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGR565);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGR565);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5652GRAY)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR565);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5652GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5652GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGB2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552BGR)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552RGB)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGRA2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552BGRA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552BGRA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552BGRA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552RGBA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552RGBA, 4);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552RGBA, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, GRAY2BGR555)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_GRAY2BGR555);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_GRAY2BGR555);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR5552GRAY)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGR555);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR5552GRAY);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR5552GRAY);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
+}
+
+CUDA_TEST_P(CvtColor, BGR2XYZ)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2XYZ);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, RGB2XYZ)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2XYZ);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2XYZ4)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2XYZ, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2XYZ);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGRA2XYZ4)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2XYZ, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2XYZ);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ2RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ42BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2BGR);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, XYZ42BGRA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_XYZ2BGR, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_XYZ2BGR, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2YCrCb)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YCrCb);
+
- EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
++ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2YCrCb)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2YCrCb);
+
- EXPECT_MAT_NEAR(dst_gold, h_dst, 1.0);
++ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, BGR2YCrCb4)
+{
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YCrCb, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
- EXPECT_MAT_NEAR(dst_gold, h_dst, 1.0);
++ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2YCrCb4)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YCrCb, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
++ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb2BGR)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb2RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb42RGB)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2RGB);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, YCrCb42RGBA)
+{
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_YCrCb2RGB, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YCrCb2RGB, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
+}
+
+CUDA_TEST_P(CvtColor, BGR2HSV)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HSV);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HSV)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HSV4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2HSV4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, BGR2HLS)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HLS);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HLS)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGB2HLS4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src = img;
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, RGBA2HLS4)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS, 4);
+
+ ASSERT_EQ(4, dst.channels());
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS);
+
+ cv::Mat h_dst(dst);
+
+ cv::Mat channels[4];
+ cv::split(h_dst, channels);
+ cv::merge(channels, 3, h_dst);
+
+ EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV2BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV2RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV42BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HSV42BGRA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HSV);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS2BGR)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2BGR);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2BGR);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS2RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS42RGB)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
+CUDA_TEST_P(CvtColor, HLS42RGBA)
+{
+ if (depth == CV_16U)
+ return;
+
+ cv::Mat src;
+ cv::cvtColor(img, src, cv::COLOR_BGR2HLS);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB, 4);
+
+ cv::Mat channels[4];
+ cv::split(src, channels);
+ channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
+ cv::merge(channels, 4, src);
+
+
+ cv::cuda::GpuMat dst;
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB, 4);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
+}
+
// Forward conversions to HSV/HLS using the *_FULL variants (for 8-bit images
// the hue is scaled to the full 0..255 range instead of 0..180).
// Not implemented for CV_16U. The *4 tests additionally request dcn = 4 and
// verify the output channel count, then drop the 4th channel before
// comparing against the 3-channel CPU gold.

CUDA_TEST_P(CvtColor, BGR2HSV_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HSV_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HSV_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, RGB2HSV_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, RGB2HSV4_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV_FULL, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV_FULL);

    // Download the GPU result and drop its 4th channel for comparison.
    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, RGBA2HSV4_FULL)
{
    if (depth == CV_16U)
        return;

    // 4-channel (RGBA) source; the conversion reads only the first 3 channels.
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HSV_FULL, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HSV_FULL);

    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, BGR2HLS_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2HLS_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2HLS_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, RGB2HLS_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, RGB2HLS4_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS_FULL, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS_FULL);

    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, RGBA2HLS4_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2HLS_FULL, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2HLS_FULL);

    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
+
// Backward conversions from full-range HSV/HLS back to BGR/RGB.
// Not implemented for CV_16U. The HSV4/HLS4 tests feed a 4-channel source
// (dummy zero 4th channel) to verify the extra channel is ignored.

CUDA_TEST_P(CvtColor, HSV2BGR_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2BGR_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HSV2BGR_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, HSV2RGB_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, HSV42RGB_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB_FULL);

    // Pad the source with a dummy zero 4th channel.
    cv::Mat channels[4];
    cv::split(src, channels);
    channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
    cv::merge(channels, 4, src);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, HSV42RGBA_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HSV_FULL);

    // dcn = 4 on both sides, so both outputs are RGBA.
    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HSV2RGB_FULL, 4);

    cv::Mat channels[4];
    cv::split(src, channels);
    channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
    cv::merge(channels, 4, src);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HSV2RGB_FULL, 4);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, HLS2BGR_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2BGR_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HLS2BGR_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, HLS2RGB_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, HLS42RGB_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB_FULL);

    cv::Mat channels[4];
    cv::split(src, channels);
    channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
    cv::merge(channels, 4, src);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB_FULL);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}

CUDA_TEST_P(CvtColor, HLS42RGBA_FULL)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2HLS_FULL);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_HLS2RGB_FULL, 4);

    cv::Mat channels[4];
    cv::split(src, channels);
    channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
    cv::merge(channels, 4, src);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_HLS2RGB_FULL, 4);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
+
// YUV round-trip tests. YUV conversions run for all tested depths (no
// CV_16U early return), and the tolerance is a tight 1e-5 since the
// transform is a plain linear matrix multiply on both CPU and GPU.

CUDA_TEST_P(CvtColor, BGR2YUV)
{
    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YUV);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YUV);

    EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}

CUDA_TEST_P(CvtColor, RGB2YUV)
{
    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2YUV);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2YUV);

    EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}

CUDA_TEST_P(CvtColor, YUV2BGR)
{
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2YUV);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2BGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_YUV2BGR);

    EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}

CUDA_TEST_P(CvtColor, YUV42BGR)
{
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2YUV);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_YUV2BGR);

    // 4-channel source with a dummy zero 4th channel: the conversion must
    // ignore the extra channel and match the 3-channel gold.
    cv::Mat channels[4];
    cv::split(src, channels);
    channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
    cv::merge(channels, 4, src);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2BGR);

    EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}

CUDA_TEST_P(CvtColor, YUV42BGRA)
{
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2YUV);

    // dcn = 4 on both sides (BGRA output).
    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_YUV2BGR, 4);

    cv::Mat channels[4];
    cv::split(src, channels);
    channels[3] = cv::Mat(src.size(), depth, cv::Scalar::all(0));
    cv::merge(channels, 4, src);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2BGR, 4);

    EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}

CUDA_TEST_P(CvtColor, YUV2RGB)
{
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_RGB2YUV);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_YUV2RGB);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_YUV2RGB);

    EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}

CUDA_TEST_P(CvtColor, BGR2YUV4)
{
    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2YUV, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2YUV);

    // Download the GPU result and drop its 4th channel for comparison.
    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}

CUDA_TEST_P(CvtColor, RGBA2YUV4)
{
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2YUV, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2YUV);

    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}
+
// Forward conversions to CIE Lab. Not implemented for CV_16U.
// The LBGR/LRGB variants treat the input as linear (non-gamma-corrected)
// RGB. 8-bit results allow a difference of 1 due to rounding/LUT
// differences; float results use a 1e-3 tolerance.

CUDA_TEST_P(CvtColor, BGR2Lab)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Lab);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Lab);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, RGB2Lab)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2Lab);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2Lab);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, BGRA2Lab4)
{
    if (depth == CV_16U)
        return;

    // 4-channel source. NOTE(review): the source is built with BGR2RGBA
    // although the test name says BGRA; both CPU gold and GPU paths see the
    // same bytes with the same conversion code, so the comparison is still
    // self-consistent.
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Lab, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Lab);

    // Download the GPU result and drop its 4th channel for comparison.
    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, LBGR2Lab)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Lab);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Lab);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, LRGB2Lab)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LRGB2Lab);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_LRGB2Lab);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, LBGRA2Lab4)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Lab, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Lab);

    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
+
// Backward conversions from CIE Lab. Not implemented for CV_16U.
// Lab2LBGR/Lab2LRGB produce linear (non-gamma-corrected) RGB output.

CUDA_TEST_P(CvtColor, Lab2BGR)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Lab);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2BGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Lab2BGR);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}

CUDA_TEST_P(CvtColor, Lab2RGB)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Lab);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2RGB);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Lab2RGB);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}

CUDA_TEST_P(CvtColor, Lab2BGRA)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Lab);

    // dcn = 4 on both sides, so outputs are directly comparable.
    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2BGR, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Lab2BGR, 4);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}

CUDA_TEST_P(CvtColor, Lab2LBGR)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Lab);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LBGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LBGR);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}

CUDA_TEST_P(CvtColor, Lab2LRGB)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Lab);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LRGB);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LRGB);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}

CUDA_TEST_P(CvtColor, Lab2LRGBA)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Lab);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LRGB, 4);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LRGB, 4);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
+
// Forward conversions to CIE Luv. Not implemented for CV_16U.
// Same structure as the Lab forward tests above.

CUDA_TEST_P(CvtColor, BGR2Luv)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Luv);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Luv);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, RGB2Luv)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2Luv);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_RGB2Luv);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, BGRA2Luv4)
{
    if (depth == CV_16U)
        return;

    // 4-channel source (see the note on BGRA2Lab4: built with BGR2RGBA, but
    // CPU and GPU paths see identical bytes so the check is self-consistent).
    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Luv, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Luv);

    // Download the GPU result and drop its 4th channel for comparison.
    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, LBGR2Luv)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Luv);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Luv);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, LRGB2Luv)
{
    if (depth == CV_16U)
        return;

    cv::Mat src = img;

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LRGB2Luv);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_LRGB2Luv);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}

CUDA_TEST_P(CvtColor, LBGRA2Luv4)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Luv, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Luv);

    cv::Mat h_dst(dst);

    cv::Mat channels[4];
    cv::split(h_dst, channels);
    cv::merge(channels, 3, h_dst);

    EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
+
// Backward conversions from CIE Luv. Not implemented for CV_16U.
// Float tolerance is 1e-4 (slightly looser than the Lab backward tests).

CUDA_TEST_P(CvtColor, Luv2BGR)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Luv);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2BGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Luv2BGR);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}

CUDA_TEST_P(CvtColor, Luv2RGB)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Luv);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2RGB);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Luv2RGB);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}

CUDA_TEST_P(CvtColor, Luv2BGRA)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Luv);

    // dcn = 4 on both sides, so outputs are directly comparable.
    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2BGR, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Luv2BGR, 4);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}

CUDA_TEST_P(CvtColor, Luv2LBGR)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Luv);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LBGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LBGR);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}

CUDA_TEST_P(CvtColor, Luv2LRGB)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Luv);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LRGB);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LRGB);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}

CUDA_TEST_P(CvtColor, Luv2LRGBA)
{
    if (depth == CV_16U)
        return;

    cv::Mat src;
    cv::cvtColor(img, src, cv::COLOR_BGR2Luv);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LRGB, 4);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LRGB, 4);

    EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
+
+#if defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
+
+CUDA_TEST_P(CvtColor, RGBA2mRGBA)
+{
+ if (depth != CV_8U)
+ return;
+
+ cv::Mat src = randomMat(size, CV_MAKE_TYPE(depth, 4));
+
+ cv::cuda::GpuMat dst = createMat(src.size(), src.type(), useRoi);
+ cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGBA2mRGBA);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_RGBA2mRGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 1);
+}
+
+#endif // defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
+
// Bayer demosaicing to BGR for the four 2x2 pattern phases (BG/GB/RG/GR).
// Supported only for CV_8U/CV_16U and whole matrices (the GPU demosaicing
// does not handle ROIs, hence the useRoi skip). A one-pixel border is
// excluded from the comparison since CPU and GPU handle image borders
// differently; the interior must match exactly (tolerance 0).
// The *4 variants request dcn = 4 and convert BGRA -> BGR before comparing.

CUDA_TEST_P(CvtColor, BayerBG2BGR)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2BGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}

CUDA_TEST_P(CvtColor, BayerBG2BGR4)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2BGR, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2BGR);

    // Download, then drop the alpha channel before comparing.
    cv::Mat dst4(dst);
    cv::Mat dst3;
    cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}

CUDA_TEST_P(CvtColor, BayerGB2BGR)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2BGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}

CUDA_TEST_P(CvtColor, BayerGB2BGR4)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2BGR, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2BGR);

    cv::Mat dst4(dst);
    cv::Mat dst3;
    cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}

CUDA_TEST_P(CvtColor, BayerRG2BGR)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2BGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}

CUDA_TEST_P(CvtColor, BayerRG2BGR4)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2BGR, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2BGR);

    cv::Mat dst4(dst);
    cv::Mat dst3;
    cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}

CUDA_TEST_P(CvtColor, BayerGR2BGR)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2BGR);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}

CUDA_TEST_P(CvtColor, BayerGR2BGR4)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2BGR, 4);

    ASSERT_EQ(4, dst.channels());

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2BGR);

    cv::Mat dst4(dst);
    cv::Mat dst3;
    cv::cvtColor(dst4, dst3, cv::COLOR_BGRA2BGR);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
+
// Bayer demosaicing straight to grayscale, for all four pattern phases.
// Same depth/ROI restrictions and border exclusion as the Bayer->BGR tests,
// but a tolerance of 2 is allowed: the gray value is derived via a weighted
// sum whose rounding differs between the CPU and GPU implementations.

CUDA_TEST_P(CvtColor, BayerBG2Gray)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerBG2GRAY);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerBG2GRAY);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}

CUDA_TEST_P(CvtColor, BayerGB2Gray)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGB2GRAY);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerGB2GRAY);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}

CUDA_TEST_P(CvtColor, BayerRG2Gray)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerRG2GRAY);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerRG2GRAY);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}

CUDA_TEST_P(CvtColor, BayerGR2Gray)
{
    if ((depth != CV_8U && depth != CV_16U) || useRoi)
        return;

    cv::Mat src = randomMat(size, depth);

    cv::cuda::GpuMat dst;
    cv::cuda::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BayerGR2GRAY);

    cv::Mat dst_gold;
    cv::cvtColor(src, dst_gold, cv::COLOR_BayerGR2GRAY);

    EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}
+
// Instantiate CvtColor on every CUDA device and image size, for the three
// supported depths, with both whole-matrix and sub-matrix (ROI) inputs.
INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, CvtColor, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32F)),
    WHOLE_SUBMAT));
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// Demosaicing
+
// Fixture for cv::cuda::demosaicing tests: selects the device under test and
// provides a helper to synthesize a Bayer mosaic from a BGR image.
struct Demosaicing : testing::TestWithParam<cv::cuda::DeviceInfo>
{
    cv::cuda::DeviceInfo devInfo;

    virtual void SetUp()
    {
        devInfo = GetParam();

        cv::cuda::setDevice(devInfo.deviceID());
    }

    // Build a single-channel Bayer mosaic from a BGR image.
    // firstRed gives the (x, y) position of the first red sample within the
    // 2x2 Bayer cell; the four firstRed choices correspond to the four
    // pattern phases (RG/GR/GB/BG). For each pixel the channel kept is:
    // red on "red" sites, green on "green" sites, blue on "blue" sites
    // (pix[2] = R, pix[1] = G, pix[0] = B in BGR order).
    static void mosaic(const cv::Mat_<cv::Vec3b>& src, cv::Mat_<uchar>& dst, cv::Point firstRed)
    {
        dst.create(src.size());

        for (int y = 0; y < src.rows; ++y)
        {
            for (int x = 0; x < src.cols; ++x)
            {
                cv::Vec3b pix = src(y, x);

                // Position of (x, y) inside the 2x2 cell, shifted so that
                // (0, 0) is the red sample.
                cv::Point alternate;
                alternate.x = (x + firstRed.x) % 2;
                alternate.y = (y + firstRed.y) % 2;

                if (alternate.y == 0)
                {
                    if (alternate.x == 0)
                    {
                        // RG
                        // GB
                        dst(y, x) = pix[2];
                    }
                    else
                    {
                        // GR
                        // BG
                        dst(y, x) = pix[1];
                    }
                }
                else
                {
                    if (alternate.x == 0)
                    {
                        // GB
                        // RG
                        dst(y, x) = pix[1];
                    }
                    else
                    {
                        // BG
                        // GR
                        dst(y, x) = pix[0];
                    }
                }
            }
        }
    }
};
+
// Round-trip tests for bilinear demosaicing: mosaic a real image with each
// of the four Bayer phases, demosaic on the GPU, and require the result to
// be within 2% of the original (EXPECT_MAT_SIMILAR relative threshold).

CUDA_TEST_P(Demosaicing, BayerBG2BGR)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(1, 1));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerBG2BGR);

    EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}

CUDA_TEST_P(Demosaicing, BayerGB2BGR)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(0, 1));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerGB2BGR);

    EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}

CUDA_TEST_P(Demosaicing, BayerRG2BGR)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(0, 0));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerRG2BGR);

    EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}

CUDA_TEST_P(Demosaicing, BayerGR2BGR)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(1, 0));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::COLOR_BayerGR2BGR);

    EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}
+
// Same round-trip tests using the edge-aware MHT demosaicing variant
// (cv::cuda::COLOR_Bayer*2BGR_MHT). Being higher quality than bilinear,
// it is held to a tighter 0.5% similarity threshold.

CUDA_TEST_P(Demosaicing, BayerBG2BGR_MHT)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(1, 1));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerBG2BGR_MHT);

    EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}

CUDA_TEST_P(Demosaicing, BayerGB2BGR_MHT)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(0, 1));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerGB2BGR_MHT);

    EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}

CUDA_TEST_P(Demosaicing, BayerRG2BGR_MHT)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(0, 0));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerRG2BGR_MHT);

    EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}

CUDA_TEST_P(Demosaicing, BayerGR2BGR_MHT)
{
    cv::Mat img = readImage("stereobm/aloe-L.png");
    ASSERT_FALSE(img.empty()) << "Can't load input image";

    cv::Mat_<uchar> src;
    mosaic(img, src, cv::Point(1, 0));

    cv::cuda::GpuMat dst;
    cv::cuda::demosaicing(loadMat(src), dst, cv::cuda::COLOR_BayerGR2BGR_MHT);

    EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}

// Demosaicing is parameterized on the device only.
INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, Demosaicing, ALL_DEVICES);
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// swapChannels
+
// Fixture for cv::cuda::swapChannels: parameterized on device, image size
// and whole-matrix vs ROI input.
PARAM_TEST_CASE(SwapChannels, cv::cuda::DeviceInfo, cv::Size, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};
+
+CUDA_TEST_P(SwapChannels, Accuracy)
+{
+ cv::Mat src = readImageType("stereobm/aloe-L.png", CV_8UC4);
+ ASSERT_FALSE(src.empty());
+
+ cv::cuda::GpuMat d_src = loadMat(src, useRoi);
+
+ const int dstOrder[] = {2, 1, 0, 3};
+ cv::cuda::swapChannels(d_src, dstOrder);
+
+ cv::Mat dst_gold;
+ cv::cvtColor(src, dst_gold, cv::COLOR_BGRA2RGBA);
+
+ EXPECT_MAT_NEAR(dst_gold, d_src, 0.0);
+}
+
// Instantiate SwapChannels on every device and size, with and without ROI.
INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, SwapChannels, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));
+
+#endif // HAVE_CUDA