//
//M*/
-#ifndef __OPENCV_CORE_GPU_HPP__
-#define __OPENCV_CORE_GPU_HPP__
+#ifndef __OPENCV_CORE_CUDA_HPP__
+#define __OPENCV_CORE_CUDA_HPP__
#ifndef __cplusplus
# error cuda.hpp header must be compiled as C++
//! checks whether current device supports the given feature
CV_EXPORTS bool deviceSupports(FeatureSet feature_set);
-//! information about what GPU archs this OpenCV GPU module was compiled for
+//! information about what GPU archs this OpenCV CUDA module was compiled for
class CV_EXPORTS TargetArchs
{
public:
//! checks whether device supports the given feature
bool supports(FeatureSet feature_set) const;
- //! checks whether the GPU module can be run on the given device
+ //! checks whether the CUDA module can be run on the given device
bool isCompatible() const;
private:
#include "opencv2/core/cuda.inl.hpp"
-#endif /* __OPENCV_CORE_GPU_HPP__ */
+#endif /* __OPENCV_CORE_CUDA_HPP__ */
//
//M*/
-#ifndef __OPENCV_CORE_GPUINL_HPP__
-#define __OPENCV_CORE_GPUINL_HPP__
+#ifndef __OPENCV_CORE_CUDAINL_HPP__
+#define __OPENCV_CORE_CUDAINL_HPP__
#include "opencv2/core/cuda.hpp"
}
-#endif // __OPENCV_CORE_GPUINL_HPP__
+#endif // __OPENCV_CORE_CUDAINL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_DEVICE_BLOCK_HPP__
-#define __OPENCV_GPU_DEVICE_BLOCK_HPP__
+#ifndef __OPENCV_CUDA_DEVICE_BLOCK_HPP__
+#define __OPENCV_CUDA_DEVICE_BLOCK_HPP__
namespace cv { namespace cuda { namespace device
{
};
}}}
-#endif /* __OPENCV_GPU_DEVICE_BLOCK_HPP__ */
+#endif /* __OPENCV_CUDA_DEVICE_BLOCK_HPP__ */
//
//M*/
-#ifndef __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
-#define __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
+#ifndef __OPENCV_CUDA_BORDER_INTERPOLATE_HPP__
+#define __OPENCV_CUDA_BORDER_INTERPOLATE_HPP__
#include "saturate_cast.hpp"
#include "vec_traits.hpp"
};
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
+#endif // __OPENCV_CUDA_BORDER_INTERPOLATE_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_COLOR_HPP__
-#define __OPENCV_GPU_COLOR_HPP__
+#ifndef __OPENCV_CUDA_COLOR_HPP__
+#define __OPENCV_CUDA_COLOR_HPP__
#include "detail/color_detail.hpp"
namespace cv { namespace cuda { namespace device
{
- // All OPENCV_GPU_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements
+ // All OPENCV_CUDA_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements
// template <typename T> class ColorSpace1_to_ColorSpace2_traits
// {
// typedef ... functor_type;
// static __host__ __device__ functor_type create_functor();
// };
- OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5)
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6)
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5)
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6)
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5)
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6)
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5)
- OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5)
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6)
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5)
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6)
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5)
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6)
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5)
- OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3)
- OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4)
-
- #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5)
- OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6)
-
- #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5)
- OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS
-
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS
-
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS
-
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS
-
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS
-
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2)
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2)
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2)
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2)
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0)
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0)
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0)
- OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0)
-
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS
-
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0)
-
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0)
- OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS
-
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0)
-
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0)
- OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS
-
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0)
-
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0)
- OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)
-
- #undef OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3)
+ OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4)
+
+ #undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5)
+ OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6)
+
+ #undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5)
+ OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2)
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2)
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0)
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0)
+ OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0)
+
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0)
+
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0)
+ OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0)
+
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0)
+ OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS
+
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0)
+
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0)
+ OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)
+
+ #undef OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
+#endif // __OPENCV_CUDA_BORDER_INTERPOLATE_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_COMMON_HPP__
-#define __OPENCV_GPU_COMMON_HPP__
+#ifndef __OPENCV_CUDA_COMMON_HPP__
+#define __OPENCV_CUDA_COMMON_HPP__
#include <cuda_runtime.h>
#include "opencv2/core/cuda_types.hpp"
-#endif // __OPENCV_GPU_COMMON_HPP__
+#endif // __OPENCV_CUDA_COMMON_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_DATAMOV_UTILS_HPP__
-#define __OPENCV_GPU_DATAMOV_UTILS_HPP__
+#ifndef __OPENCV_CUDA_DATAMOV_UTILS_HPP__
+#define __OPENCV_CUDA_DATAMOV_UTILS_HPP__
#include "common.hpp"
#if defined(_WIN64) || defined(__LP64__)
// 64-bit register modifier for inlined asm
- #define OPENCV_GPU_ASM_PTR "l"
+ #define OPENCV_CUDA_ASM_PTR "l"
#else
// 32-bit register modifier for inlined asm
- #define OPENCV_GPU_ASM_PTR "r"
+ #define OPENCV_CUDA_ASM_PTR "r"
#endif
template<class T> struct ForceGlob;
- #define OPENCV_GPU_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
+ #define OPENCV_CUDA_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
template <> struct ForceGlob<base_type> \
{ \
__device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
{ \
- asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
+ asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \
} \
};
- #define OPENCV_GPU_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
+ #define OPENCV_CUDA_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
template <> struct ForceGlob<base_type> \
{ \
__device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
{ \
- asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
+ asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \
} \
};
- OPENCV_GPU_DEFINE_FORCE_GLOB_B(uchar, u8)
- OPENCV_GPU_DEFINE_FORCE_GLOB_B(schar, s8)
- OPENCV_GPU_DEFINE_FORCE_GLOB_B(char, b8)
- OPENCV_GPU_DEFINE_FORCE_GLOB (ushort, u16, h)
- OPENCV_GPU_DEFINE_FORCE_GLOB (short, s16, h)
- OPENCV_GPU_DEFINE_FORCE_GLOB (uint, u32, r)
- OPENCV_GPU_DEFINE_FORCE_GLOB (int, s32, r)
- OPENCV_GPU_DEFINE_FORCE_GLOB (float, f32, f)
- OPENCV_GPU_DEFINE_FORCE_GLOB (double, f64, d)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB_B(uchar, u8)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB_B(schar, s8)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB_B(char, b8)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB (ushort, u16, h)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB (short, s16, h)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB (uint, u32, r)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB (int, s32, r)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB (float, f32, f)
+ OPENCV_CUDA_DEFINE_FORCE_GLOB (double, f64, d)
- #undef OPENCV_GPU_DEFINE_FORCE_GLOB
- #undef OPENCV_GPU_DEFINE_FORCE_GLOB_B
- #undef OPENCV_GPU_ASM_PTR
+ #undef OPENCV_CUDA_DEFINE_FORCE_GLOB
+ #undef OPENCV_CUDA_DEFINE_FORCE_GLOB_B
+ #undef OPENCV_CUDA_ASM_PTR
#endif // __CUDA_ARCH__ >= 200
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_DATAMOV_UTILS_HPP__
+#endif // __OPENCV_CUDA_DATAMOV_UTILS_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_COLOR_DETAIL_HPP__
-#define __OPENCV_GPU_COLOR_DETAIL_HPP__
+#ifndef __OPENCV_CUDA_COLOR_DETAIL_HPP__
+#define __OPENCV_CUDA_COLOR_DETAIL_HPP__
#include "../common.hpp"
#include "../vec_traits.hpp"
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2RGB<T, scn, dcn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2RGB5x5<scn, bidx, green_bits> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \
+#define OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB5x52RGB<dcn, bidx, green_bits> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \
+#define OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::Gray2RGB<T, dcn> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \
+#define OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::Gray2RGB5x5<green_bits> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \
+#define OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB5x52Gray<green_bits> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2Gray<T, scn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2YUV<T, scn, dcn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::YUV2RGB<T, scn, dcn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2YCrCb<T, scn, dcn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::YCrCb2RGB<T, scn, dcn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2XYZ<T, scn, dcn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::XYZ2RGB<T, scn, dcn, bidx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2HSV<T, scn, dcn, bidx, 180> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::HSV2RGB<T, scn, dcn, bidx, 180> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2HLS<T, scn, dcn, bidx, 180> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \
+#define OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::HLS2RGB<T, scn, dcn, bidx, 180> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(name, scn, dcn, srgb, blueIdx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2Lab<T, scn, dcn, srgb, blueIdx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
+#define OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::Lab2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(name, scn, dcn, srgb, blueIdx) \
+#define OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::RGB2Luv<T, scn, dcn, srgb, blueIdx> functor_type; \
};
}
-#define OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
+#define OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::cuda::device::color_detail::Luv2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_COLOR_DETAIL_HPP__
+#endif // __OPENCV_CUDA_COLOR_DETAIL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_REDUCE_DETAIL_HPP__
-#define __OPENCV_GPU_REDUCE_DETAIL_HPP__
+#ifndef __OPENCV_CUDA_REDUCE_DETAIL_HPP__
+#define __OPENCV_CUDA_REDUCE_DETAIL_HPP__
#include <thrust/tuple.h>
#include "../warp.hpp"
}
}}}
-#endif // __OPENCV_GPU_REDUCE_DETAIL_HPP__
+#endif // __OPENCV_CUDA_REDUCE_DETAIL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__
-#define __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__
+#ifndef __OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP__
+#define __OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP__
#include <thrust/tuple.h>
#include "../warp.hpp"
}
}}}
-#endif // __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__
+#endif // __OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_TRANSFORM_DETAIL_HPP__
-#define __OPENCV_GPU_TRANSFORM_DETAIL_HPP__
+#ifndef __OPENCV_CUDA_TRANSFORM_DETAIL_HPP__
+#define __OPENCV_CUDA_TRANSFORM_DETAIL_HPP__
#include "../common.hpp"
#include "../vec_traits.hpp"
} // namespace transform_detail
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_TRANSFORM_DETAIL_HPP__
+#endif // __OPENCV_CUDA_TRANSFORM_DETAIL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__
-#define __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__
+#ifndef __OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP__
+#define __OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP__
#include "../common.hpp"
#include "../vec_traits.hpp"
} // namespace type_traits_detail
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__
+#endif // __OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__
-#define __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__
+#ifndef __OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP__
+#define __OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP__
#include "../datamov_utils.hpp"
} // namespace vec_distance_detail
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__
+#endif // __OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_DYNAMIC_SMEM_HPP__
-#define __OPENCV_GPU_DYNAMIC_SMEM_HPP__
+#ifndef __OPENCV_CUDA_DYNAMIC_SMEM_HPP__
+#define __OPENCV_CUDA_DYNAMIC_SMEM_HPP__
namespace cv { namespace cuda { namespace device
{
};
}}}
-#endif // __OPENCV_GPU_DYNAMIC_SMEM_HPP__
+#endif // __OPENCV_CUDA_DYNAMIC_SMEM_HPP__
//
//M*/
-#ifndef OPENCV_GPU_EMULATION_HPP_
-#define OPENCV_GPU_EMULATION_HPP_
+#ifndef OPENCV_CUDA_EMULATION_HPP_
+#define OPENCV_CUDA_EMULATION_HPP_
#include "common.hpp"
#include "warp_reduce.hpp"
}; //struct Emulation
}}} // namespace cv { namespace cuda { namespace cudev
-#endif /* OPENCV_GPU_EMULATION_HPP_ */
+#endif /* OPENCV_CUDA_EMULATION_HPP_ */
//
//M*/
-#ifndef __OPENCV_GPU_FILTERS_HPP__
-#define __OPENCV_GPU_FILTERS_HPP__
+#ifndef __OPENCV_CUDA_FILTERS_HPP__
+#define __OPENCV_CUDA_FILTERS_HPP__
#include "saturate_cast.hpp"
#include "vec_traits.hpp"
};
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_FILTERS_HPP__
+#endif // __OPENCV_CUDA_FILTERS_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_
-#define __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_
+#ifndef __OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP_
+#define __OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP_
#include <cstdio>
}
}}} // namespace cv { namespace cuda { namespace cudev
-#endif /* __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ */
+#endif /* __OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP_ */
//
//M*/
-#ifndef __OPENCV_GPU_FUNCTIONAL_HPP__
-#define __OPENCV_GPU_FUNCTIONAL_HPP__
+#ifndef __OPENCV_CUDA_FUNCTIONAL_HPP__
+#define __OPENCV_CUDA_FUNCTIONAL_HPP__
#include <functional>
#include "saturate_cast.hpp"
// Min/Max Operations
-#define OPENCV_GPU_IMPLEMENT_MINMAX(name, type, op) \
+#define OPENCV_CUDA_IMPLEMENT_MINMAX(name, type, op) \
template <> struct name<type> : binary_function<type, type, type> \
{ \
__device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \
__host__ __device__ __forceinline__ maximum(const maximum&) {}
};
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uchar, ::max)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, schar, ::max)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, char, ::max)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, ushort, ::max)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, short, ::max)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, int, ::max)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uint, ::max)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, float, ::fmax)
- OPENCV_GPU_IMPLEMENT_MINMAX(maximum, double, ::fmax)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uchar, ::max)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, schar, ::max)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, char, ::max)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, ushort, ::max)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, short, ::max)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, int, ::max)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uint, ::max)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, float, ::fmax)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, double, ::fmax)
template <typename T> struct minimum : binary_function<T, T, T>
{
__host__ __device__ __forceinline__ minimum(const minimum&) {}
};
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uchar, ::min)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, schar, ::min)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, char, ::min)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, ushort, ::min)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, short, ::min)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, int, ::min)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uint, ::min)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, float, ::fmin)
- OPENCV_GPU_IMPLEMENT_MINMAX(minimum, double, ::fmin)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uchar, ::min)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, schar, ::min)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, char, ::min)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, ushort, ::min)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, short, ::min)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, int, ::min)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uint, ::min)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, float, ::fmin)
+ OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, double, ::fmin)
-#undef OPENCV_GPU_IMPLEMENT_MINMAX
+#undef OPENCV_CUDA_IMPLEMENT_MINMAX
// Math functions
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
-#define OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(name, func) \
+#define OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(name, func) \
template <typename T> struct name ## _func : unary_function<T, float> \
{ \
__device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v) const \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
};
-#define OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(name, func) \
+#define OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(name, func) \
template <typename T> struct name ## _func : binary_function<T, T, float> \
{ \
__device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v1, typename TypeTraits<T>::ParameterType v2) const \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
};
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp, ::exp)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log, ::log)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log2, ::log2)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log10, ::log10)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sin, ::sin)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cos, ::cos)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tan, ::tan)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asin, ::asin)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acos, ::acos)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atan, ::atan)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh)
- OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh)
-
- OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot)
- OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2)
- OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(pow, ::pow)
-
- #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR
- #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE
- #undef OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp, ::exp)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log, ::log)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log2, ::log2)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log10, ::log10)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sin, ::sin)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cos, ::cos)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tan, ::tan)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asin, ::asin)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acos, ::acos)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atan, ::atan)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh)
+ OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh)
+
+ OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot)
+ OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2)
+ OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(pow, ::pow)
+
+ #undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR
+ #undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE
+ #undef OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR
template<typename T> struct hypot_sqr_func : binary_function<T, T, float>
{
template <typename Func> struct TransformFunctorTraits : DefaultTransformFunctorTraits<Func> {};
-#define OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(type) \
+#define OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(type) \
template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type >
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_FUNCTIONAL_HPP__
+#endif // __OPENCV_CUDA_FUNCTIONAL_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_LIMITS_GPU_HPP__
-#define __OPENCV_GPU_LIMITS_GPU_HPP__
+#ifndef __OPENCV_CUDA_LIMITS_HPP__
+#define __OPENCV_CUDA_LIMITS_HPP__
#include <limits.h>
#include <float.h>
}}} // namespace cv { namespace cuda { namespace cudev {
-#endif // __OPENCV_GPU_LIMITS_GPU_HPP__
+#endif // __OPENCV_CUDA_LIMITS_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_REDUCE_HPP__
-#define __OPENCV_GPU_REDUCE_HPP__
+#ifndef __OPENCV_CUDA_REDUCE_HPP__
+#define __OPENCV_CUDA_REDUCE_HPP__
#include <thrust/tuple.h>
#include "detail/reduce.hpp"
}
}}}
-#endif // __OPENCV_GPU_UTILITY_HPP__
+#endif // __OPENCV_CUDA_REDUCE_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_SATURATE_CAST_HPP__
-#define __OPENCV_GPU_SATURATE_CAST_HPP__
+#ifndef __OPENCV_CUDA_SATURATE_CAST_HPP__
+#define __OPENCV_CUDA_SATURATE_CAST_HPP__
#include "common.hpp"
}
}}}
-#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */
+#endif /* __OPENCV_CUDA_SATURATE_CAST_HPP__ */
//
//M*/
-#ifndef __OPENCV_GPU_SCAN_HPP__
-#define __OPENCV_GPU_SCAN_HPP__
+#ifndef __OPENCV_CUDA_SCAN_HPP__
+#define __OPENCV_CUDA_SCAN_HPP__
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
// scan on shuffl functions
#pragma unroll
- for (int i = 1; i <= (OPENCV_GPU_WARP_SIZE / 2); i *= 2)
+ for (int i = 1; i <= (OPENCV_CUDA_WARP_SIZE / 2); i *= 2)
{
const T n = cv::cuda::device::shfl_up(idata, i);
if (laneId >= i)
return idata;
#else
- unsigned int pos = 2 * tid - (tid & (OPENCV_GPU_WARP_SIZE - 1));
+ unsigned int pos = 2 * tid - (tid & (OPENCV_CUDA_WARP_SIZE - 1));
s_Data[pos] = 0;
- pos += OPENCV_GPU_WARP_SIZE;
+ pos += OPENCV_CUDA_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
template <int tiNumScanThreads, typename T>
__device__ T blockScanInclusive(T idata, volatile T* s_Data, unsigned int tid)
{
- if (tiNumScanThreads > OPENCV_GPU_WARP_SIZE)
+ if (tiNumScanThreads > OPENCV_CUDA_WARP_SIZE)
{
//Bottom-level inclusive warp scan
T warpResult = warpScanInclusive(idata, s_Data, tid);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
- if ((tid & (OPENCV_GPU_WARP_SIZE - 1)) == (OPENCV_GPU_WARP_SIZE - 1))
+ if ((tid & (OPENCV_CUDA_WARP_SIZE - 1)) == (OPENCV_CUDA_WARP_SIZE - 1))
{
- s_Data[tid >> OPENCV_GPU_LOG_WARP_SIZE] = warpResult;
+ s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
- if (tid < (tiNumScanThreads / OPENCV_GPU_WARP_SIZE) )
+ if (tid < (tiNumScanThreads / OPENCV_CUDA_WARP_SIZE) )
{
//grab top warp elements
T val = s_Data[tid];
//return updated warp scans with exclusive scan results
__syncthreads();
- return warpResult + s_Data[tid >> OPENCV_GPU_LOG_WARP_SIZE];
+ return warpResult + s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE];
}
else
{
}
}}}
-#endif // __OPENCV_GPU_SCAN_HPP__
+#endif // __OPENCV_CUDA_SCAN_HPP__
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __OPENCV_GPU_SIMD_FUNCTIONS_HPP__
-#define __OPENCV_GPU_SIMD_FUNCTIONS_HPP__
+#ifndef __OPENCV_CUDA_SIMD_FUNCTIONS_HPP__
+#define __OPENCV_CUDA_SIMD_FUNCTIONS_HPP__
#include "common.hpp"
/*
This header file contains inline functions that implement intra-word SIMD
- operations, that are hardware accelerated on sm_3x (Kepler) GPUs. Efficient
+ operations, that are hardware accelerated on sm_3x (Kepler) GPUs. Efficient
emulation code paths are provided for earlier architectures (sm_1x, sm_2x)
- to make the code portable across all GPUs supported by CUDA. The following
+ to make the code portable across all GPUs supported by CUDA. The following
functions are currently implemented:
vadd2(a,b) per-halfword unsigned addition, with wrap-around: a + b
}
}}}
-#endif // __OPENCV_GPU_SIMD_FUNCTIONS_HPP__
+#endif // __OPENCV_CUDA_SIMD_FUNCTIONS_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_TRANSFORM_HPP__
-#define __OPENCV_GPU_TRANSFORM_HPP__
+#ifndef __OPENCV_CUDA_TRANSFORM_HPP__
+#define __OPENCV_CUDA_TRANSFORM_HPP__
#include "common.hpp"
#include "utility.hpp"
}
}}}
-#endif // __OPENCV_GPU_TRANSFORM_HPP__
+#endif // __OPENCV_CUDA_TRANSFORM_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_TYPE_TRAITS_HPP__
-#define __OPENCV_GPU_TYPE_TRAITS_HPP__
+#ifndef __OPENCV_CUDA_TYPE_TRAITS_HPP__
+#define __OPENCV_CUDA_TYPE_TRAITS_HPP__
#include "detail/type_traits_detail.hpp"
};
}}}
-#endif // __OPENCV_GPU_TYPE_TRAITS_HPP__
+#endif // __OPENCV_CUDA_TYPE_TRAITS_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_UTILITY_HPP__
-#define __OPENCV_GPU_UTILITY_HPP__
+#ifndef __OPENCV_CUDA_UTILITY_HPP__
+#define __OPENCV_CUDA_UTILITY_HPP__
#include "saturate_cast.hpp"
#include "datamov_utils.hpp"
namespace cv { namespace cuda { namespace device
{
- #define OPENCV_GPU_LOG_WARP_SIZE (5)
- #define OPENCV_GPU_WARP_SIZE (1 << OPENCV_GPU_LOG_WARP_SIZE)
- #define OPENCV_GPU_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 5 : 4) // 32 banks on fermi, 16 on tesla
- #define OPENCV_GPU_MEM_BANKS (1 << OPENCV_GPU_LOG_MEM_BANKS)
+ #define OPENCV_CUDA_LOG_WARP_SIZE (5)
+ #define OPENCV_CUDA_WARP_SIZE (1 << OPENCV_CUDA_LOG_WARP_SIZE)
+ #define OPENCV_CUDA_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 5 : 4) // 32 banks on fermi, 16 on tesla
+ #define OPENCV_CUDA_MEM_BANKS (1 << OPENCV_CUDA_LOG_MEM_BANKS)
///////////////////////////////////////////////////////////////////////////////
// swap
}
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_UTILITY_HPP__
+#endif // __OPENCV_CUDA_UTILITY_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_VEC_DISTANCE_HPP__
-#define __OPENCV_GPU_VEC_DISTANCE_HPP__
+#ifndef __OPENCV_CUDA_VEC_DISTANCE_HPP__
+#define __OPENCV_CUDA_VEC_DISTANCE_HPP__
#include "reduce.hpp"
#include "functional.hpp"
};
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_VEC_DISTANCE_HPP__
+#endif // __OPENCV_CUDA_VEC_DISTANCE_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_VECMATH_HPP__
-#define __OPENCV_GPU_VECMATH_HPP__
+#ifndef __OPENCV_CUDA_VECMATH_HPP__
+#define __OPENCV_CUDA_VECMATH_HPP__
#include "vec_traits.hpp"
#include "saturate_cast.hpp"
}}} // namespace cv { namespace cuda { namespace device
-#endif // __OPENCV_GPU_VECMATH_HPP__
+#endif // __OPENCV_CUDA_VECMATH_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_VEC_TRAITS_HPP__
-#define __OPENCV_GPU_VEC_TRAITS_HPP__
+#ifndef __OPENCV_CUDA_VEC_TRAITS_HPP__
+#define __OPENCV_CUDA_VEC_TRAITS_HPP__
#include "common.hpp"
return val;
}
-#define OPENCV_GPU_IMPLEMENT_TYPE_VEC(type) \
+#define OPENCV_CUDA_IMPLEMENT_TYPE_VEC(type) \
template<> struct TypeVec<type, 1> { typedef type vec_type; }; \
template<> struct TypeVec<type ## 1, 1> { typedef type ## 1 vec_type; }; \
template<> struct TypeVec<type, 2> { typedef type ## 2 vec_type; }; \
template<> struct TypeVec<type, 8> { typedef type ## 8 vec_type; }; \
template<> struct TypeVec<type ## 8, 8> { typedef type ## 8 vec_type; };
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(uchar)
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(char)
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(ushort)
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(short)
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(int)
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(uint)
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(float)
- OPENCV_GPU_IMPLEMENT_TYPE_VEC(double)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uchar)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(char)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(ushort)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(short)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(int)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uint)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(float)
+ OPENCV_CUDA_IMPLEMENT_TYPE_VEC(double)
- #undef OPENCV_GPU_IMPLEMENT_TYPE_VEC
+ #undef OPENCV_CUDA_IMPLEMENT_TYPE_VEC
template<> struct TypeVec<schar, 1> { typedef schar vec_type; };
template<> struct TypeVec<schar, 2> { typedef char2 vec_type; };
template<typename T> struct VecTraits;
-#define OPENCV_GPU_IMPLEMENT_VEC_TRAITS(type) \
+#define OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(type) \
template<> struct VecTraits<type> \
{ \
typedef type elem_type; \
static __device__ __host__ __forceinline__ type ## 8 make(const type* v) {return make_ ## type ## 8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} \
};
- OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uchar)
- OPENCV_GPU_IMPLEMENT_VEC_TRAITS(ushort)
- OPENCV_GPU_IMPLEMENT_VEC_TRAITS(short)
- OPENCV_GPU_IMPLEMENT_VEC_TRAITS(int)
- OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uint)
- OPENCV_GPU_IMPLEMENT_VEC_TRAITS(float)
- OPENCV_GPU_IMPLEMENT_VEC_TRAITS(double)
+ OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uchar)
+ OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(ushort)
+ OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(short)
+ OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(int)
+ OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uint)
+ OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(float)
+ OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(double)
- #undef OPENCV_GPU_IMPLEMENT_VEC_TRAITS
+ #undef OPENCV_CUDA_IMPLEMENT_VEC_TRAITS
template<> struct VecTraits<char>
{
};
}}} // namespace cv { namespace cuda { namespace cudev
-#endif // __OPENCV_GPU_VEC_TRAITS_HPP__
+#endif // __OPENCV_CUDA_VEC_TRAITS_HPP__
//
//M*/
-#ifndef __OPENCV_GPU_DEVICE_WARP_HPP__
-#define __OPENCV_GPU_DEVICE_WARP_HPP__
+#ifndef __OPENCV_CUDA_DEVICE_WARP_HPP__
+#define __OPENCV_CUDA_DEVICE_WARP_HPP__
namespace cv { namespace cuda { namespace device
{
};
}}} // namespace cv { namespace cuda { namespace cudev
-#endif /* __OPENCV_GPU_DEVICE_WARP_HPP__ */
+#endif /* __OPENCV_CUDA_DEVICE_WARP_HPP__ */
//
//M*/
-#ifndef OPENCV_GPU_WARP_REDUCE_HPP__
-#define OPENCV_GPU_WARP_REDUCE_HPP__
+#ifndef OPENCV_CUDA_WARP_REDUCE_HPP__
+#define OPENCV_CUDA_WARP_REDUCE_HPP__
namespace cv { namespace cuda { namespace device
{
}
}}} // namespace cv { namespace cuda { namespace cudev {
-#endif /* OPENCV_GPU_WARP_REDUCE_HPP__ */
+#endif /* OPENCV_CUDA_WARP_REDUCE_HPP__ */
//
//M*/
-#ifndef __OPENCV_GPU_WARP_SHUFFLE_HPP__
-#define __OPENCV_GPU_WARP_SHUFFLE_HPP__
+#ifndef __OPENCV_CUDA_WARP_SHUFFLE_HPP__
+#define __OPENCV_CUDA_WARP_SHUFFLE_HPP__
namespace cv { namespace cuda { namespace device
{
}
}}}
-#endif // __OPENCV_GPU_WARP_SHUFFLE_HPP__
+#endif // __OPENCV_CUDA_WARP_SHUFFLE_HPP__
//
//M*/
-#ifndef __OPENCV_CORE_GPU_STREAM_ACCESSOR_HPP__
-#define __OPENCV_CORE_GPU_STREAM_ACCESSOR_HPP__
+#ifndef __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__
+#define __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__
#ifndef __cplusplus
# error cuda_stream_accessor.hpp header must be compiled as C++
// This is only header file that depends on Cuda. All other headers are independent.
// So if you use OpenCV binaries you do noot need to install Cuda Toolkit.
-// But of you wanna use GPU by yourself, may get cuda stream instance using the class below.
+// But if you want to use CUDA by yourself, you may get the cuda stream instance using the class below.
// In this case you have to install Cuda Toolkit.
#include <cuda_runtime.h>
}
}
-#endif /* __OPENCV_CORE_GPU_STREAM_ACCESSOR_HPP__ */
+#endif /* __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__ */
//
//M*/
-#ifndef __OPENCV_CORE_GPU_TYPES_HPP__
-#define __OPENCV_CORE_GPU_TYPES_HPP__
+#ifndef __OPENCV_CORE_CUDA_TYPES_HPP__
+#define __OPENCV_CORE_CUDA_TYPES_HPP__
#ifndef __cplusplus
# error cuda_types.hpp header must be compiled as C++
#endif
#ifdef __CUDACC__
- #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
+ #define __CV_CUDA_HOST_DEVICE__ __host__ __device__ __forceinline__
#else
- #define __CV_GPU_HOST_DEVICE__
+ #define __CV_CUDA_HOST_DEVICE__
#endif
namespace cv
T* data;
- __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
- __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
+ __CV_CUDA_HOST_DEVICE__ DevPtr() : data(0) {}
+ __CV_CUDA_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
- __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
- __CV_GPU_HOST_DEVICE__ operator T*() { return data; }
- __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
+ __CV_CUDA_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
+ __CV_CUDA_HOST_DEVICE__ operator T*() { return data; }
+ __CV_CUDA_HOST_DEVICE__ operator const T*() const { return data; }
};
template <typename T> struct PtrSz : public DevPtr<T>
{
- __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
- __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
+ __CV_CUDA_HOST_DEVICE__ PtrSz() : size(0) {}
+ __CV_CUDA_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
size_t size;
};
template <typename T> struct PtrStep : public DevPtr<T>
{
- __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
- __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
+ __CV_CUDA_HOST_DEVICE__ PtrStep() : step(0) {}
+ __CV_CUDA_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
//! stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!!
size_t step;
- __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
- __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
+ __CV_CUDA_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
+ __CV_CUDA_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
- __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
- __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+ __CV_CUDA_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
+ __CV_CUDA_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};
template <typename T> struct PtrStepSz : public PtrStep<T>
{
- __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
- __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
+ __CV_CUDA_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
+ __CV_CUDA_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
: PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
template <typename U>
}
}
-#endif /* __OPENCV_CORE_GPU_TYPES_HPP__ */
+#endif /* __OPENCV_CORE_CUDA_TYPES_HPP__ */
//
//M*/
-#ifndef __OPENCV_CORE_PRIVATE_GPU_HPP__
-#define __OPENCV_CORE_PRIVATE_GPU_HPP__
+#ifndef __OPENCV_CORE_PRIVATE_CUDA_HPP__
+#define __OPENCV_CORE_PRIVATE_CUDA_HPP__
#ifndef __OPENCV_BUILD
# error this is a private header which should not be used from outside of the OpenCV library
# endif
# if defined(CUDA_ARCH_BIN_OR_PTX_10)
-# error "OpenCV GPU module doesn't support NVIDIA compute capability 1.0"
+# error "OpenCV CUDA module doesn't support NVIDIA compute capability 1.0"
# endif
#endif
#ifndef HAVE_CUDA
-static inline void throw_no_cuda() { CV_Error(cv::Error::GpuNotSupported, "The library is compiled without GPU support"); }
+static inline void throw_no_cuda() { CV_Error(cv::Error::GpuNotSupported, "The library is compiled without CUDA support"); }
#else // HAVE_CUDA
#endif // HAVE_CUDA
-#endif // __OPENCV_CORE_GPU_PRIVATE_HPP__
+#endif // __OPENCV_CORE_PRIVATE_CUDA_HPP__
case CV_StsNotImplemented : return "The function/feature is not implemented";
case CV_StsBadMemBlock : return "Memory block has been corrupted";
case CV_StsAssert : return "Assertion failed";
- case CV_GpuNotSupported : return "No GPU support";
+ case CV_GpuNotSupported : return "No CUDA support";
case CV_GpuApiCallError : return "Gpu API call";
case CV_OpenGlNotSupported : return "No OpenGL support";
case CV_OpenGlApiCallError : return "OpenGL API call";
//////////////////////////// CascadeClassifier ////////////////////////////
// The cascade classifier class for object detection: supports old haar and new lbp xlm formats and nvbin for haar cascades olny.
-class CV_EXPORTS CascadeClassifier_GPU
+class CV_EXPORTS CascadeClassifier_CUDA
{
public:
- CascadeClassifier_GPU();
- CascadeClassifier_GPU(const String& filename);
- ~CascadeClassifier_GPU();
+ CascadeClassifier_CUDA();
+ CascadeClassifier_CUDA(const String& filename);
+ ~CascadeClassifier_CUDA();
bool empty() const;
bool load(const String& filename);
CascadeClassifierImpl* impl;
struct HaarCascade;
struct LbpCascade;
- friend class CascadeClassifier_GPU_LBP;
+ friend class CascadeClassifier_CUDA_LBP;
};
//////////////////////////// Labeling ////////////////////////////
const cv::Mat tvec = cv::Mat::ones(1, 3, CV_32FC1);
const cv::Mat camera_mat = cv::Mat::ones(3, 3, CV_32FC1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::projectPoints(d_src, rvec, tvec, camera_mat, cv::Mat(), dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
cv::Mat rvec;
cv::Mat tvec;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
TEST_CYCLE() cv::cuda::solvePnPRansac(object, image, camera_mat, dist_coef, rvec, tvec);
- GPU_SANITY_CHECK(rvec, 1e-3);
- GPU_SANITY_CHECK(tvec, 1e-3);
+ CUDA_SANITY_CHECK(rvec, 1e-3);
+ CUDA_SANITY_CHECK(tvec, 1e-3);
}
else
{
const cv::Mat image = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_image(image);
cv::cuda::GpuMat mask;
TEST_CYCLE() cv::cuda::connectivityMask(d_image, mask, cv::Scalar::all(0), cv::Scalar::all(2));
- GPU_SANITY_CHECK(mask);
+ CUDA_SANITY_CHECK(mask);
}
else
{
const cv::Mat image = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_mask;
cv::cuda::connectivityMask(cv::cuda::GpuMat(image), d_mask, cv::Scalar::all(0), cv::Scalar::all(2));
TEST_CYCLE() cv::cuda::labelComponents(d_mask, components);
- GPU_SANITY_CHECK(components);
+ CUDA_SANITY_CHECK(components);
}
else
{
// SetTo
PERF_TEST_P(Sz_Depth_Cn, MatOp_SetTo,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const cv::Scalar val(1, 2, 3, 4);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat dst(size, type);
TEST_CYCLE() dst.setTo(val);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// SetToMasked
PERF_TEST_P(Sz_Depth_Cn, MatOp_SetToMasked,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const cv::Scalar val(1, 2, 3, 4);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat dst(src);
const cv::cuda::GpuMat d_mask(mask);
TEST_CYCLE() dst.setTo(val, d_mask);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// CopyToMasked
PERF_TEST_P(Sz_Depth_Cn, MatOp_CopyToMasked,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat mask(size, CV_8UC1);
declare.in(src, mask, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
const cv::cuda::GpuMat d_mask(mask);
TEST_CYCLE() d_src.copyTo(dst, d_mask);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
DEF_PARAM_TEST(Sz_2Depth, cv::Size, MatDepth, MatDepth);
PERF_TEST_P(Sz_2Depth, MatOp_ConvertTo,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
const double a = 0.5;
const double b = 1.0;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() d_src.convertTo(dst, depth2, a, b);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
const cv::Mat img = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_img(img);
std::vector<cv::Rect> gpu_found_locations;
const cv::Mat img = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::CascadeClassifier_GPU d_cascade;
+ cv::cuda::CascadeClassifier_CUDA d_cascade;
ASSERT_TRUE(d_cascade.load(perf::TestBase::getDataPath(GetParam().second)));
const cv::cuda::GpuMat d_img(img);
const cv::Mat img = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::CascadeClassifier_GPU d_cascade;
+ cv::cuda::CascadeClassifier_CUDA d_cascade;
ASSERT_TRUE(d_cascade.load(perf::TestBase::getDataPath(GetParam().second)));
const cv::cuda::GpuMat d_img(img);
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
-cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU() { throw_no_cuda(); }
-cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU(const String&) { throw_no_cuda(); }
-cv::cuda::CascadeClassifier_GPU::~CascadeClassifier_GPU() { throw_no_cuda(); }
-bool cv::cuda::CascadeClassifier_GPU::empty() const { throw_no_cuda(); return true; }
-bool cv::cuda::CascadeClassifier_GPU::load(const String&) { throw_no_cuda(); return true; }
-Size cv::cuda::CascadeClassifier_GPU::getClassifierSize() const { throw_no_cuda(); return Size();}
-void cv::cuda::CascadeClassifier_GPU::release() { throw_no_cuda(); }
-int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, double, int, Size) {throw_no_cuda(); return -1;}
-int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, Size, Size, double, int) {throw_no_cuda(); return -1;}
+cv::cuda::CascadeClassifier_CUDA::CascadeClassifier_CUDA() { throw_no_cuda(); }
+cv::cuda::CascadeClassifier_CUDA::CascadeClassifier_CUDA(const String&) { throw_no_cuda(); }
+cv::cuda::CascadeClassifier_CUDA::~CascadeClassifier_CUDA() { throw_no_cuda(); }
+bool cv::cuda::CascadeClassifier_CUDA::empty() const { throw_no_cuda(); return true; }
+bool cv::cuda::CascadeClassifier_CUDA::load(const String&) { throw_no_cuda(); return true; }
+Size cv::cuda::CascadeClassifier_CUDA::getClassifierSize() const { throw_no_cuda(); return Size();}
+void cv::cuda::CascadeClassifier_CUDA::release() { throw_no_cuda(); }
+int cv::cuda::CascadeClassifier_CUDA::detectMultiScale( const GpuMat&, GpuMat&, double, int, Size) {throw_no_cuda(); return -1;}
+int cv::cuda::CascadeClassifier_CUDA::detectMultiScale( const GpuMat&, GpuMat&, Size, Size, double, int) {throw_no_cuda(); return -1;}
#else
-struct cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
+struct cv::cuda::CascadeClassifier_CUDA::CascadeClassifierImpl
{
public:
CascadeClassifierImpl(){}
#ifndef HAVE_OPENCV_CUDALEGACY
-struct cv::cuda::CascadeClassifier_GPU::HaarCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
+struct cv::cuda::CascadeClassifier_CUDA::HaarCascade : cv::cuda::CascadeClassifier_CUDA::CascadeClassifierImpl
{
public:
HaarCascade()
#else
-struct cv::cuda::CascadeClassifier_GPU::HaarCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
+struct cv::cuda::CascadeClassifier_CUDA::HaarCascade : cv::cuda::CascadeClassifier_CUDA::CascadeClassifierImpl
{
public:
HaarCascade() : lastAllocatedFrameSize(-1, -1)
}
}}}
-struct cv::cuda::CascadeClassifier_GPU::LbpCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
+struct cv::cuda::CascadeClassifier_CUDA::LbpCascade : cv::cuda::CascadeClassifier_CUDA::CascadeClassifierImpl
{
public:
struct Stage
bool read(const FileNode &root)
{
- const char *GPU_CC_STAGE_TYPE = "stageType";
- const char *GPU_CC_FEATURE_TYPE = "featureType";
- const char *GPU_CC_BOOST = "BOOST";
- const char *GPU_CC_LBP = "LBP";
- const char *GPU_CC_MAX_CAT_COUNT = "maxCatCount";
- const char *GPU_CC_HEIGHT = "height";
- const char *GPU_CC_WIDTH = "width";
- const char *GPU_CC_STAGE_PARAMS = "stageParams";
- const char *GPU_CC_MAX_DEPTH = "maxDepth";
- const char *GPU_CC_FEATURE_PARAMS = "featureParams";
- const char *GPU_CC_STAGES = "stages";
- const char *GPU_CC_STAGE_THRESHOLD = "stageThreshold";
- const float GPU_THRESHOLD_EPS = 1e-5f;
- const char *GPU_CC_WEAK_CLASSIFIERS = "weakClassifiers";
- const char *GPU_CC_INTERNAL_NODES = "internalNodes";
- const char *GPU_CC_LEAF_VALUES = "leafValues";
- const char *GPU_CC_FEATURES = "features";
- const char *GPU_CC_RECT = "rect";
-
- String stageTypeStr = (String)root[GPU_CC_STAGE_TYPE];
- CV_Assert(stageTypeStr == GPU_CC_BOOST);
-
- String featureTypeStr = (String)root[GPU_CC_FEATURE_TYPE];
- CV_Assert(featureTypeStr == GPU_CC_LBP);
-
- NxM.width = (int)root[GPU_CC_WIDTH];
- NxM.height = (int)root[GPU_CC_HEIGHT];
+ const char *CUDA_CC_STAGE_TYPE = "stageType";
+ const char *CUDA_CC_FEATURE_TYPE = "featureType";
+ const char *CUDA_CC_BOOST = "BOOST";
+ const char *CUDA_CC_LBP = "LBP";
+ const char *CUDA_CC_MAX_CAT_COUNT = "maxCatCount";
+ const char *CUDA_CC_HEIGHT = "height";
+ const char *CUDA_CC_WIDTH = "width";
+ const char *CUDA_CC_STAGE_PARAMS = "stageParams";
+ const char *CUDA_CC_MAX_DEPTH = "maxDepth";
+ const char *CUDA_CC_FEATURE_PARAMS = "featureParams";
+ const char *CUDA_CC_STAGES = "stages";
+ const char *CUDA_CC_STAGE_THRESHOLD = "stageThreshold";
+ const float CUDA_THRESHOLD_EPS = 1e-5f;
+ const char *CUDA_CC_WEAK_CLASSIFIERS = "weakClassifiers";
+ const char *CUDA_CC_INTERNAL_NODES = "internalNodes";
+ const char *CUDA_CC_LEAF_VALUES = "leafValues";
+ const char *CUDA_CC_FEATURES = "features";
+ const char *CUDA_CC_RECT = "rect";
+
+ String stageTypeStr = (String)root[CUDA_CC_STAGE_TYPE];
+ CV_Assert(stageTypeStr == CUDA_CC_BOOST);
+
+ String featureTypeStr = (String)root[CUDA_CC_FEATURE_TYPE];
+ CV_Assert(featureTypeStr == CUDA_CC_LBP);
+
+ NxM.width = (int)root[CUDA_CC_WIDTH];
+ NxM.height = (int)root[CUDA_CC_HEIGHT];
CV_Assert( NxM.height > 0 && NxM.width > 0 );
- isStumps = ((int)(root[GPU_CC_STAGE_PARAMS][GPU_CC_MAX_DEPTH]) == 1) ? true : false;
+ isStumps = ((int)(root[CUDA_CC_STAGE_PARAMS][CUDA_CC_MAX_DEPTH]) == 1) ? true : false;
CV_Assert(isStumps);
- FileNode fn = root[GPU_CC_FEATURE_PARAMS];
+ FileNode fn = root[CUDA_CC_FEATURE_PARAMS];
if (fn.empty())
return false;
- ncategories = fn[GPU_CC_MAX_CAT_COUNT];
+ ncategories = fn[CUDA_CC_MAX_CAT_COUNT];
subsetSize = (ncategories + 31) / 32;
nodeStep = 3 + ( ncategories > 0 ? subsetSize : 1 );
- fn = root[GPU_CC_STAGES];
+ fn = root[CUDA_CC_STAGES];
if (fn.empty())
return false;
{
FileNode fns = *it;
Stage st;
- st.threshold = (float)fns[GPU_CC_STAGE_THRESHOLD] - GPU_THRESHOLD_EPS;
+ st.threshold = (float)fns[CUDA_CC_STAGE_THRESHOLD] - CUDA_THRESHOLD_EPS;
- fns = fns[GPU_CC_WEAK_CLASSIFIERS];
+ fns = fns[CUDA_CC_WEAK_CLASSIFIERS];
if (fns.empty())
return false;
{
FileNode fnw = *it1;
- FileNode internalNodes = fnw[GPU_CC_INTERNAL_NODES];
- FileNode leafValues = fnw[GPU_CC_LEAF_VALUES];
+ FileNode internalNodes = fnw[CUDA_CC_INTERNAL_NODES];
+ FileNode leafValues = fnw[CUDA_CC_LEAF_VALUES];
if ( internalNodes.empty() || leafValues.empty() )
return false;
}
}
- fn = root[GPU_CC_FEATURES];
+ fn = root[CUDA_CC_FEATURES];
if( fn.empty() )
return false;
std::vector<uchar> features;
FileNodeIterator f_it = fn.begin(), f_end = fn.end();
for (; f_it != f_end; ++f_it)
{
- FileNode rect = (*f_it)[GPU_CC_RECT];
+ FileNode rect = (*f_it)[CUDA_CC_RECT];
FileNodeIterator r_it = rect.begin();
features.push_back(saturate_cast<uchar>((int)*(r_it++)));
features.push_back(saturate_cast<uchar>((int)*(r_it++)));
static const int integralFactor = 4;
};
-cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU()
+cv::cuda::CascadeClassifier_CUDA::CascadeClassifier_CUDA()
: findLargestObject(false), visualizeInPlace(false), impl(0) {}
-cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU(const String& filename)
+cv::cuda::CascadeClassifier_CUDA::CascadeClassifier_CUDA(const String& filename)
: findLargestObject(false), visualizeInPlace(false), impl(0) { load(filename); }
-cv::cuda::CascadeClassifier_GPU::~CascadeClassifier_GPU() { release(); }
+cv::cuda::CascadeClassifier_CUDA::~CascadeClassifier_CUDA() { release(); }
-void cv::cuda::CascadeClassifier_GPU::release() { if (impl) { delete impl; impl = 0; } }
+void cv::cuda::CascadeClassifier_CUDA::release() { if (impl) { delete impl; impl = 0; } }
-bool cv::cuda::CascadeClassifier_GPU::empty() const { return impl == 0; }
+bool cv::cuda::CascadeClassifier_CUDA::empty() const { return impl == 0; }
-Size cv::cuda::CascadeClassifier_GPU::getClassifierSize() const
+Size cv::cuda::CascadeClassifier_CUDA::getClassifierSize() const
{
return this->empty() ? Size() : impl->getClassifierCvSize();
}
-int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
+int cv::cuda::CascadeClassifier_CUDA::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
{
CV_Assert( !this->empty());
return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, cv::Size());
}
-int cv::cuda::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize, double scaleFactor, int minNeighbors)
+int cv::cuda::CascadeClassifier_CUDA::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize, double scaleFactor, int minNeighbors)
{
CV_Assert( !this->empty());
return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, maxObjectSize);
}
-bool cv::cuda::CascadeClassifier_GPU::load(const String& filename)
+bool cv::cuda::CascadeClassifier_CUDA::load(const String& filename)
{
release();
return impl->read(filename);
}
- const char *GPU_CC_LBP = "LBP";
+ const char *CUDA_CC_LBP = "LBP";
String featureTypeStr = (String)fs.getFirstTopLevelNode()["featureType"];
- if (featureTypeStr == GPU_CC_LBP)
+ if (featureTypeStr == CUDA_CC_LBP)
impl = new LbpCascade();
else
impl = new HaarCascade();
//
//M*/
-#ifndef __OPENCV_GPU_DEVICE_LBP_HPP_
-#define __OPENCV_GPU_DEVICE_LBP_HPP_
+#ifndef __OPENCV_CUDA_DEVICE_LBP_HPP_
+#define __OPENCV_CUDA_DEVICE_LBP_HPP_
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/emulation.hpp"
}
};
-GPU_TEST_P(TransformPoints, Accuracy)
+CUDA_TEST_P(TransformPoints, Accuracy)
{
cv::Mat src = randomMat(cv::Size(1000, 1), CV_32FC3, 0, 10);
cv::Mat rvec = randomMat(cv::Size(3, 1), CV_32F, 0, 1);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Calib3D, TransformPoints, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Calib3D, TransformPoints, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// ProjectPoints
}
};
-GPU_TEST_P(ProjectPoints, Accuracy)
+CUDA_TEST_P(ProjectPoints, Accuracy)
{
cv::Mat src = randomMat(cv::Size(1000, 1), CV_32FC3, 0, 10);
cv::Mat rvec = randomMat(cv::Size(3, 1), CV_32F, 0, 1);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Calib3D, ProjectPoints, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Calib3D, ProjectPoints, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// SolvePnPRansac
}
};
-GPU_TEST_P(SolvePnPRansac, Accuracy)
+CUDA_TEST_P(SolvePnPRansac, Accuracy)
{
cv::Mat object = randomMat(cv::Size(5000, 1), CV_32FC3, 0, 100);
cv::Mat camera_mat = randomMat(cv::Size(3, 3), CV_32F, 0.5, 1);
ASSERT_LE(cv::norm(tvec - tvec_gold), 1e-3);
}
-INSTANTIATE_TEST_CASE_P(GPU_Calib3D, SolvePnPRansac, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Calib3D, SolvePnPRansac, ALL_DEVICES);
#endif // HAVE_CUDA
virtual void SetUp() { cuda::setDevice(GetParam().deviceID()); }
};
-GPU_TEST_P(CompactPoints, CanCompactizeSmallInput)
+CUDA_TEST_P(CompactPoints, CanCompactizeSmallInput)
{
Mat src0(1, 3, CV_32FC2);
src0.at<Point2f>(0,0) = Point2f(0,0);
ASSERT_TRUE(src1.at<Point2f>(0,1) == Point2f(1,2));
}
-INSTANTIATE_TEST_CASE_P(GPU_GlobalMotion, CompactPoints, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_GlobalMotion, CompactPoints, ALL_DEVICES);
#endif // HAVE_CUDA
}
};
-GPU_TEST_P(SetTo, Zero)
+CUDA_TEST_P(SetTo, Zero)
{
cv::Scalar zero = cv::Scalar::all(0);
EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
}
-GPU_TEST_P(SetTo, SameVal)
+CUDA_TEST_P(SetTo, SameVal)
{
cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
}
}
-GPU_TEST_P(SetTo, DifferentVal)
+CUDA_TEST_P(SetTo, DifferentVal)
{
cv::Scalar val = randomScalar(0.0, 255.0);
}
}
-GPU_TEST_P(SetTo, Masked)
+CUDA_TEST_P(SetTo, Masked)
{
cv::Scalar val = randomScalar(0.0, 255.0);
cv::Mat mat_gold = randomMat(size, type);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_GpuMat, SetTo, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, SetTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
}
};
-GPU_TEST_P(CopyTo, WithOutMask)
+CUDA_TEST_P(CopyTo, WithOutMask)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(src, dst, 0.0);
}
-GPU_TEST_P(CopyTo, Masked)
+CUDA_TEST_P(CopyTo, Masked)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_GpuMat, CopyTo, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, CopyTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
}
};
-GPU_TEST_P(ConvertTo, WithOutScaling)
+CUDA_TEST_P(ConvertTo, WithOutScaling)
{
cv::Mat src = randomMat(size, depth1);
}
}
-GPU_TEST_P(ConvertTo, WithScaling)
+CUDA_TEST_P(ConvertTo, WithScaling)
{
cv::Mat src = randomMat(size, depth1);
double a = randomDouble(0.0, 1.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_GpuMat, ConvertTo, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, ConvertTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(EnsureSizeIsEnough, BufferReuse)
+CUDA_TEST_P(EnsureSizeIsEnough, BufferReuse)
{
cv::cuda::GpuMat buffer(100, 100, CV_8U);
cv::cuda::GpuMat old = buffer;
EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
}
-INSTANTIATE_TEST_CASE_P(GPU_GpuMat, EnsureSizeIsEnough, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_GpuMat, EnsureSizeIsEnough, ALL_DEVICES);
#endif // HAVE_CUDA
}
};
-GPU_TEST_P(Labeling, DISABLED_ConnectedComponents)
+CUDA_TEST_P(Labeling, DISABLED_ConnectedComponents)
{
cv::Mat image;
cvtColor(loat_image(), image, cv::COLOR_BGR2GRAY);
host.checkCorrectness(cv::Mat(components));
}
-INSTANTIATE_TEST_CASE_P(GPU_ConnectedComponents, Labeling, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_ConnectedComponents, Labeling, ALL_DEVICES);
#endif // HAVE_CUDA
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
};
// disabled until resize is fixed
-GPU_TEST_P(HOG, Detect)
+CUDA_TEST_P(HOG, Detect)
{
cv::Mat img_rgb = readImage("hog/road.png");
ASSERT_FALSE(img_rgb.empty());
f.close();
}
-GPU_TEST_P(HOG, GetDescriptors)
+CUDA_TEST_P(HOG, GetDescriptors)
{
// Load image (e.g. train data, composed from windows)
cv::Mat img_rgb = readImage("hog/train_data.png");
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
}
-INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, HOG, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_ObjDetect, HOG, ALL_DEVICES);
//============== caltech hog tests =====================//
}
};
-GPU_TEST_P(CalTech, HOG)
+CUDA_TEST_P(CalTech, HOG)
{
cv::cuda::GpuMat d_img(img);
cv::Mat markedImage(img.clone());
}
};
-GPU_TEST_P(LBP_Read_classifier, Accuracy)
+CUDA_TEST_P(LBP_Read_classifier, Accuracy)
{
- cv::cuda::CascadeClassifier_GPU classifier;
+ cv::cuda::CascadeClassifier_CUDA classifier;
std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
ASSERT_TRUE(classifier.load(classifierXmlPath));
}
-INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, LBP_Read_classifier,
+INSTANTIATE_TEST_CASE_P(CUDA_ObjDetect, LBP_Read_classifier,
testing::Combine(ALL_DEVICES, testing::Values<int>(0)));
}
};
-GPU_TEST_P(LBP_classify, Accuracy)
+CUDA_TEST_P(LBP_classify, Accuracy)
{
std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
std::string imagePath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/er.png";
for (; it != rects.end(); ++it)
cv::rectangle(markedImage, *it, cv::Scalar(255, 0, 0));
- cv::cuda::CascadeClassifier_GPU gpuClassifier;
+ cv::cuda::CascadeClassifier_CUDA gpuClassifier;
ASSERT_TRUE(gpuClassifier.load(classifierXmlPath));
cv::cuda::GpuMat gpu_rects;
(void)count;
}
-INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, LBP_classify,
+INSTANTIATE_TEST_CASE_P(CUDA_ObjDetect, LBP_classify,
testing::Combine(ALL_DEVICES, testing::Values<int>(0)));
#endif // HAVE_CUDA
}
};
-GPU_TEST_P(Buffer, Constructor1)
+CUDA_TEST_P(Buffer, Constructor1)
{
cv::ogl::Buffer buf(size.height, size.width, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(type, buf.type());
}
-GPU_TEST_P(Buffer, Constructor2)
+CUDA_TEST_P(Buffer, Constructor2)
{
cv::ogl::Buffer buf(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(type, buf.type());
}
-GPU_TEST_P(Buffer, ConstructorFromMat)
+CUDA_TEST_P(Buffer, ConstructorFromMat)
{
cv::Mat gold = randomMat(size, type);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, ConstructorFromGpuMat)
+CUDA_TEST_P(Buffer, ConstructorFromGpuMat)
{
cv::Mat gold = randomMat(size, type);
cv::cuda::GpuMat d_gold(gold);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, ConstructorFromBuffer)
+CUDA_TEST_P(Buffer, ConstructorFromBuffer)
{
cv::ogl::Buffer buf_gold(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(buf_gold.type(), buf.type());
}
-GPU_TEST_P(Buffer, Create)
+CUDA_TEST_P(Buffer, Create)
{
cv::ogl::Buffer buf;
buf.create(size.height, size.width, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(type, buf.type());
}
-GPU_TEST_P(Buffer, CopyFromMat)
+CUDA_TEST_P(Buffer, CopyFromMat)
{
cv::Mat gold = randomMat(size, type);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, CopyFromGpuMat)
+CUDA_TEST_P(Buffer, CopyFromGpuMat)
{
cv::Mat gold = randomMat(size, type);
cv::cuda::GpuMat d_gold(gold);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, CopyFromBuffer)
+CUDA_TEST_P(Buffer, CopyFromBuffer)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, CopyToGpuMat)
+CUDA_TEST_P(Buffer, CopyToGpuMat)
{
cv::Mat gold = randomMat(size, type);
EXPECT_MAT_NEAR(gold, dst, 0);
}
-GPU_TEST_P(Buffer, CopyToBuffer)
+CUDA_TEST_P(Buffer, CopyToBuffer)
{
cv::Mat gold = randomMat(size, type);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, Clone)
+CUDA_TEST_P(Buffer, Clone)
{
cv::Mat gold = randomMat(size, type);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, MapHostRead)
+CUDA_TEST_P(Buffer, MapHostRead)
{
cv::Mat gold = randomMat(size, type);
buf.unmapHost();
}
-GPU_TEST_P(Buffer, MapHostWrite)
+CUDA_TEST_P(Buffer, MapHostWrite)
{
cv::Mat gold = randomMat(size, type);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
-GPU_TEST_P(Buffer, MapDevice)
+CUDA_TEST_P(Buffer, MapDevice)
{
cv::Mat gold = randomMat(size, type);
}
};
-GPU_TEST_P(Texture2D, Constructor1)
+CUDA_TEST_P(Texture2D, Constructor1)
{
cv::ogl::Texture2D tex(size.height, size.width, format, true);
EXPECT_EQ(format, tex.format());
}
-GPU_TEST_P(Texture2D, Constructor2)
+CUDA_TEST_P(Texture2D, Constructor2)
{
cv::ogl::Texture2D tex(size, format, true);
EXPECT_EQ(format, tex.format());
}
-GPU_TEST_P(Texture2D, ConstructorFromMat)
+CUDA_TEST_P(Texture2D, ConstructorFromMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
-GPU_TEST_P(Texture2D, ConstructorFromGpuMat)
+CUDA_TEST_P(Texture2D, ConstructorFromGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::cuda::GpuMat d_gold(gold);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
-GPU_TEST_P(Texture2D, ConstructorFromBuffer)
+CUDA_TEST_P(Texture2D, ConstructorFromBuffer)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER, true);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
-GPU_TEST_P(Texture2D, ConstructorFromTexture2D)
+CUDA_TEST_P(Texture2D, ConstructorFromTexture2D)
{
cv::ogl::Texture2D tex_gold(size, format, true);
cv::ogl::Texture2D tex(tex_gold);
EXPECT_EQ(tex_gold.format(), tex.format());
}
-GPU_TEST_P(Texture2D, Create)
+CUDA_TEST_P(Texture2D, Create)
{
cv::ogl::Texture2D tex;
tex.create(size.height, size.width, format, true);
EXPECT_EQ(format, tex.format());
}
-GPU_TEST_P(Texture2D, CopyFromMat)
+CUDA_TEST_P(Texture2D, CopyFromMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
-GPU_TEST_P(Texture2D, CopyFromGpuMat)
+CUDA_TEST_P(Texture2D, CopyFromGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::cuda::GpuMat d_gold(gold);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
-GPU_TEST_P(Texture2D, CopyFromBuffer)
+CUDA_TEST_P(Texture2D, CopyFromBuffer)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER, true);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
-GPU_TEST_P(Texture2D, CopyToGpuMat)
+CUDA_TEST_P(Texture2D, CopyToGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
EXPECT_MAT_NEAR(gold, dst, 1e-2);
}
-GPU_TEST_P(Texture2D, CopyToBuffer)
+CUDA_TEST_P(Texture2D, CopyToBuffer)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
ASSERT_MAT_NEAR(dst_gold, dst, 0);
}
-GPU_TEST_P(Async, MemSet)
+CUDA_TEST_P(Async, MemSet)
{
cv::cuda::Stream stream;
ASSERT_MAT_NEAR(dst_gold, dst, 0);
}
-GPU_TEST_P(Async, Convert)
+CUDA_TEST_P(Async, Convert)
{
cv::cuda::Stream stream;
stream.waitForCompletion();
}
-INSTANTIATE_TEST_CASE_P(GPU_Stream, Async, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Stream, Async, ALL_DEVICES);
#endif // CUDART_VERSION >= 5000
cv::Mat src3(size, type);
declare.in(src3, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
declare.time(5.0);
TEST_CYCLE() cv::cuda::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, dst, flags);
- GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
else
{
DEF_PARAM_TEST(Sz_Flags, cv::Size, DftFlags);
PERF_TEST_P(Sz_Flags, MulSpectrums,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(0, DftFlags(cv::DFT_ROWS))))
{
const cv::Size size = GET_PARAM(0);
cv::Mat b(size, CV_32FC2);
declare.in(a, b, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_a(a);
const cv::cuda::GpuMat d_b(b);
TEST_CYCLE() cv::cuda::mulSpectrums(d_a, d_b, dst, flag);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// MulAndScaleSpectrums
PERF_TEST_P(Sz, MulAndScaleSpectrums,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src2(size, CV_32FC2);
declare.in(src1,src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::mulAndScaleSpectrums(d_src1, d_src2, dst, cv::DFT_ROWS, scale, false);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// Dft
PERF_TEST_P(Sz_Flags, Dft,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(0, DftFlags(cv::DFT_ROWS), DftFlags(cv::DFT_INVERSE))))
{
declare.time(10.0);
cv::Mat src(size, CV_32FC2);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::dft(d_src, dst, size, flag);
- GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
else
{
DEF_PARAM_TEST(Sz_KernelSz_Ccorr, cv::Size, int, bool);
PERF_TEST_P(Sz_KernelSz_Ccorr, Convolve,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(17, 27, 32, 64),
Bool()))
{
const cv::Mat templ(templ_size, templ_size, CV_32FC1);
declare.in(image, templ, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_image = cv::cuda::createContinuous(size, CV_32FC1);
d_image.upload(image);
TEST_CYCLE() convolution->convolve(d_image, d_templ, dst, ccorr);
- GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
else
{
// Integral
PERF_TEST_P(Sz, Integral,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::integral(d_src, dst, d_buf);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// IntegralSqr
PERF_TEST_P(Sz, IntegralSqr,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst, buf;
TEST_CYCLE() cv::cuda::sqrIntegral(d_src, dst, buf);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// Merge
PERF_TEST_P(Sz_Depth_Cn, Merge,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH,
Values(2, 3, 4)))
{
declare.in(src[i], WARMUP_RNG);
}
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
std::vector<cv::cuda::GpuMat> d_src(channels);
for (int i = 0; i < channels; ++i)
TEST_CYCLE() cv::cuda::merge(d_src, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// Split
PERF_TEST_P(Sz_Depth_Cn, Split,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH,
Values(2, 3, 4)))
{
cv::Mat src(size, CV_MAKE_TYPE(depth, channels));
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
std::vector<cv::cuda::GpuMat> dst;
const cv::cuda::GpuMat& dst0 = dst[0];
const cv::cuda::GpuMat& dst1 = dst[1];
- GPU_SANITY_CHECK(dst0, 1e-10);
- GPU_SANITY_CHECK(dst1, 1e-10);
+ CUDA_SANITY_CHECK(dst0, 1e-10);
+ CUDA_SANITY_CHECK(dst1, 1e-10);
}
else
{
// Transpose
PERF_TEST_P(Sz_Type, Transpose,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8UC1, CV_8UC4, CV_16UC2, CV_16SC2, CV_32SC1, CV_32SC2, CV_64FC1)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::transpose(d_src, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Cn_Code, cv::Size, MatDepth, MatCn, FlipCode);
PERF_TEST_P(Sz_Depth_Cn_Code, Flip,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
FlipCode::all()))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::flip(d_src, dst, flipCode);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// LutOneChannel
PERF_TEST_P(Sz_Type, LutOneChannel,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8UC1, CV_8UC3)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat lut(1, 256, CV_8UC1);
declare.in(lut, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cuda::LookUpTable> lutAlg = cv::cuda::createLookUpTable(lut);
TEST_CYCLE() lutAlg->transform(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// LutMultiChannel
PERF_TEST_P(Sz_Type, LutMultiChannel,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values<MatType>(CV_8UC3)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat lut(1, 256, CV_MAKE_TYPE(CV_8U, src.channels()));
declare.in(lut, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cuda::LookUpTable> lutAlg = cv::cuda::createLookUpTable(lut);
TEST_CYCLE() lutAlg->transform(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Cn_Border, cv::Size, MatDepth, MatCn, BorderMode);
PERF_TEST_P(Sz_Depth_Cn_Border, CopyMakeBorder,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
ALL_BORDER_MODES))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::copyMakeBorder(d_src, dst, 5, 5, 5, 5, borderMode);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// AddMat
PERF_TEST_P(Sz_Depth, AddMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::add(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// AddScalar
PERF_TEST_P(Sz_Depth, AddScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar s;
declare.in(s, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::add(d_src, s, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// SubtractMat
PERF_TEST_P(Sz_Depth, SubtractMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::subtract(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// SubtractScalar
PERF_TEST_P(Sz_Depth, SubtractScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar s;
declare.in(s, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::subtract(d_src, s, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// MultiplyMat
PERF_TEST_P(Sz_Depth, MultiplyMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::multiply(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst, 1e-6);
+ CUDA_SANITY_CHECK(dst, 1e-6);
}
else
{
// MultiplyScalar
PERF_TEST_P(Sz_Depth, MultiplyScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar s;
declare.in(s, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::multiply(d_src, s, dst);
- GPU_SANITY_CHECK(dst, 1e-6);
+ CUDA_SANITY_CHECK(dst, 1e-6);
}
else
{
// DivideMat
PERF_TEST_P(Sz_Depth, DivideMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::divide(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst, 1e-6);
+ CUDA_SANITY_CHECK(dst, 1e-6);
}
else
{
// DivideScalar
PERF_TEST_P(Sz_Depth, DivideScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar s;
declare.in(s, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::divide(d_src, s, dst);
- GPU_SANITY_CHECK(dst, 1e-6);
+ CUDA_SANITY_CHECK(dst, 1e-6);
}
else
{
// DivideScalarInv
PERF_TEST_P(Sz_Depth, DivideScalarInv,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar s;
declare.in(s, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::divide(s[0], d_src, dst);
- GPU_SANITY_CHECK(dst, 1e-6);
+ CUDA_SANITY_CHECK(dst, 1e-6);
}
else
{
// AbsDiffMat
PERF_TEST_P(Sz_Depth, AbsDiffMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::absdiff(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// AbsDiffScalar
PERF_TEST_P(Sz_Depth, AbsDiffScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar s;
declare.in(s, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::absdiff(d_src, s, dst);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// Abs
PERF_TEST_P(Sz_Depth, Abs,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_16S, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::abs(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// Sqr
PERF_TEST_P(Sz_Depth, Sqr,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16S, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::sqr(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// Sqrt
PERF_TEST_P(Sz_Depth, Sqrt,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16S, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
cv::randu(src, 0, 100000);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::sqrt(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// Log
PERF_TEST_P(Sz_Depth, Log,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16S, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
cv::randu(src, 0, 100000);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::log(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// Exp
PERF_TEST_P(Sz_Depth, Exp,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16S, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
cv::randu(src, 0, 10);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::exp(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Power, cv::Size, MatDepth, double);
PERF_TEST_P(Sz_Depth_Power, Pow,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16S, CV_32F),
Values(0.3, 2.0, 2.4)))
{
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::pow(d_src, power, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Code, cv::Size, MatDepth, CmpCode);
PERF_TEST_P(Sz_Depth_Code, CompareMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH,
CmpCode::all()))
{
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::compare(d_src1, d_src2, dst, cmp_code);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// CompareScalar
PERF_TEST_P(Sz_Depth_Code, CompareScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
ARITHM_MAT_DEPTH,
CmpCode::all()))
{
cv::Scalar s;
declare.in(s, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::compare(d_src, s, dst, cmp_code);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BitwiseNot
PERF_TEST_P(Sz_Depth, BitwiseNot,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::bitwise_not(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BitwiseAndMat
PERF_TEST_P(Sz_Depth, BitwiseAndMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::bitwise_and(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BitwiseAndScalar
PERF_TEST_P(Sz_Depth_Cn, BitwiseAndScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
declare.in(s, WARMUP_RNG);
cv::Scalar_<int> is = s;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::bitwise_and(d_src, is, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BitwiseOrMat
PERF_TEST_P(Sz_Depth, BitwiseOrMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::bitwise_or(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BitwiseOrScalar
PERF_TEST_P(Sz_Depth_Cn, BitwiseOrScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
declare.in(s, WARMUP_RNG);
cv::Scalar_<int> is = s;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::bitwise_or(d_src, is, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BitwiseXorMat
PERF_TEST_P(Sz_Depth, BitwiseXorMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::bitwise_xor(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BitwiseXorScalar
PERF_TEST_P(Sz_Depth_Cn, BitwiseXorScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
declare.in(s, WARMUP_RNG);
cv::Scalar_<int> is = s;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::bitwise_xor(d_src, is, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// RShift
PERF_TEST_P(Sz_Depth_Cn, RShift,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const cv::Scalar_<int> val = cv::Scalar_<int>::all(4);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::rshift(d_src, val, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// LShift
PERF_TEST_P(Sz_Depth_Cn, LShift,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const cv::Scalar_<int> val = cv::Scalar_<int>::all(4);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::lshift(d_src, val, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// MinMat
PERF_TEST_P(Sz_Depth, MinMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::min(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// MinScalar
PERF_TEST_P(Sz_Depth, MinScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar val;
declare.in(val, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::min(d_src, val[0], dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// MaxMat
PERF_TEST_P(Sz_Depth, MaxMat,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, depth);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::max(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// MaxScalar
PERF_TEST_P(Sz_Depth, MaxScalar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F)))
{
const cv::Size size = GET_PARAM(0);
cv::Scalar val;
declare.in(val, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::max(d_src, val[0], dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_3Depth, cv::Size, MatDepth, MatDepth, MatDepth);
PERF_TEST_P(Sz_3Depth, AddWeighted,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
Values(CV_8U, CV_16U, CV_32F, CV_64F),
Values(CV_8U, CV_16U, CV_32F, CV_64F)))
cv::Mat src2(size, depth2);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::addWeighted(d_src1, 0.5, d_src2, 0.5, 10.0, dst, dst_depth);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
// MagnitudeComplex
PERF_TEST_P(Sz, MagnitudeComplex,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_32FC2);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::magnitude(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// MagnitudeSqrComplex
PERF_TEST_P(Sz, MagnitudeSqrComplex,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_32FC2);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::magnitudeSqr(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// Magnitude
PERF_TEST_P(Sz, Magnitude,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src2(size, CV_32FC1);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::magnitude(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// MagnitudeSqr
PERF_TEST_P(Sz, MagnitudeSqr,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src2(size, CV_32FC1);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::magnitudeSqr(d_src1, d_src2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_AngleInDegrees, cv::Size, bool);
PERF_TEST_P(Sz_AngleInDegrees, Phase,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Bool()))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, CV_32FC1);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::phase(d_src1, d_src2, dst, angleInDegrees);
- GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
else
{
// CartToPolar
PERF_TEST_P(Sz_AngleInDegrees, CartToPolar,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Bool()))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, CV_32FC1);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
TEST_CYCLE() cv::cuda::cartToPolar(d_src1, d_src2, magnitude, angle, angleInDegrees);
- GPU_SANITY_CHECK(magnitude);
- GPU_SANITY_CHECK(angle, 1e-6, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(magnitude);
+ CUDA_SANITY_CHECK(angle, 1e-6, ERROR_RELATIVE);
}
else
{
// PolarToCart
PERF_TEST_P(Sz_AngleInDegrees, PolarToCart,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Bool()))
{
const cv::Size size = GET_PARAM(0);
cv::Mat angle(size, CV_32FC1);
declare.in(angle, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_magnitude(magnitude);
const cv::cuda::GpuMat d_angle(angle);
TEST_CYCLE() cv::cuda::polarToCart(d_magnitude, d_angle, x, y, angleInDegrees);
- GPU_SANITY_CHECK(x);
- GPU_SANITY_CHECK(y);
+ CUDA_SANITY_CHECK(x);
+ CUDA_SANITY_CHECK(y);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Op, cv::Size, MatDepth, ThreshOp);
PERF_TEST_P(Sz_Depth_Op, Threshold,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
ThreshOp::all()))
{
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::threshold(d_src, dst, 100.0, 255.0, threshOp);
- GPU_SANITY_CHECK(dst, 1e-10);
+ CUDA_SANITY_CHECK(dst, 1e-10);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Norm, cv::Size, MatDepth, NormType);
PERF_TEST_P(Sz_Depth_Norm, Norm,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32S, CV_32F),
Values(NormType(cv::NORM_INF), NormType(cv::NORM_L1), NormType(cv::NORM_L2))))
{
else
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
DEF_PARAM_TEST(Sz_Norm, cv::Size, NormType);
PERF_TEST_P(Sz_Norm, NormDiff,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(NormType(cv::NORM_INF), NormType(cv::NORM_L1), NormType(cv::NORM_L2))))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src2(size, CV_8UC1);
declare.in(src2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
// Sum
PERF_TEST_P(Sz_Depth_Cn, Sum,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
// SumAbs
PERF_TEST_P(Sz_Depth_Cn, SumAbs,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
// SumSqr
PERF_TEST_P(Sz_Depth_Cn, SumSqr,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values<MatDepth>(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
// MinMax
PERF_TEST_P(Sz_Depth, MinMax,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
const cv::Size size = GET_PARAM(0);
else
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
// MinMaxLoc
PERF_TEST_P(Sz_Depth, MinMaxLoc,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
const cv::Size size = GET_PARAM(0);
else
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_valbuf, d_locbuf;
// CountNonZero
PERF_TEST_P(Sz_Depth, CountNonZero,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
DEF_PARAM_TEST(Sz_Depth_Cn_Code_Dim, cv::Size, MatDepth, MatCn, ReduceCode, ReduceDim);
PERF_TEST_P(Sz_Depth_Cn_Code_Dim, Reduce,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_16S, CV_32F),
Values(1, 2, 3, 4),
ReduceCode::all(),
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::reduce(d_src, dst, dim, reduceOp);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_NormType, cv::Size, MatDepth, NormType);
PERF_TEST_P(Sz_Depth_NormType, Normalize,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
Values(NormType(cv::NORM_INF),
NormType(cv::NORM_L1),
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::normalize(d_src, dst, alpha, beta, norm_type, type, cv::cuda::GpuMat(), d_norm_buf, d_cvt_buf);
- GPU_SANITY_CHECK(dst, 1e-6);
+ CUDA_SANITY_CHECK(dst, 1e-6);
}
else
{
// MeanStdDev
PERF_TEST_P(Sz, MeanStdDev,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
}
};
-GPU_TEST_P(GEMM, Accuracy)
+CUDA_TEST_P(GEMM, Accuracy)
{
cv::Mat src1 = randomMat(size, type, -10.0, 10.0);
cv::Mat src2 = randomMat(size, type, -10.0, 10.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, GEMM, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, GEMM, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_32FC1), MatType(CV_32FC2), MatType(CV_64FC1), MatType(CV_64FC2)),
}
};
-GPU_TEST_P(Integral, Accuracy)
+CUDA_TEST_P(Integral, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Integral, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Integral, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
}
};
-GPU_TEST_P(MulSpectrums, Simple)
+CUDA_TEST_P(MulSpectrums, Simple)
{
cv::cuda::GpuMat c;
cv::cuda::mulSpectrums(loadMat(a), loadMat(b), c, flag, false);
EXPECT_MAT_NEAR(c_gold, c, 1e-2);
}
-GPU_TEST_P(MulSpectrums, Scaled)
+CUDA_TEST_P(MulSpectrums, Scaled)
{
float scale = 1.f / size.area();
EXPECT_MAT_NEAR(c_gold, c, 1e-2);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, MulSpectrums, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MulSpectrums, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(DftFlags(0), DftFlags(cv::DFT_ROWS))));
}
}
-GPU_TEST_P(Dft, C2C)
+CUDA_TEST_P(Dft, C2C)
{
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);
}
}
-GPU_TEST_P(Dft, R2CThenC2R)
+CUDA_TEST_P(Dft, R2CThenC2R)
{
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);
testR2CThenC2R("single row 1", cols + 1, 1, true);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Dft, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Dft, ALL_DEVICES);
////////////////////////////////////////////////////////
// Convolve
}
};
-GPU_TEST_P(Convolve, Accuracy)
+CUDA_TEST_P(Convolve, Accuracy)
{
cv::Mat src = randomMat(size, CV_32FC1, 0.0, 100.0);
cv::Mat kernel = randomMat(cv::Size(ksize, ksize), CV_32FC1, 0.0, 1.0);
EXPECT_MAT_NEAR(dst, dst_gold, 1e-1);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Convolve, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Convolve, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(KSize(3), KSize(7), KSize(11), KSize(17), KSize(19), KSize(23), KSize(45)),
}
};
-GPU_TEST_P(Merge, Accuracy)
+CUDA_TEST_P(Merge, Accuracy)
{
std::vector<cv::Mat> src;
src.reserve(channels);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Merge, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Merge, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(Split, Accuracy)
+CUDA_TEST_P(Split, Accuracy)
{
cv::Mat src = randomMat(size, type);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Split, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Split, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(Transpose, Accuracy)
+CUDA_TEST_P(Transpose, Accuracy)
{
cv::Mat src = randomMat(size, type);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Transpose, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Transpose, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1),
}
};
-GPU_TEST_P(Flip, Accuracy)
+CUDA_TEST_P(Flip, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Flip, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Flip, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1),
}
};
-GPU_TEST_P(LUT, OneChannel)
+CUDA_TEST_P(LUT, OneChannel)
{
cv::Mat src = randomMat(size, type);
cv::Mat lut = randomMat(cv::Size(256, 1), CV_8UC1);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(LUT, MultiChannel)
+CUDA_TEST_P(LUT, MultiChannel)
{
cv::Mat src = randomMat(size, type);
cv::Mat lut = randomMat(cv::Size(256, 1), CV_MAKE_TYPE(CV_8U, src.channels()));
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, LUT, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, LUT, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3)),
}
};
-GPU_TEST_P(CopyMakeBorder, Accuracy)
+CUDA_TEST_P(CopyMakeBorder, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Scalar val = randomScalar(0, 255);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, CopyMakeBorder, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CopyMakeBorder, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1),
}
};
-GPU_TEST_P(Add_Array, Accuracy)
+CUDA_TEST_P(Add_Array, Accuracy)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Add_Array, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Add_Array_Mask, Accuracy)
+CUDA_TEST_P(Add_Array_Mask, Accuracy)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Add_Array_Mask, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Array_Mask, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Add_Scalar, WithOutMask)
+CUDA_TEST_P(Add_Scalar, WithOutMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-GPU_TEST_P(Add_Scalar, WithMask)
+CUDA_TEST_P(Add_Scalar, WithMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Add_Scalar, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Add_Scalar_First, WithOutMask)
+CUDA_TEST_P(Add_Scalar_First, WithOutMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-GPU_TEST_P(Add_Scalar_First, WithMask)
+CUDA_TEST_P(Add_Scalar_First, WithMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Add_Scalar_First, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Scalar_First, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Subtract_Array, Accuracy)
+CUDA_TEST_P(Subtract_Array, Accuracy)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Subtract_Array, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Subtract_Array_Mask, Accuracy)
+CUDA_TEST_P(Subtract_Array_Mask, Accuracy)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Subtract_Array_Mask, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Array_Mask, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Subtract_Scalar, WithOutMask)
+CUDA_TEST_P(Subtract_Scalar, WithOutMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-GPU_TEST_P(Subtract_Scalar, WithMask)
+CUDA_TEST_P(Subtract_Scalar, WithMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Subtract_Scalar, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Subtract_Scalar_First, WithOutMask)
+CUDA_TEST_P(Subtract_Scalar_First, WithOutMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-GPU_TEST_P(Subtract_Scalar_First, WithMask)
+CUDA_TEST_P(Subtract_Scalar_First, WithMask)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Subtract_Scalar_First, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Scalar_First, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Multiply_Array, WithOutScale)
+CUDA_TEST_P(Multiply_Array, WithOutScale)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
}
}
-GPU_TEST_P(Multiply_Array, WithScale)
+CUDA_TEST_P(Multiply_Array, WithScale)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Multiply_Array, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Multiply_Array_Special, Case_8UC4x_32FC1)
+CUDA_TEST_P(Multiply_Array_Special, Case_8UC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_8UC4);
cv::Mat mat2 = randomMat(size, CV_32FC1);
}
}
-GPU_TEST_P(Multiply_Array_Special, Case_16SC4x_32FC1)
+CUDA_TEST_P(Multiply_Array_Special, Case_16SC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_16SC4);
cv::Mat mat2 = randomMat(size, CV_32FC1);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Multiply_Array_Special, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Array_Special, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
}
};
-GPU_TEST_P(Multiply_Scalar, WithOutScale)
+CUDA_TEST_P(Multiply_Scalar, WithOutScale)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
-GPU_TEST_P(Multiply_Scalar, WithScale)
+CUDA_TEST_P(Multiply_Scalar, WithScale)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Multiply_Scalar, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Multiply_Scalar_First, WithOutScale)
+CUDA_TEST_P(Multiply_Scalar_First, WithOutScale)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
-GPU_TEST_P(Multiply_Scalar_First, WithScale)
+CUDA_TEST_P(Multiply_Scalar_First, WithScale)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(0, 255);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Multiply_Scalar_First, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Scalar_First, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Divide_Array, WithOutScale)
+CUDA_TEST_P(Divide_Array, WithOutScale)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0);
}
}
-GPU_TEST_P(Divide_Array, WithScale)
+CUDA_TEST_P(Divide_Array, WithScale)
{
cv::Mat mat1 = randomMat(size, stype);
cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Divide_Array, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Divide_Array_Special, Case_8UC4x_32FC1)
+CUDA_TEST_P(Divide_Array_Special, Case_8UC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_8UC4);
cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
}
}
-GPU_TEST_P(Divide_Array_Special, Case_16SC4x_32FC1)
+CUDA_TEST_P(Divide_Array_Special, Case_16SC4x_32FC1)
{
cv::Mat mat1 = randomMat(size, CV_16SC4);
cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Divide_Array_Special, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Array_Special, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
}
};
-GPU_TEST_P(Divide_Scalar, WithOutScale)
+CUDA_TEST_P(Divide_Scalar, WithOutScale)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(1.0, 255.0);
}
}
-GPU_TEST_P(Divide_Scalar, WithScale)
+CUDA_TEST_P(Divide_Scalar, WithScale)
{
cv::Mat mat = randomMat(size, depth.first);
cv::Scalar val = randomScalar(1.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Divide_Scalar, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(Divide_Scalar_First, Accuracy)
+CUDA_TEST_P(Divide_Scalar_First, Accuracy)
{
double scale = randomDouble(0.0, 255.0);
cv::Mat mat = randomMat(size, depth.first, 1.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Divide_Scalar_First, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Scalar_First, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DEPTH_PAIRS,
}
};
-GPU_TEST_P(AbsDiff, Array)
+CUDA_TEST_P(AbsDiff, Array)
{
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
}
}
-GPU_TEST_P(AbsDiff, Scalar)
+CUDA_TEST_P(AbsDiff, Scalar)
{
cv::Mat src = randomMat(size, depth);
cv::Scalar val = randomScalar(0.0, 255.0);
}
}
-GPU_TEST_P(AbsDiff, Scalar_First)
+CUDA_TEST_P(AbsDiff, Scalar_First)
{
cv::Mat src = randomMat(size, depth);
cv::Scalar val = randomScalar(0.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, AbsDiff, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, AbsDiff, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(Abs, Accuracy)
+CUDA_TEST_P(Abs, Accuracy)
{
cv::Mat src = randomMat(size, depth);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Abs, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Abs, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_16S), MatDepth(CV_32F)),
}
};
-GPU_TEST_P(Sqr, Accuracy)
+CUDA_TEST_P(Sqr, Accuracy)
{
cv::Mat src = randomMat(size, depth, 0, depth == CV_8U ? 16 : 255);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Sqr, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sqr, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U),
}
};
-GPU_TEST_P(Sqrt, Accuracy)
+CUDA_TEST_P(Sqrt, Accuracy)
{
cv::Mat src = randomMat(size, depth);
EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Sqrt, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sqrt, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U),
}
};
-GPU_TEST_P(Log, Accuracy)
+CUDA_TEST_P(Log, Accuracy)
{
cv::Mat src = randomMat(size, depth, 1.0, 255.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-6);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Log, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Log, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U),
}
};
-GPU_TEST_P(Exp, Accuracy)
+CUDA_TEST_P(Exp, Accuracy)
{
cv::Mat src = randomMat(size, depth, 0.0, 10.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-2);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Exp, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Exp, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U),
}
};
-GPU_TEST_P(Pow, Accuracy)
+CUDA_TEST_P(Pow, Accuracy)
{
cv::Mat src = randomMat(size, depth, 0.0, 10.0);
double power = randomDouble(2.0, 4.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Pow, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Pow, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(Compare_Array, Accuracy)
+CUDA_TEST_P(Compare_Array, Accuracy)
{
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Compare_Array, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Compare_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(Compare_Scalar, Accuracy)
+CUDA_TEST_P(Compare_Scalar, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Scalar sc = randomScalar(0.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Compare_Scalar, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Compare_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
TYPES(CV_8U, CV_64F, 1, 4),
}
};
-GPU_TEST_P(Bitwise_Array, Not)
+CUDA_TEST_P(Bitwise_Array, Not)
{
cv::cuda::GpuMat dst;
cv::cuda::bitwise_not(loadMat(src1), dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(Bitwise_Array, Or)
+CUDA_TEST_P(Bitwise_Array, Or)
{
cv::cuda::GpuMat dst;
cv::cuda::bitwise_or(loadMat(src1), loadMat(src2), dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(Bitwise_Array, And)
+CUDA_TEST_P(Bitwise_Array, And)
{
cv::cuda::GpuMat dst;
cv::cuda::bitwise_and(loadMat(src1), loadMat(src2), dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(Bitwise_Array, Xor)
+CUDA_TEST_P(Bitwise_Array, Xor)
{
cv::cuda::GpuMat dst;
cv::cuda::bitwise_xor(loadMat(src1), loadMat(src2), dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Bitwise_Array, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Bitwise_Array, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
TYPES(CV_8U, CV_32S, 1, 4)));
}
};
-GPU_TEST_P(Bitwise_Scalar, Or)
+CUDA_TEST_P(Bitwise_Scalar, Or)
{
cv::cuda::GpuMat dst;
cv::cuda::bitwise_or(loadMat(src), val, dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(Bitwise_Scalar, And)
+CUDA_TEST_P(Bitwise_Scalar, And)
{
cv::cuda::GpuMat dst;
cv::cuda::bitwise_and(loadMat(src), val, dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(Bitwise_Scalar, Xor)
+CUDA_TEST_P(Bitwise_Scalar, Xor)
{
cv::cuda::GpuMat dst;
cv::cuda::bitwise_xor(loadMat(src), val, dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Bitwise_Scalar, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Bitwise_Scalar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
}
};
-GPU_TEST_P(RShift, Accuracy)
+CUDA_TEST_P(RShift, Accuracy)
{
int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, RShift, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, RShift, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U),
}
};
-GPU_TEST_P(LShift, Accuracy)
+CUDA_TEST_P(LShift, Accuracy)
{
int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, LShift, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, LShift, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
}
};
-GPU_TEST_P(Min, Array)
+CUDA_TEST_P(Min, Array)
{
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
}
}
-GPU_TEST_P(Min, Scalar)
+CUDA_TEST_P(Min, Scalar)
{
cv::Mat src = randomMat(size, depth);
double val = randomDouble(0.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Min, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Min, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(Max, Array)
+CUDA_TEST_P(Max, Array)
{
cv::Mat src1 = randomMat(size, depth);
cv::Mat src2 = randomMat(size, depth);
}
}
-GPU_TEST_P(Max, Scalar)
+CUDA_TEST_P(Max, Scalar)
{
cv::Mat src = randomMat(size, depth);
double val = randomDouble(0.0, 255.0);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Max, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Max, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(AddWeighted, Accuracy)
+CUDA_TEST_P(AddWeighted, Accuracy)
{
cv::Mat src1 = randomMat(size, depth1);
cv::Mat src2 = randomMat(size, depth2);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, AddWeighted, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, AddWeighted, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(Threshold, Accuracy)
+CUDA_TEST_P(Threshold, Accuracy)
{
cv::Mat src = randomMat(size, type);
double maxVal = randomDouble(20.0, 127.0);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Threshold, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Threshold, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
}
};
-GPU_TEST_P(Magnitude, NPP)
+CUDA_TEST_P(Magnitude, NPP)
{
cv::Mat src = randomMat(size, CV_32FC2);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
}
-GPU_TEST_P(Magnitude, Sqr_NPP)
+CUDA_TEST_P(Magnitude, Sqr_NPP)
{
cv::Mat src = randomMat(size, CV_32FC2);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
}
-GPU_TEST_P(Magnitude, Accuracy)
+CUDA_TEST_P(Magnitude, Accuracy)
{
cv::Mat x = randomMat(size, CV_32FC1);
cv::Mat y = randomMat(size, CV_32FC1);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
}
-GPU_TEST_P(Magnitude, Sqr_Accuracy)
+CUDA_TEST_P(Magnitude, Sqr_Accuracy)
{
cv::Mat x = randomMat(size, CV_32FC1);
cv::Mat y = randomMat(size, CV_32FC1);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Magnitude, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Magnitude, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
}
};
-GPU_TEST_P(Phase, Accuracy)
+CUDA_TEST_P(Phase, Accuracy)
{
cv::Mat x = randomMat(size, CV_32FC1);
cv::Mat y = randomMat(size, CV_32FC1);
EXPECT_MAT_NEAR(dst_gold, dst, angleInDegrees ? 1e-2 : 1e-3);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Phase, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Phase, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
}
};
-GPU_TEST_P(CartToPolar, Accuracy)
+CUDA_TEST_P(CartToPolar, Accuracy)
{
cv::Mat x = randomMat(size, CV_32FC1);
cv::Mat y = randomMat(size, CV_32FC1);
EXPECT_MAT_NEAR(angle_gold, angle, angleInDegrees ? 1e-2 : 1e-3);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, CartToPolar, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CartToPolar, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
}
};
-GPU_TEST_P(PolarToCart, Accuracy)
+CUDA_TEST_P(PolarToCart, Accuracy)
{
cv::Mat magnitude = randomMat(size, CV_32FC1);
cv::Mat angle = randomMat(size, CV_32FC1);
EXPECT_MAT_NEAR(y_gold, y, 1e-4);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, PolarToCart, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, PolarToCart, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
}
};
-GPU_TEST_P(Norm, Accuracy)
+CUDA_TEST_P(Norm, Accuracy)
{
cv::Mat src = randomMat(size, depth);
cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Norm, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Norm, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U),
}
};
-GPU_TEST_P(NormDiff, Accuracy)
+CUDA_TEST_P(NormDiff, Accuracy)
{
cv::Mat src1 = randomMat(size, CV_8UC1);
cv::Mat src2 = randomMat(size, CV_8UC1);
EXPECT_NEAR(val_gold, val, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, NormDiff, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, NormDiff, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
}
};
-GPU_TEST_P(Sum, Simple)
+CUDA_TEST_P(Sum, Simple)
{
cv::Scalar val = cv::cuda::sum(loadMat(src, useRoi));
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
-GPU_TEST_P(Sum, Abs)
+CUDA_TEST_P(Sum, Abs)
{
cv::Scalar val = cv::cuda::absSum(loadMat(src, useRoi));
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
-GPU_TEST_P(Sum, Sqr)
+CUDA_TEST_P(Sum, Sqr)
{
cv::Scalar val = cv::cuda::sqrSum(loadMat(src, useRoi));
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Sum, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sum, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
TYPES(CV_8U, CV_64F, 1, 4),
}
};
-GPU_TEST_P(MinMax, WithoutMask)
+CUDA_TEST_P(MinMax, WithoutMask)
{
cv::Mat src = randomMat(size, depth);
}
}
-GPU_TEST_P(MinMax, WithMask)
+CUDA_TEST_P(MinMax, WithMask)
{
cv::Mat src = randomMat(size, depth);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
}
}
-GPU_TEST_P(MinMax, NullPtr)
+CUDA_TEST_P(MinMax, NullPtr)
{
cv::Mat src = randomMat(size, depth);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, MinMax, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMax, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(MinMaxLoc, WithoutMask)
+CUDA_TEST_P(MinMaxLoc, WithoutMask)
{
cv::Mat src = randomMat(size, depth);
}
}
-GPU_TEST_P(MinMaxLoc, WithMask)
+CUDA_TEST_P(MinMaxLoc, WithMask)
{
cv::Mat src = randomMat(size, depth);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
}
}
-GPU_TEST_P(MinMaxLoc, NullPtr)
+CUDA_TEST_P(MinMaxLoc, NullPtr)
{
cv::Mat src = randomMat(size, depth);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, MinMaxLoc, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMaxLoc, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(CountNonZero, Accuracy)
+CUDA_TEST_P(CountNonZero, Accuracy)
{
cv::Mat srcBase = randomMat(size, CV_8U, 0.0, 1.5);
cv::Mat src;
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, CountNonZero, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CountNonZero, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
};
-GPU_TEST_P(Reduce, Rows)
+CUDA_TEST_P(Reduce, Rows)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
}
-GPU_TEST_P(Reduce, Cols)
+CUDA_TEST_P(Reduce, Cols)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Reduce, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Reduce, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U),
};
-GPU_TEST_P(Normalize, WithOutMask)
+CUDA_TEST_P(Normalize, WithOutMask)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
}
-GPU_TEST_P(Normalize, WithMask)
+CUDA_TEST_P(Normalize, WithMask)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-6);
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, Normalize, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Normalize, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
}
};
-GPU_TEST_P(MeanStdDev, Accuracy)
+CUDA_TEST_P(MeanStdDev, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Arithm, MeanStdDev, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MeanStdDev, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
cap >> frame;
ASSERT_FALSE(frame.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame), foreground;
stopTimer();
}
- GPU_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
#ifdef HAVE_OPENCV_CUDAIMGPROC
cv::cuda::GpuMat background3, background;
d_fgd->getBackgroundImage(background3);
cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA);
- GPU_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
#endif
}
else
PERF_TEST_P(Video_Cn_LearningRate, MOG,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(0.0, 0.01)))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
cv::swap(temp, frame);
}
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::BackgroundSubtractor> d_mog = cv::cuda::createBackgroundSubtractorMOG();
stopTimer();
}
- GPU_SANITY_CHECK(foreground);
+ CUDA_SANITY_CHECK(foreground);
}
else
{
PERF_TEST_P(Video_Cn, MOG2,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::swap(temp, frame);
}
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::BackgroundSubtractorMOG2> d_mog2 = cv::cuda::createBackgroundSubtractorMOG2();
d_mog2->setDetectShadows(false);
stopTimer();
}
- GPU_SANITY_CHECK(foreground);
+ CUDA_SANITY_CHECK(foreground);
}
else
{
PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::Mat frame;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::BackgroundSubtractor> d_mog2 = cv::cuda::createBackgroundSubtractorMOG2();
TEST_CYCLE() d_mog2->getBackgroundImage(background);
- GPU_SANITY_CHECK(background, 1);
+ CUDA_SANITY_CHECK(background, 1);
}
else
{
PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
Combine(Values(string("gpu/video/768x576.avi")),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
cv::swap(temp, frame);
}
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame);
cv::cuda::GpuMat foreground;
stopTimer();
}
- GPU_SANITY_CHECK(foreground);
+ CUDA_SANITY_CHECK(foreground);
}
else
{
}
};
-GPU_TEST_P(FGDStatModel, Update)
+CUDA_TEST_P(FGDStatModel, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
}
}
-INSTANTIATE_TEST_CASE_P(GPU_BgSegm, FGDStatModel, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"))));
}
};
-GPU_TEST_P(MOG, Update)
+CUDA_TEST_P(MOG, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
}
}
-INSTANTIATE_TEST_CASE_P(GPU_BgSegm, MOG, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, MOG, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(UseGray(true), UseGray(false)),
}
};
-GPU_TEST_P(MOG2, Update)
+CUDA_TEST_P(MOG2, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
}
}
-GPU_TEST_P(MOG2, getBackgroundImage)
+CUDA_TEST_P(MOG2, getBackgroundImage)
{
if (useGray)
return;
ASSERT_MAT_NEAR(background_gold, background, 0);
}
-INSTANTIATE_TEST_CASE_P(GPU_BgSegm, MOG2, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, MOG2, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(UseGray(true), UseGray(false)),
{
};
-GPU_TEST_P(GMG, Accuracy)
+CUDA_TEST_P(GMG, Accuracy)
{
const cv::cuda::DeviceInfo devInfo = GET_PARAM(0);
cv::cuda::setDevice(devInfo.deviceID());
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
}
-INSTANTIATE_TEST_CASE_P(GPU_BgSegm, GMG, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, GMG, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8U), MatType(CV_16U), MatType(CV_32F)),
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
const string inputFile = perf::TestBase::getDataPath(GetParam());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader(inputFile);
TEST_CYCLE_N(10) d_reader->nextFrame(frame);
- GPU_SANITY_CHECK(frame);
+ CUDA_SANITY_CHECK(frame);
}
else
{
cv::Mat frame;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cudacodec::VideoWriter> d_writer;
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
//////////////////////////////////////////////////////
// VideoReader
-GPU_TEST_P(Video, Reader)
+CUDA_TEST_P(Video, Reader)
{
cv::cuda::setDevice(GET_PARAM(0).deviceID());
#ifdef WIN32
-GPU_TEST_P(Video, Writer)
+CUDA_TEST_P(Video, Writer)
{
cv::cuda::setDevice(GET_PARAM(0).deviceID());
#endif // WIN32
-INSTANTIATE_TEST_CASE_P(GPU_Codec, Video, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Codec, Video, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
namespace cv { namespace cuda {
-class CV_EXPORTS BFMatcher_GPU
+class CV_EXPORTS BFMatcher_CUDA
{
public:
- explicit BFMatcher_GPU(int norm = cv::NORM_L2);
+ explicit BFMatcher_CUDA(int norm = cv::NORM_L2);
// Add descriptors to train descriptor collection
void add(const std::vector<GpuMat>& descCollection);
std::vector<GpuMat> trainDescCollection;
};
-class CV_EXPORTS FAST_GPU
+class CV_EXPORTS FAST_CUDA
{
public:
enum
// all features have same size
static const int FEATURE_SIZE = 7;
- explicit FAST_GPU(int threshold, bool nonmaxSupression = true, double keypointsRatio = 0.05);
+ explicit FAST_CUDA(int threshold, bool nonmaxSupression = true, double keypointsRatio = 0.05);
//! finds the keypoints using FAST detector
//! supports only CV_8UC1 images
GpuMat d_keypoints_;
};
-class CV_EXPORTS ORB_GPU
+class CV_EXPORTS ORB_CUDA
{
public:
enum
};
//! Constructor
- explicit ORB_GPU(int nFeatures = 500, float scaleFactor = 1.2f, int nLevels = 8, int edgeThreshold = 31,
+ explicit ORB_CUDA(int nFeatures = 500, float scaleFactor = 1.2f, int nLevels = 8, int edgeThreshold = 31,
int firstLevel = 0, int WTA_K = 2, int scoreType = 0, int patchSize = 31);
//! Compute the ORB features on an image
std::vector<GpuMat> keyPointsPyr_;
std::vector<int> keyPointsCount_;
- FAST_GPU fastDetector_;
+ FAST_CUDA fastDetector_;
Ptr<cuda::Filter> blurFilter;
const int threshold = GET_PARAM(1);
const bool nonMaxSuppersion = GET_PARAM(2);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::FAST_GPU d_fast(threshold, nonMaxSuppersion, 0.5);
+ cv::cuda::FAST_CUDA d_fast(threshold, nonMaxSuppersion, 0.5);
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat d_keypoints;
const int nFeatures = GET_PARAM(1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::ORB_GPU d_orb(nFeatures);
+ cv::cuda::ORB_CUDA d_orb(nFeatures);
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat d_keypoints, d_descriptors;
cv::Mat train(3000, desc_size, type);
declare.in(train, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::BFMatcher_GPU d_matcher(normType);
+ cv::cuda::BFMatcher_CUDA d_matcher(normType);
const cv::cuda::GpuMat d_query(query);
const cv::cuda::GpuMat d_train(train);
cv::Mat train(3000, desc_size, type);
declare.in(train, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::BFMatcher_GPU d_matcher(normType);
+ cv::cuda::BFMatcher_CUDA d_matcher(normType);
const cv::cuda::GpuMat d_query(query);
const cv::cuda::GpuMat d_train(train);
cv::Mat train(3000, desc_size, type);
declare.in(train, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::BFMatcher_GPU d_matcher(normType);
+ cv::cuda::BFMatcher_CUDA d_matcher(normType);
const cv::cuda::GpuMat d_query(query);
const cv::cuda::GpuMat d_train(train);
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
-cv::cuda::BFMatcher_GPU::BFMatcher_GPU(int) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::add(const std::vector<GpuMat>&) { throw_no_cuda(); }
-const std::vector<GpuMat>& cv::cuda::BFMatcher_GPU::getTrainDescriptors() const { throw_no_cuda(); return trainDescCollection; }
-void cv::cuda::BFMatcher_GPU::clear() { throw_no_cuda(); }
-bool cv::cuda::BFMatcher_GPU::empty() const { throw_no_cuda(); return true; }
-bool cv::cuda::BFMatcher_GPU::isMaskSupported() const { throw_no_cuda(); return true; }
-void cv::cuda::BFMatcher_GPU::matchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::matchDownload(const GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::matchConvert(const Mat&, const Mat&, std::vector<DMatch>&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::match(const GpuMat&, const GpuMat&, std::vector<DMatch>&, const GpuMat&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::makeGpuCollection(GpuMat&, GpuMat&, const std::vector<GpuMat>&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::matchCollection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::matchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::matchConvert(const Mat&, const Mat&, const Mat&, std::vector<DMatch>&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::match(const GpuMat&, std::vector<DMatch>&, const std::vector<GpuMat>&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, const GpuMat&, Stream&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatchDownload(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatchConvert(const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, int, const GpuMat&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatch2Collection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatch2Download(const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatch2Convert(const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::knnMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, int, const std::vector<GpuMat>&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const GpuMat&, Stream&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatchConvert(const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, float, const GpuMat&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatchCollection(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const std::vector<GpuMat>&, Stream&) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatchConvert(const Mat&, const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
-void cv::cuda::BFMatcher_GPU::radiusMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, float, const std::vector<GpuMat>&, bool) { throw_no_cuda(); }
+cv::cuda::BFMatcher_CUDA::BFMatcher_CUDA(int) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::add(const std::vector<GpuMat>&) { throw_no_cuda(); }
+const std::vector<GpuMat>& cv::cuda::BFMatcher_CUDA::getTrainDescriptors() const { throw_no_cuda(); return trainDescCollection; }
+void cv::cuda::BFMatcher_CUDA::clear() { throw_no_cuda(); }
+bool cv::cuda::BFMatcher_CUDA::empty() const { throw_no_cuda(); return true; }
+bool cv::cuda::BFMatcher_CUDA::isMaskSupported() const { throw_no_cuda(); return true; }
+void cv::cuda::BFMatcher_CUDA::matchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::matchDownload(const GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::matchConvert(const Mat&, const Mat&, std::vector<DMatch>&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::match(const GpuMat&, const GpuMat&, std::vector<DMatch>&, const GpuMat&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::makeGpuCollection(GpuMat&, GpuMat&, const std::vector<GpuMat>&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::matchCollection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::matchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::matchConvert(const Mat&, const Mat&, const Mat&, std::vector<DMatch>&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::match(const GpuMat&, std::vector<DMatch>&, const std::vector<GpuMat>&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, const GpuMat&, Stream&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatchDownload(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatchConvert(const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, int, const GpuMat&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatch2Collection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatch2Download(const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatch2Convert(const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::knnMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, int, const std::vector<GpuMat>&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const GpuMat&, Stream&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatchConvert(const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, float, const GpuMat&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatchCollection(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const std::vector<GpuMat>&, Stream&) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatchConvert(const Mat&, const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_no_cuda(); }
+void cv::cuda::BFMatcher_CUDA::radiusMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, float, const std::vector<GpuMat>&, bool) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
////////////////////////////////////////////////////////////////////
// Train collection
-cv::cuda::BFMatcher_GPU::BFMatcher_GPU(int norm_) : norm(norm_)
+cv::cuda::BFMatcher_CUDA::BFMatcher_CUDA(int norm_) : norm(norm_)
{
}
-void cv::cuda::BFMatcher_GPU::add(const std::vector<GpuMat>& descCollection)
+void cv::cuda::BFMatcher_CUDA::add(const std::vector<GpuMat>& descCollection)
{
trainDescCollection.insert(trainDescCollection.end(), descCollection.begin(), descCollection.end());
}
-const std::vector<GpuMat>& cv::cuda::BFMatcher_GPU::getTrainDescriptors() const
+const std::vector<GpuMat>& cv::cuda::BFMatcher_CUDA::getTrainDescriptors() const
{
return trainDescCollection;
}
-void cv::cuda::BFMatcher_GPU::clear()
+void cv::cuda::BFMatcher_CUDA::clear()
{
trainDescCollection.clear();
}
-bool cv::cuda::BFMatcher_GPU::empty() const
+bool cv::cuda::BFMatcher_CUDA::empty() const
{
return trainDescCollection.empty();
}
-bool cv::cuda::BFMatcher_GPU::isMaskSupported() const
+bool cv::cuda::BFMatcher_CUDA::isMaskSupported() const
{
return true;
}
////////////////////////////////////////////////////////////////////
// Match
-void cv::cuda::BFMatcher_GPU::matchSingle(const GpuMat& query, const GpuMat& train,
+void cv::cuda::BFMatcher_CUDA::matchSingle(const GpuMat& query, const GpuMat& train,
GpuMat& trainIdx, GpuMat& distance,
const GpuMat& mask, Stream& stream)
{
func(query, train, mask, trainIdx, distance, StreamAccessor::getStream(stream));
}
-void cv::cuda::BFMatcher_GPU::matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches)
+void cv::cuda::BFMatcher_CUDA::matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches)
{
if (trainIdx.empty() || distance.empty())
return;
matchConvert(trainIdxCPU, distanceCPU, matches);
}
-void cv::cuda::BFMatcher_GPU::matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches)
+void cv::cuda::BFMatcher_CUDA::matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches)
{
if (trainIdx.empty() || distance.empty())
return;
}
}
-void cv::cuda::BFMatcher_GPU::match(const GpuMat& query, const GpuMat& train,
+void cv::cuda::BFMatcher_CUDA::match(const GpuMat& query, const GpuMat& train,
std::vector<DMatch>& matches, const GpuMat& mask)
{
GpuMat trainIdx, distance;
matchDownload(trainIdx, distance, matches);
}
-void cv::cuda::BFMatcher_GPU::makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
+void cv::cuda::BFMatcher_CUDA::makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
const std::vector<GpuMat>& masks)
{
if (empty())
}
}
-void cv::cuda::BFMatcher_GPU::matchCollection(const GpuMat& query, const GpuMat& trainCollection,
+void cv::cuda::BFMatcher_CUDA::matchCollection(const GpuMat& query, const GpuMat& trainCollection,
GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
const GpuMat& masks, Stream& stream)
{
func(query, trainCollection, masks, trainIdx, imgIdx, distance, StreamAccessor::getStream(stream));
}
-void cv::cuda::BFMatcher_GPU::matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector<DMatch>& matches)
+void cv::cuda::BFMatcher_CUDA::matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector<DMatch>& matches)
{
if (trainIdx.empty() || imgIdx.empty() || distance.empty())
return;
matchConvert(trainIdxCPU, imgIdxCPU, distanceCPU, matches);
}
-void cv::cuda::BFMatcher_GPU::matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector<DMatch>& matches)
+void cv::cuda::BFMatcher_CUDA::matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector<DMatch>& matches)
{
if (trainIdx.empty() || imgIdx.empty() || distance.empty())
return;
}
}
-void cv::cuda::BFMatcher_GPU::match(const GpuMat& query, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks)
+void cv::cuda::BFMatcher_CUDA::match(const GpuMat& query, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks)
{
GpuMat trainCollection;
GpuMat maskCollection;
////////////////////////////////////////////////////////////////////
// KnnMatch
-void cv::cuda::BFMatcher_GPU::knnMatchSingle(const GpuMat& query, const GpuMat& train,
+void cv::cuda::BFMatcher_CUDA::knnMatchSingle(const GpuMat& query, const GpuMat& train,
GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
const GpuMat& mask, Stream& stream)
{
func(query, train, k, mask, trainIdx, distance, allDist, StreamAccessor::getStream(stream));
}
-void cv::cuda::BFMatcher_GPU::knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
+void cv::cuda::BFMatcher_CUDA::knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || distance.empty())
knnMatchConvert(trainIdxCPU, distanceCPU, matches, compactResult);
}
-void cv::cuda::BFMatcher_GPU::knnMatchConvert(const Mat& trainIdx, const Mat& distance,
+void cv::cuda::BFMatcher_CUDA::knnMatchConvert(const Mat& trainIdx, const Mat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || distance.empty())
}
}
-void cv::cuda::BFMatcher_GPU::knnMatch(const GpuMat& query, const GpuMat& train,
+void cv::cuda::BFMatcher_CUDA::knnMatch(const GpuMat& query, const GpuMat& train,
std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask, bool compactResult)
{
GpuMat trainIdx, distance, allDist;
knnMatchDownload(trainIdx, distance, matches, compactResult);
}
-void cv::cuda::BFMatcher_GPU::knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
+void cv::cuda::BFMatcher_CUDA::knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
const GpuMat& maskCollection, Stream& stream)
{
func(query, trainCollection, maskCollection, trainIdx, imgIdx, distance, StreamAccessor::getStream(stream));
}
-void cv::cuda::BFMatcher_GPU::knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
+void cv::cuda::BFMatcher_CUDA::knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || imgIdx.empty() || distance.empty())
knnMatch2Convert(trainIdxCPU, imgIdxCPU, distanceCPU, matches, compactResult);
}
-void cv::cuda::BFMatcher_GPU::knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,
+void cv::cuda::BFMatcher_CUDA::knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || imgIdx.empty() || distance.empty())
};
}
-void cv::cuda::BFMatcher_GPU::knnMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, int k,
+void cv::cuda::BFMatcher_CUDA::knnMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, int k,
const std::vector<GpuMat>& masks, bool compactResult)
{
if (k == 2)
////////////////////////////////////////////////////////////////////
// RadiusMatch
-void cv::cuda::BFMatcher_GPU::radiusMatchSingle(const GpuMat& query, const GpuMat& train,
+void cv::cuda::BFMatcher_CUDA::radiusMatchSingle(const GpuMat& query, const GpuMat& train,
GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
const GpuMat& mask, Stream& stream)
{
func(query, train, maxDistance, mask, trainIdx, distance, nMatches, StreamAccessor::getStream(stream));
}
-void cv::cuda::BFMatcher_GPU::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches,
+void cv::cuda::BFMatcher_CUDA::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || distance.empty() || nMatches.empty())
radiusMatchConvert(trainIdxCPU, distanceCPU, nMatchesCPU, matches, compactResult);
}
-void cv::cuda::BFMatcher_GPU::radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches,
+void cv::cuda::BFMatcher_CUDA::radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || distance.empty() || nMatches.empty())
}
}
-void cv::cuda::BFMatcher_GPU::radiusMatch(const GpuMat& query, const GpuMat& train,
+void cv::cuda::BFMatcher_CUDA::radiusMatch(const GpuMat& query, const GpuMat& train,
std::vector< std::vector<DMatch> >& matches, float maxDistance, const GpuMat& mask, bool compactResult)
{
GpuMat trainIdx, distance, nMatches;
radiusMatchDownload(trainIdx, distance, nMatches, matches, compactResult);
}
-void cv::cuda::BFMatcher_GPU::radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches,
+void cv::cuda::BFMatcher_CUDA::radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches,
float maxDistance, const std::vector<GpuMat>& masks, Stream& stream)
{
if (query.empty() || empty())
trainIdx, imgIdx, distance, nMatches, StreamAccessor::getStream(stream));
}
-void cv::cuda::BFMatcher_GPU::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,
+void cv::cuda::BFMatcher_CUDA::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || imgIdx.empty() || distance.empty() || nMatches.empty())
radiusMatchConvert(trainIdxCPU, imgIdxCPU, distanceCPU, nMatchesCPU, matches, compactResult);
}
-void cv::cuda::BFMatcher_GPU::radiusMatchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, const Mat& nMatches,
+void cv::cuda::BFMatcher_CUDA::radiusMatchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, const Mat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult)
{
if (trainIdx.empty() || imgIdx.empty() || distance.empty() || nMatches.empty())
}
}
-void cv::cuda::BFMatcher_GPU::radiusMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches,
+void cv::cuda::BFMatcher_CUDA::radiusMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches,
float maxDistance, const std::vector<GpuMat>& masks, bool compactResult)
{
GpuMat trainIdx, imgIdx, distance, nMatches;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
-cv::cuda::FAST_GPU::FAST_GPU(int, bool, double) { throw_no_cuda(); }
-void cv::cuda::FAST_GPU::operator ()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::FAST_GPU::operator ()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::FAST_GPU::downloadKeypoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::FAST_GPU::convertKeypoints(const Mat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::FAST_GPU::release() { throw_no_cuda(); }
-int cv::cuda::FAST_GPU::calcKeyPointsLocation(const GpuMat&, const GpuMat&) { throw_no_cuda(); return 0; }
-int cv::cuda::FAST_GPU::getKeyPoints(GpuMat&) { throw_no_cuda(); return 0; }
+cv::cuda::FAST_CUDA::FAST_CUDA(int, bool, double) { throw_no_cuda(); }
+void cv::cuda::FAST_CUDA::operator ()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
+void cv::cuda::FAST_CUDA::operator ()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::FAST_CUDA::downloadKeypoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::FAST_CUDA::convertKeypoints(const Mat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::FAST_CUDA::release() { throw_no_cuda(); }
+int cv::cuda::FAST_CUDA::calcKeyPointsLocation(const GpuMat&, const GpuMat&) { throw_no_cuda(); return 0; }
+int cv::cuda::FAST_CUDA::getKeyPoints(GpuMat&) { throw_no_cuda(); return 0; }
#else /* !defined (HAVE_CUDA) */
-cv::cuda::FAST_GPU::FAST_GPU(int _threshold, bool _nonmaxSupression, double _keypointsRatio) :
+cv::cuda::FAST_CUDA::FAST_CUDA(int _threshold, bool _nonmaxSupression, double _keypointsRatio) :
nonmaxSupression(_nonmaxSupression), threshold(_threshold), keypointsRatio(_keypointsRatio), count_(0)
{
}
-void cv::cuda::FAST_GPU::operator ()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
+void cv::cuda::FAST_CUDA::operator ()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
{
if (image.empty())
return;
downloadKeypoints(d_keypoints_, keypoints);
}
-void cv::cuda::FAST_GPU::downloadKeypoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints)
+void cv::cuda::FAST_CUDA::downloadKeypoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints)
{
if (d_keypoints.empty())
return;
convertKeypoints(h_keypoints, keypoints);
}
-void cv::cuda::FAST_GPU::convertKeypoints(const Mat& h_keypoints, std::vector<KeyPoint>& keypoints)
+void cv::cuda::FAST_CUDA::convertKeypoints(const Mat& h_keypoints, std::vector<KeyPoint>& keypoints)
{
if (h_keypoints.empty())
return;
}
}
-void cv::cuda::FAST_GPU::operator ()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints)
+void cv::cuda::FAST_CUDA::operator ()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints)
{
calcKeyPointsLocation(img, mask);
keypoints.cols = getKeyPoints(keypoints);
}
}}}
-int cv::cuda::FAST_GPU::calcKeyPointsLocation(const GpuMat& img, const GpuMat& mask)
+int cv::cuda::FAST_CUDA::calcKeyPointsLocation(const GpuMat& img, const GpuMat& mask)
{
using namespace cv::cuda::device::fast;
return count_;
}
-int cv::cuda::FAST_GPU::getKeyPoints(GpuMat& keypoints)
+int cv::cuda::FAST_CUDA::getKeyPoints(GpuMat& keypoints)
{
using namespace cv::cuda::device::fast;
return count_;
}
-void cv::cuda::FAST_GPU::release()
+void cv::cuda::FAST_CUDA::release()
{
kpLoc_.release();
score_.release();
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
-cv::cuda::ORB_GPU::ORB_GPU(int, float, int, int, int, int, int, int) : fastDetector_(20) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::downloadKeyPoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::convertKeyPoints(const Mat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::release() { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::buildScalePyramids(const GpuMat&, const GpuMat&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::computeKeyPointsPyramid() { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::computeDescriptors(GpuMat&) { throw_no_cuda(); }
-void cv::cuda::ORB_GPU::mergeKeyPoints(GpuMat&) { throw_no_cuda(); }
+cv::cuda::ORB_CUDA::ORB_CUDA(int, float, int, int, int, int, int, int) : fastDetector_(20) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::downloadKeyPoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::convertKeyPoints(const Mat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::release() { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::buildScalePyramids(const GpuMat&, const GpuMat&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::computeKeyPointsPyramid() { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::computeDescriptors(GpuMat&) { throw_no_cuda(); }
+void cv::cuda::ORB_CUDA::mergeKeyPoints(GpuMat&) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
}
}
-cv::cuda::ORB_GPU::ORB_GPU(int nFeatures, float scaleFactor, int nLevels, int edgeThreshold, int firstLevel, int WTA_K, int scoreType, int patchSize) :
+cv::cuda::ORB_CUDA::ORB_CUDA(int nFeatures, float scaleFactor, int nLevels, int edgeThreshold, int firstLevel, int WTA_K, int scoreType, int patchSize) :
nFeatures_(nFeatures), scaleFactor_(scaleFactor), nLevels_(nLevels), edgeThreshold_(edgeThreshold), firstLevel_(firstLevel), WTA_K_(WTA_K),
scoreType_(scoreType), patchSize_(patchSize),
fastDetector_(DEFAULT_FAST_THRESHOLD)
}
}
-void cv::cuda::ORB_GPU::buildScalePyramids(const GpuMat& image, const GpuMat& mask)
+void cv::cuda::ORB_CUDA::buildScalePyramids(const GpuMat& image, const GpuMat& mask)
{
CV_Assert(image.type() == CV_8UC1);
CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()));
return;
}
- count = cull_gpu(keypoints.ptr<int>(FAST_GPU::LOCATION_ROW), keypoints.ptr<float>(FAST_GPU::RESPONSE_ROW), count, n_points);
+ count = cull_gpu(keypoints.ptr<int>(FAST_CUDA::LOCATION_ROW), keypoints.ptr<float>(FAST_CUDA::RESPONSE_ROW), count, n_points);
}
}
}
-void cv::cuda::ORB_GPU::computeKeyPointsPyramid()
+void cv::cuda::ORB_CUDA::computeKeyPointsPyramid()
{
using namespace cv::cuda::device::orb;
}
}
-void cv::cuda::ORB_GPU::computeDescriptors(GpuMat& descriptors)
+void cv::cuda::ORB_CUDA::computeDescriptors(GpuMat& descriptors)
{
using namespace cv::cuda::device::orb;
}
}
-void cv::cuda::ORB_GPU::mergeKeyPoints(GpuMat& keypoints)
+void cv::cuda::ORB_CUDA::mergeKeyPoints(GpuMat& keypoints)
{
using namespace cv::cuda::device::orb;
}
}
-void cv::cuda::ORB_GPU::downloadKeyPoints(const GpuMat &d_keypoints, std::vector<KeyPoint>& keypoints)
+void cv::cuda::ORB_CUDA::downloadKeyPoints(const GpuMat &d_keypoints, std::vector<KeyPoint>& keypoints)
{
if (d_keypoints.empty())
{
convertKeyPoints(h_keypoints, keypoints);
}
-void cv::cuda::ORB_GPU::convertKeyPoints(const Mat &d_keypoints, std::vector<KeyPoint>& keypoints)
+void cv::cuda::ORB_CUDA::convertKeyPoints(const Mat &d_keypoints, std::vector<KeyPoint>& keypoints)
{
if (d_keypoints.empty())
{
}
}
-void cv::cuda::ORB_GPU::operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints)
+void cv::cuda::ORB_CUDA::operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints)
{
buildScalePyramids(image, mask);
computeKeyPointsPyramid();
mergeKeyPoints(keypoints);
}
-void cv::cuda::ORB_GPU::operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors)
+void cv::cuda::ORB_CUDA::operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors)
{
buildScalePyramids(image, mask);
computeKeyPointsPyramid();
mergeKeyPoints(keypoints);
}
-void cv::cuda::ORB_GPU::operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
+void cv::cuda::ORB_CUDA::operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
{
(*this)(image, mask, d_keypoints_);
downloadKeyPoints(d_keypoints_, keypoints);
}
-void cv::cuda::ORB_GPU::operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors)
+void cv::cuda::ORB_CUDA::operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors)
{
(*this)(image, mask, d_keypoints_, descriptors);
downloadKeyPoints(d_keypoints_, keypoints);
}
-void cv::cuda::ORB_GPU::release()
+void cv::cuda::ORB_CUDA::release()
{
imagePyr_.clear();
maskPyr_.clear();
}
};
-GPU_TEST_P(FAST, Accuracy)
+CUDA_TEST_P(FAST, Accuracy)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
- cv::cuda::FAST_GPU fast(threshold);
+ cv::cuda::FAST_CUDA fast(threshold);
fast.nonmaxSupression = nonmaxSupression;
if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Features2D, FAST, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Features2D, FAST, testing::Combine(
ALL_DEVICES,
testing::Values(FAST_Threshold(25), FAST_Threshold(50)),
testing::Values(FAST_NonmaxSupression(false), FAST_NonmaxSupression(true))));
}
};
-GPU_TEST_P(ORB, Accuracy)
+CUDA_TEST_P(ORB, Accuracy)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
- cv::cuda::ORB_GPU orb(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize);
+ cv::cuda::ORB_CUDA orb(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize);
orb.blurForDescriptor = blurForDescriptor;
if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Features2D, ORB, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Features2D, ORB, testing::Combine(
ALL_DEVICES,
testing::Values(ORB_FeaturesCount(1000)),
testing::Values(ORB_ScaleFactor(1.2f)),
}
};
-GPU_TEST_P(BruteForceMatcher, Match_Single)
+CUDA_TEST_P(BruteForceMatcher, Match_Single)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
cv::cuda::GpuMat mask;
if (useMask)
ASSERT_EQ(0, badCount);
}
-GPU_TEST_P(BruteForceMatcher, Match_Collection)
+CUDA_TEST_P(BruteForceMatcher, Match_Collection)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
cv::cuda::GpuMat d_train(train);
ASSERT_EQ(0, badCount);
}
-GPU_TEST_P(BruteForceMatcher, KnnMatch_2_Single)
+CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Single)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
const int knn = 2;
ASSERT_EQ(0, badCount);
}
-GPU_TEST_P(BruteForceMatcher, KnnMatch_3_Single)
+CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Single)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
const int knn = 3;
ASSERT_EQ(0, badCount);
}
-GPU_TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
+CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
const int knn = 2;
ASSERT_EQ(0, badCount);
}
-GPU_TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
+CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
const int knn = 3;
ASSERT_EQ(0, badCount);
}
-GPU_TEST_P(BruteForceMatcher, RadiusMatch_Single)
+CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Single)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
const float radius = 1.f / countFactor;
}
}
-GPU_TEST_P(BruteForceMatcher, RadiusMatch_Collection)
+CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Collection)
{
- cv::cuda::BFMatcher_GPU matcher(normCode);
+ cv::cuda::BFMatcher_CUDA matcher(normCode);
const int n = 3;
const float radius = 1.f / countFactor * n;
}
}
-INSTANTIATE_TEST_CASE_P(GPU_Features2D, BruteForceMatcher, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Features2D, BruteForceMatcher, testing::Combine(
ALL_DEVICES,
testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2)),
testing::Values(DescriptorSize(57), DescriptorSize(64), DescriptorSize(83), DescriptorSize(128), DescriptorSize(179), DescriptorSize(256), DescriptorSize(304)),
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
DEF_PARAM_TEST(Sz_Type_KernelSz, cv::Size, MatType, int);
PERF_TEST_P(Sz_Type_KernelSz, Blur,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8UC1, CV_8UC4),
Values(3, 5, 7)))
{
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() blurFilter->apply(d_src, dst);
- GPU_SANITY_CHECK(dst, 1);
+ CUDA_SANITY_CHECK(dst, 1);
}
else
{
//////////////////////////////////////////////////////////////////////
// Filter2D
-PERF_TEST_P(Sz_Type_KernelSz, Filter2D, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(3, 5, 7, 9, 11, 13, 15)))
+PERF_TEST_P(Sz_Type_KernelSz, Filter2D, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(3, 5, 7, 9, 11, 13, 15)))
{
declare.time(20.0);
cv::Mat kernel(ksize, ksize, CV_32FC1);
declare.in(kernel, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() filter2D->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
//////////////////////////////////////////////////////////////////////
// Laplacian
-PERF_TEST_P(Sz_Type_KernelSz, Laplacian, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(1, 3)))
+PERF_TEST_P(Sz_Type_KernelSz, Laplacian, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(1, 3)))
{
declare.time(20.0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() laplacian->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
//////////////////////////////////////////////////////////////////////
// Sobel
-PERF_TEST_P(Sz_Type_KernelSz, Sobel, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1), Values(3, 5, 7, 9, 11, 13, 15)))
+PERF_TEST_P(Sz_Type_KernelSz, Sobel, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1), Values(3, 5, 7, 9, 11, 13, 15)))
{
declare.time(20.0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() sobel->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
//////////////////////////////////////////////////////////////////////
// Scharr
-PERF_TEST_P(Sz_Type, Scharr, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1)))
+PERF_TEST_P(Sz_Type, Scharr, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1)))
{
declare.time(20.0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() scharr->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
//////////////////////////////////////////////////////////////////////
// GaussianBlur
-PERF_TEST_P(Sz_Type_KernelSz, GaussianBlur, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1), Values(3, 5, 7, 9, 11, 13, 15)))
+PERF_TEST_P(Sz_Type_KernelSz, GaussianBlur, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4, CV_32FC1), Values(3, 5, 7, 9, 11, 13, 15)))
{
declare.time(20.0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() gauss->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
//////////////////////////////////////////////////////////////////////
// Erode
-PERF_TEST_P(Sz_Type, Erode, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4)))
+PERF_TEST_P(Sz_Type, Erode, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4)))
{
declare.time(20.0);
const cv::Mat ker = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() erode->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
//////////////////////////////////////////////////////////////////////
// Dilate
-PERF_TEST_P(Sz_Type, Dilate, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4)))
+PERF_TEST_P(Sz_Type, Dilate, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4)))
{
declare.time(20.0);
const cv::Mat ker = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() dilate->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Type_Op, cv::Size, MatType, MorphOp);
-PERF_TEST_P(Sz_Type_Op, MorphologyEx, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4), MorphOp::all()))
+PERF_TEST_P(Sz_Type_Op, MorphologyEx, Combine(CUDA_TYPICAL_MAT_SIZES, Values(CV_8UC1, CV_8UC4), MorphOp::all()))
{
declare.time(20.0);
const cv::Mat ker = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() morph->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
}
};
-GPU_TEST_P(Blur, Accuracy)
+CUDA_TEST_P(Blur, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, Blur, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, Blur, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4)),
}
};
-GPU_TEST_P(Filter2D, Accuracy)
+CUDA_TEST_P(Filter2D, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = randomMat(cv::Size(ksize.width, ksize.height), CV_32FC1, 0.0, 1.0);
EXPECT_MAT_NEAR(dst_gold, dst, CV_MAT_DEPTH(type) == CV_32F ? 1e-1 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, Filter2D, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, Filter2D, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(Laplacian, Accuracy)
+CUDA_TEST_P(Laplacian, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() < CV_32F ? 0.0 : 1e-3);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, Laplacian, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, Laplacian, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4), MatType(CV_32FC1)),
}
};
-GPU_TEST_P(SeparableLinearFilter, Accuracy)
+CUDA_TEST_P(SeparableLinearFilter, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat rowKernel = randomMat(Size(ksize.width, 1), CV_32FC1, 0.0, 1.0);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() < CV_32F ? 1.0 : 1e-2);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, SeparableLinearFilter, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, SeparableLinearFilter, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32F)),
}
};
-GPU_TEST_P(Sobel, Accuracy)
+CUDA_TEST_P(Sobel, Accuracy)
{
if (dx == 0 && dy == 0)
return;
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() < CV_32F ? 0.0 : 0.1);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, Sobel, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, Sobel, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32F)),
}
};
-GPU_TEST_P(Scharr, Accuracy)
+CUDA_TEST_P(Scharr, Accuracy)
{
if (dx + dy != 1)
return;
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() < CV_32F ? 0.0 : 0.1);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, Scharr, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, Scharr, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32F)),
}
};
-GPU_TEST_P(GaussianBlur, Accuracy)
+CUDA_TEST_P(GaussianBlur, Accuracy)
{
cv::Mat src = randomMat(size, type);
double sigma1 = randomDouble(0.1, 1.0);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() < CV_32F ? 4.0 : 1e-4);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, GaussianBlur, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, GaussianBlur, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32F)),
}
};
-GPU_TEST_P(Erode, Accuracy)
+CUDA_TEST_P(Erode, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = cv::Mat::ones(3, 3, CV_8U);
EXPECT_MAT_NEAR(getInnerROI(dst_gold, ksize), getInnerROI(dst, ksize), 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, Erode, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, Erode, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4)),
}
};
-GPU_TEST_P(Dilate, Accuracy)
+CUDA_TEST_P(Dilate, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = cv::Mat::ones(3, 3, CV_8U);
EXPECT_MAT_NEAR(getInnerROI(dst_gold, ksize), getInnerROI(dst, ksize), 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, Dilate, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, Dilate, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4)),
}
};
-GPU_TEST_P(MorphEx, Accuracy)
+CUDA_TEST_P(MorphEx, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = cv::Mat::ones(3, 3, CV_8U);
EXPECT_MAT_NEAR(getInnerROI(dst_gold, border), getInnerROI(dst, border), 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Filters, MorphEx, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Filters, MorphEx, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC4)),
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
DEF_PARAM_TEST(Sz_Depth_Cn_KernelSz, cv::Size, MatDepth, MatCn, int);
PERF_TEST_P(Sz_Depth_Cn_KernelSz, BilateralFilter,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_32F),
- GPU_CHANNELS_1_3,
+ CUDA_CHANNELS_1_3,
Values(3, 5, 9)))
{
declare.time(60.0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::bilateralFilter(d_src, dst, kernel_size, sigma_color, sigma_spatial, borderMode);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// BlendLinear
PERF_TEST_P(Sz_Depth_Cn, BlendLinear,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_32F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const cv::Mat weights1(size, CV_32FC1, cv::Scalar::all(0.5));
const cv::Mat weights2(size, CV_32FC1, cv::Scalar::all(0.5));
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_img1(img1);
const cv::cuda::GpuMat d_img2(img2);
TEST_CYCLE() cv::cuda::blendLinear(d_img1, d_img2, d_weights1, d_weights2, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
const double low_thresh = 50.0;
const double high_thresh = 100.0;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_image(image);
cv::cuda::GpuMat dst;
TEST_CYCLE() canny->detect(d_image, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Code, cv::Size, MatDepth, CvtColorInfo);
PERF_TEST_P(Sz_Depth_Code, CvtColor,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_32F),
Values(CvtColorInfo(4, 4, cv::COLOR_RGBA2BGRA),
CvtColorInfo(4, 1, cv::COLOR_BGRA2GRAY),
cv::Mat src(size, CV_MAKETYPE(depth, info.scn));
cv::randu(src, 0, depth == CV_8U ? 255.0 : 1.0);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::cvtColor(d_src, dst, info.code, info.dcn);
- GPU_SANITY_CHECK(dst, 1e-4);
+ CUDA_SANITY_CHECK(dst, 1e-4);
}
else
{
}
PERF_TEST_P(Sz_Depth_Code, CvtColorBayer,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U),
Values(CvtColorInfo(1, 3, cv::COLOR_BayerBG2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerGB2BGR),
cv::Mat src(size, CV_MAKETYPE(depth, info.scn));
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::cvtColor(d_src, dst, info.code, info.dcn);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Code, cv::Size, DemosaicingCode);
PERF_TEST_P(Sz_Code, Demosaicing,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
DemosaicingCode::all()))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::demosaicing(d_src, dst, code);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// SwapChannels
PERF_TEST_P(Sz, SwapChannels,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
const int dstOrder[] = {2, 1, 0, 3};
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat dst(src);
TEST_CYCLE() cv::cuda::swapChannels(dst, dstOrder);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Type_Op, cv::Size, MatType, AlphaOp);
PERF_TEST_P(Sz_Type_Op, AlphaComp,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8UC4, CV_16UC4, CV_32SC4, CV_32FC4),
AlphaOp::all()))
{
cv::Mat img2(size, type);
declare.in(img1, img2, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_img1(img1);
const cv::cuda::GpuMat d_img2(img2);
TEST_CYCLE() cv::cuda::alphaComp(d_img1, d_img2, dst, alpha_op);
- GPU_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
}
else
{
const double k = 0.5;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat dst;
TEST_CYCLE() harris->compute(d_img, dst);
- GPU_SANITY_CHECK(dst, 1e-4);
+ CUDA_SANITY_CHECK(dst, 1e-4);
}
else
{
img.convertTo(img, type, type == CV_32F ? 1.0 / 255.0 : 1.0);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat dst;
TEST_CYCLE() minEigenVal->compute(d_img, dst);
- GPU_SANITY_CHECK(dst, 1e-4);
+ CUDA_SANITY_CHECK(dst, 1e-4);
}
else
{
const int maxCorners = 8000;
const double qualityLevel = 0.01;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cuda::CornersDetector> d_detector = cv::cuda::createGoodFeaturesToTrackDetector(image.type(), maxCorners, qualityLevel, minDistance);
TEST_CYCLE() d_detector->detect(d_image, pts);
- GPU_SANITY_CHECK(pts);
+ CUDA_SANITY_CHECK(pts);
}
else
{
// HistEvenC1
PERF_TEST_P(Sz_Depth, HistEvenC1,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_16S)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::histEven(d_src, dst, d_buf, 30, 0, 180);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// HistEvenC4
PERF_TEST_P(Sz_Depth, HistEvenC4,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_16S)))
{
const cv::Size size = GET_PARAM(0);
int lowerLevel[] = {0, 0, 0, 0};
int upperLevel[] = {180, 180, 180, 180};
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_hist[4];
// CalcHist
PERF_TEST_P(Sz, CalcHist,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::calcHist(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// EqualizeHist
PERF_TEST_P(Sz, EqualizeHist,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::equalizeHist(d_src, dst, d_buf);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_ClipLimit, cv::Size, double);
PERF_TEST_P(Sz_ClipLimit, CLAHE,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(0.0, 40.0)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cuda::CLAHE> clahe = cv::cuda::createCLAHE(clipLimit);
cv::cuda::GpuMat d_src(src);
TEST_CYCLE() clahe->apply(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
}
PERF_TEST_P(Sz, HoughLines,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
declare.time(30.0);
cv::line(src, cv::Point(200, 0), cv::Point(200, src.rows), cv::Scalar::all(255), 1);
cv::line(src, cv::Point(400, 0), cv::Point(400, src.rows), cv::Scalar::all(255), 1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_lines;
cv::Mat mask;
cv::Canny(image, mask, 50, 100);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_mask(mask);
cv::cuda::GpuMat d_lines;
DEF_PARAM_TEST(Sz_Dp_MinDist, cv::Size, float, float);
PERF_TEST_P(Sz_Dp_MinDist, HoughCircles,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(1.0f, 2.0f, 4.0f),
Values(1.0f)))
{
cv::circle(src, cv::Point(200, 200), 25, cv::Scalar::all(255), -1);
cv::circle(src, cv::Point(200, 100), 25, cv::Scalar::all(255), -1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_circles;
//////////////////////////////////////////////////////////////////////
// GeneralizedHough
-PERF_TEST_P(Sz, GeneralizedHoughBallard, GPU_TYPICAL_MAT_SIZES)
+PERF_TEST_P(Sz, GeneralizedHoughBallard, CUDA_TYPICAL_MAT_SIZES)
{
declare.time(10);
cv::Sobel(image, dx, CV_32F, 1, 0);
cv::Sobel(image, dy, CV_32F, 0, 1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::GeneralizedHoughBallard> alg = cv::cuda::createGeneralizedHoughBallard();
TEST_CYCLE() alg->detect(d_edges, d_dx, d_dy, positions);
- GPU_SANITY_CHECK(positions);
+ CUDA_SANITY_CHECK(positions);
}
else
{
}
}
-PERF_TEST_P(Sz, GeneralizedHoughGuil, GPU_TYPICAL_MAT_SIZES)
+PERF_TEST_P(Sz, GeneralizedHoughGuil, CUDA_TYPICAL_MAT_SIZES)
{
declare.time(10);
cv::Sobel(image, dx, CV_32F, 1, 0);
cv::Sobel(image, dy, CV_32F, 0, 1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::GeneralizedHoughGuil> alg = cv::cuda::createGeneralizedHoughGuil();
alg->setMaxAngle(90.0);
TEST_CYCLE() alg->detect(d_edges, d_dx, d_dy, positions);
- GPU_SANITY_CHECK(positions);
+ CUDA_SANITY_CHECK(positions);
}
else
{
DEF_PARAM_TEST(Sz_TemplateSz_Cn_Method, cv::Size, cv::Size, MatCn, TemplateMethod);
PERF_TEST_P(Sz_TemplateSz_Cn_Method, MatchTemplate8U,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(cv::Size(5, 5), cv::Size(16, 16), cv::Size(30, 30)),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
TemplateMethod::all()))
{
declare.time(300.0);
cv::Mat templ(templ_size, CV_MAKE_TYPE(CV_8U, cn));
declare.in(image, templ, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_image(image);
const cv::cuda::GpuMat d_templ(templ);
TEST_CYCLE() alg->match(d_image, d_templ, dst);
- GPU_SANITY_CHECK(dst, 1e-5, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-5, ERROR_RELATIVE);
}
else
{
// MatchTemplate32F
PERF_TEST_P(Sz_TemplateSz_Cn_Method, MatchTemplate32F,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(cv::Size(5, 5), cv::Size(16, 16), cv::Size(30, 30)),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))))
{
declare.time(300.0);
cv::Mat templ(templ_size, CV_MAKE_TYPE(CV_32F, cn));
declare.in(image, templ, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_image(image);
const cv::cuda::GpuMat d_templ(templ);
TEST_CYCLE() alg->match(d_image, d_templ, dst);
- GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
else
{
const int sp = 50;
const int sr = 50;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(rgba);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::meanShiftFiltering(d_src, dst, sp, sr);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
const int sp = 50;
const int sr = 50;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(rgba);
cv::cuda::GpuMat dstr;
TEST_CYCLE() cv::cuda::meanShiftProc(d_src, dstr, dstsp, sp, sr);
- GPU_SANITY_CHECK(dstr);
- GPU_SANITY_CHECK(dstsp);
+ CUDA_SANITY_CHECK(dstr);
+ CUDA_SANITY_CHECK(dstsp);
}
else
{
const int sr = 10;
const int minsize = 20;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(rgba);
cv::Mat dst;
TEST_CYCLE() cv::cuda::meanShiftSegmentation(d_src, dst, sp, sr, minsize);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
void write(FileStorage& fs) const
{
- fs << "name" << "Canny_GPU"
+ fs << "name" << "Canny_CUDA"
<< "low_thresh" << low_thresh_
<< "high_thresh" << high_thresh_
<< "apperture_size" << apperture_size_
void read(const FileNode& fn)
{
- CV_Assert( String(fn["name"]) == "Canny_GPU" );
+ CV_Assert( String(fn["name"]) == "Canny_CUDA" );
low_thresh_ = (double)fn["low_thresh"];
high_thresh_ = (double)fn["high_thresh"];
apperture_size_ = (int)fn["apperture_size"];
namespace cv { namespace cuda { namespace device
{
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type)
{
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type)
{
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
- OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type)
+ OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
-#define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \
+#define OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, traits) \
void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) \
{ \
traits::functor_type functor = traits::create_functor(); \
cv::cuda::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \
}
-#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits)
-
-#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
-
-#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
-
-#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
- OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra)
-
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra)
- OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra)
-
- #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR
- #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE
- #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL
- #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F
- #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL
+#define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(name) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, name ## _traits)
+
+#define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(name) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
+
+#define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(name) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
+
+#define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra)
+
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra)
+ OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra)
+
+ #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR
+ #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE
+ #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL
+ #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F
+ #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
//////////////////////////////////////////////////////////////////////
// Prepared_SQDIFF_NORMED
- // normAcc* are accurate normalization routines which make GPU matchTemplate
+ // normAcc* are accurate normalization routines which make CUDA matchTemplate
// consistent with CPU one
__device__ float normAcc(float num, float denum)
namespace cv { namespace cuda { namespace device
{
-#define OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name) \
+#define OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name) \
void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
-#define OPENCV_GPU_DECLARE_CVTCOLOR_ALL(name) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _16u) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f)
-
-#define OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(name) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f)
-
-#define OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(name) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_8u) \
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_32f)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_rgba)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr_to_bgr555)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr_to_bgr565)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgb_to_bgr555)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgb_to_bgr565)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgra_to_bgr555)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgra_to_bgr565)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgba_to_bgr555)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgba_to_bgr565)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(gray_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(gray_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(gray_to_bgr555)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(gray_to_bgr565)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_gray)
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_gray)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_gray)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_gray)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_gray)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_gray)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_yuv)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_yuv)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_yuv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_yuv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_yuv)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_yuv)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_yuv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_yuv4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_xyz)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_xyz)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_xyz4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_xyz4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_xyz)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_xyz)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_xyz4)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_xyz4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hls)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hls)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hls)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hls)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_lab4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_lab4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_lab4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_lab4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_lab4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_lab4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_lab)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_lab4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_lab4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lrgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lrgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lrgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lrgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lbgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lbgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lbgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lbgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_luv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_luv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_luv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_luv4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_luv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_luv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_luv)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_luv4)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_luv4)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_rgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_rgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_bgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_bgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_bgra)
-
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lrgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lrgb)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lrgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lrgba)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lbgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lbgr)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lbgra)
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lbgra)
-
- #undef OPENCV_GPU_DECLARE_CVTCOLOR_ONE
- #undef OPENCV_GPU_DECLARE_CVTCOLOR_ALL
- #undef OPENCV_GPU_DECLARE_CVTCOLOR_8U32F
- #undef OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL
+#define OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(name) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _8u) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _16u) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _32f)
+
+#define OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(name) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _8u) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _32f)
+
+#define OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(name) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _8u) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _32f) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _full_8u) \
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(name ## _full_32f)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_rgba)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr_to_bgr555)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr_to_bgr565)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(rgb_to_bgr555)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(rgb_to_bgr565)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgra_to_bgr555)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgra_to_bgr565)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(rgba_to_bgr555)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(rgba_to_bgr565)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr555_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr565_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr555_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr565_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr555_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr565_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr555_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr565_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(gray_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(gray_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(gray_to_bgr555)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(gray_to_bgr565)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr555_to_gray)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ONE(bgr565_to_gray)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgb_to_gray)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_gray)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgba_to_gray)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_gray)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgb_to_yuv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgba_to_yuv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgb_to_yuv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgba_to_yuv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_yuv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_yuv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_yuv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_yuv4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv4_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv4_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv4_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(yuv4_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgb_to_xyz)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgba_to_xyz)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgb_to_xyz4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(rgba_to_xyz4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_xyz)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_xyz)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgr_to_xyz4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(bgra_to_xyz4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz4_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz4_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz4_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_ALL(xyz4_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hls)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hls)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hls)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hls)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgb_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgba_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgb_to_lab4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgba_to_lab4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgr_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgra_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgr_to_lab4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgra_to_lab4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgb_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgba_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgb_to_lab4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgba_to_lab4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgr_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgra_to_lab)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgr_to_lab4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgra_to_lab4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_lrgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_lrgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_lrgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_lrgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_lbgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_lbgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab_to_lbgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lab4_to_lbgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgb_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgba_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgb_to_luv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(rgba_to_luv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgr_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgra_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgr_to_luv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(bgra_to_luv4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgb_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgba_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgb_to_luv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lrgba_to_luv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgr_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgra_to_luv)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgr_to_luv4)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(lbgra_to_luv4)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_rgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_rgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_bgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_bgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_bgra)
+
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_lrgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_lrgb)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_lrgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_lrgba)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_lbgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_lbgr)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv_to_lbgra)
+ OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F(luv4_to_lbgra)
+
+ #undef OPENCV_CUDA_DECLARE_CVTCOLOR_ONE
+ #undef OPENCV_CUDA_DECLARE_CVTCOLOR_ALL
+ #undef OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F
+ #undef OPENCV_CUDA_DECLARE_CVTCOLOR_8U32F_FULL
}}}
#endif
{
}
- CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_GPU",
+ CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_CUDA",
obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
obj.info()->addParam(obj, "tilesX", obj.tilesX_);
obj.info()->addParam(obj, "tilesY", obj.tilesY_))
void write(FileStorage& fs) const
{
- fs << "name" << "HoughCirclesDetector_GPU"
+ fs << "name" << "HoughCirclesDetector_CUDA"
<< "dp" << dp_
<< "minDist" << minDist_
<< "cannyThreshold" << cannyThreshold_
void read(const FileNode& fn)
{
- CV_Assert( String(fn["name"]) == "HoughCirclesDetector_GPU" );
+ CV_Assert( String(fn["name"]) == "HoughCirclesDetector_CUDA" );
dp_ = (float)fn["dp"];
minDist_ = (float)fn["minDist"];
cannyThreshold_ = (int)fn["cannyThreshold"];
void write(FileStorage& fs) const
{
- fs << "name" << "HoughLinesDetector_GPU"
+ fs << "name" << "HoughLinesDetector_CUDA"
<< "rho" << rho_
<< "theta" << theta_
<< "threshold" << threshold_
void read(const FileNode& fn)
{
- CV_Assert( String(fn["name"]) == "HoughLinesDetector_GPU" );
+ CV_Assert( String(fn["name"]) == "HoughLinesDetector_CUDA" );
rho_ = (float)fn["rho"];
theta_ = (float)fn["theta"];
threshold_ = (int)fn["threshold"];
void write(FileStorage& fs) const
{
- fs << "name" << "PHoughLinesDetector_GPU"
+ fs << "name" << "PHoughLinesDetector_CUDA"
<< "rho" << rho_
<< "theta" << theta_
<< "minLineLength" << minLineLength_
void read(const FileNode& fn)
{
- CV_Assert( String(fn["name"]) == "PHoughLinesDetector_GPU" );
+ CV_Assert( String(fn["name"]) == "PHoughLinesDetector_CUDA" );
rho_ = (float)fn["rho"];
theta_ = (float)fn["theta"];
minLineLength_ = (int)fn["minLineLength"];
}
////////////////////////////////////////////////////////////////////////
-// meanShiftProc_GPU
+// meanShiftProc_CUDA
namespace cv { namespace cuda { namespace device
{
}
};
-GPU_TEST_P(BilateralFilter, Accuracy)
+CUDA_TEST_P(BilateralFilter, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-3 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, BilateralFilter, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, BilateralFilter, testing::Combine(
ALL_DEVICES,
testing::Values(cv::Size(128, 128), cv::Size(113, 113), cv::Size(639, 481)),
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_32FC1), MatType(CV_32FC3))
}
};
-GPU_TEST_P(Blend, Accuracy)
+CUDA_TEST_P(Blend, Accuracy)
{
int depth = CV_MAT_DEPTH(type);
EXPECT_MAT_NEAR(result_gold, result, CV_MAT_DEPTH(type) == CV_8U ? 1.0 : 1e-5);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Blend, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, Blend, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(Canny, Accuracy)
+CUDA_TEST_P(Canny, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
EXPECT_MAT_SIMILAR(edges_gold, edges, 2e-2);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Canny, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, Canny, testing::Combine(
ALL_DEVICES,
testing::Values(AppertureSize(3), AppertureSize(5)),
testing::Values(L2gradient(false), L2gradient(true)),
}
};
-GPU_TEST_P(CvtColor, BGR2RGB)
+CUDA_TEST_P(CvtColor, BGR2RGB)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR2RGBA)
+CUDA_TEST_P(CvtColor, BGR2RGBA)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR2BGRA)
+CUDA_TEST_P(CvtColor, BGR2BGRA)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGRA2RGB)
+CUDA_TEST_P(CvtColor, BGRA2RGB)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGRA2BGR)
+CUDA_TEST_P(CvtColor, BGRA2BGR)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGRA2RGBA)
+CUDA_TEST_P(CvtColor, BGRA2RGBA)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR2GRAY)
+CUDA_TEST_P(CvtColor, BGR2GRAY)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, RGB2GRAY)
+CUDA_TEST_P(CvtColor, RGB2GRAY)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, GRAY2BGR)
+CUDA_TEST_P(CvtColor, GRAY2BGR)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, GRAY2BGRA)
+CUDA_TEST_P(CvtColor, GRAY2BGRA)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2GRAY);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGRA2GRAY)
+CUDA_TEST_P(CvtColor, BGRA2GRAY)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, RGBA2GRAY)
+CUDA_TEST_P(CvtColor, RGBA2GRAY)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2BGR565)
+CUDA_TEST_P(CvtColor, BGR2BGR565)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, RGB2BGR565)
+CUDA_TEST_P(CvtColor, RGB2BGR565)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5652BGR)
+CUDA_TEST_P(CvtColor, BGR5652BGR)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5652RGB)
+CUDA_TEST_P(CvtColor, BGR5652RGB)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGRA2BGR565)
+CUDA_TEST_P(CvtColor, BGRA2BGR565)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, RGBA2BGR565)
+CUDA_TEST_P(CvtColor, RGBA2BGR565)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5652BGRA)
+CUDA_TEST_P(CvtColor, BGR5652BGRA)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5652RGBA)
+CUDA_TEST_P(CvtColor, BGR5652RGBA)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, GRAY2BGR565)
+CUDA_TEST_P(CvtColor, GRAY2BGR565)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5652GRAY)
+CUDA_TEST_P(CvtColor, BGR5652GRAY)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR2BGR555)
+CUDA_TEST_P(CvtColor, BGR2BGR555)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, RGB2BGR555)
+CUDA_TEST_P(CvtColor, RGB2BGR555)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5552BGR)
+CUDA_TEST_P(CvtColor, BGR5552BGR)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5552RGB)
+CUDA_TEST_P(CvtColor, BGR5552RGB)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGRA2BGR555)
+CUDA_TEST_P(CvtColor, BGRA2BGR555)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, RGBA2BGR555)
+CUDA_TEST_P(CvtColor, RGBA2BGR555)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5552BGRA)
+CUDA_TEST_P(CvtColor, BGR5552BGRA)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5552RGBA)
+CUDA_TEST_P(CvtColor, BGR5552RGBA)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, GRAY2BGR555)
+CUDA_TEST_P(CvtColor, GRAY2BGR555)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR5552GRAY)
+CUDA_TEST_P(CvtColor, BGR5552GRAY)
{
if (depth != CV_8U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-GPU_TEST_P(CvtColor, BGR2XYZ)
+CUDA_TEST_P(CvtColor, BGR2XYZ)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, RGB2XYZ)
+CUDA_TEST_P(CvtColor, RGB2XYZ)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2XYZ4)
+CUDA_TEST_P(CvtColor, BGR2XYZ4)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGRA2XYZ4)
+CUDA_TEST_P(CvtColor, BGRA2XYZ4)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2BGRA);
EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}
-GPU_TEST_P(CvtColor, XYZ2BGR)
+CUDA_TEST_P(CvtColor, XYZ2BGR)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, XYZ2RGB)
+CUDA_TEST_P(CvtColor, XYZ2RGB)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, XYZ42BGR)
+CUDA_TEST_P(CvtColor, XYZ42BGR)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, XYZ42BGRA)
+CUDA_TEST_P(CvtColor, XYZ42BGRA)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2XYZ);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2YCrCb)
+CUDA_TEST_P(CvtColor, BGR2YCrCb)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, RGB2YCrCb)
+CUDA_TEST_P(CvtColor, RGB2YCrCb)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2YCrCb4)
+CUDA_TEST_P(CvtColor, BGR2YCrCb4)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}
-GPU_TEST_P(CvtColor, RGBA2YCrCb4)
+CUDA_TEST_P(CvtColor, RGBA2YCrCb4)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YCrCb2BGR)
+CUDA_TEST_P(CvtColor, YCrCb2BGR)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YCrCb2RGB)
+CUDA_TEST_P(CvtColor, YCrCb2RGB)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YCrCb42RGB)
+CUDA_TEST_P(CvtColor, YCrCb42RGB)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YCrCb42RGBA)
+CUDA_TEST_P(CvtColor, YCrCb42RGBA)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2YCrCb);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2HSV)
+CUDA_TEST_P(CvtColor, BGR2HSV)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HSV)
+CUDA_TEST_P(CvtColor, RGB2HSV)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HSV4)
+CUDA_TEST_P(CvtColor, RGB2HSV4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGBA2HSV4)
+CUDA_TEST_P(CvtColor, RGBA2HSV4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, BGR2HLS)
+CUDA_TEST_P(CvtColor, BGR2HLS)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HLS)
+CUDA_TEST_P(CvtColor, RGB2HLS)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HLS4)
+CUDA_TEST_P(CvtColor, RGB2HLS4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGBA2HLS4)
+CUDA_TEST_P(CvtColor, RGBA2HLS4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV2BGR)
+CUDA_TEST_P(CvtColor, HSV2BGR)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV2RGB)
+CUDA_TEST_P(CvtColor, HSV2RGB)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV42BGR)
+CUDA_TEST_P(CvtColor, HSV42BGR)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV42BGRA)
+CUDA_TEST_P(CvtColor, HSV42BGRA)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS2BGR)
+CUDA_TEST_P(CvtColor, HLS2BGR)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS2RGB)
+CUDA_TEST_P(CvtColor, HLS2RGB)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS42RGB)
+CUDA_TEST_P(CvtColor, HLS42RGB)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS42RGBA)
+CUDA_TEST_P(CvtColor, HLS42RGBA)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, BGR2HSV_FULL)
+CUDA_TEST_P(CvtColor, BGR2HSV_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HSV_FULL)
+CUDA_TEST_P(CvtColor, RGB2HSV_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HSV4_FULL)
+CUDA_TEST_P(CvtColor, RGB2HSV4_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGBA2HSV4_FULL)
+CUDA_TEST_P(CvtColor, RGBA2HSV4_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, BGR2HLS_FULL)
+CUDA_TEST_P(CvtColor, BGR2HLS_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HLS_FULL)
+CUDA_TEST_P(CvtColor, RGB2HLS_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGB2HLS4_FULL)
+CUDA_TEST_P(CvtColor, RGB2HLS4_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, RGBA2HLS4_FULL)
+CUDA_TEST_P(CvtColor, RGBA2HLS4_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV2BGR_FULL)
+CUDA_TEST_P(CvtColor, HSV2BGR_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV2RGB_FULL)
+CUDA_TEST_P(CvtColor, HSV2RGB_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV42RGB_FULL)
+CUDA_TEST_P(CvtColor, HSV42RGB_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HSV42RGBA_FULL)
+CUDA_TEST_P(CvtColor, HSV42RGBA_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS2BGR_FULL)
+CUDA_TEST_P(CvtColor, HLS2BGR_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS2RGB_FULL)
+CUDA_TEST_P(CvtColor, HLS2RGB_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS42RGB_FULL)
+CUDA_TEST_P(CvtColor, HLS42RGB_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, HLS42RGBA_FULL)
+CUDA_TEST_P(CvtColor, HLS42RGBA_FULL)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_32F ? 1e-2 : 1);
}
-GPU_TEST_P(CvtColor, BGR2YUV)
+CUDA_TEST_P(CvtColor, BGR2YUV)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, RGB2YUV)
+CUDA_TEST_P(CvtColor, RGB2YUV)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YUV2BGR)
+CUDA_TEST_P(CvtColor, YUV2BGR)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2YUV);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YUV42BGR)
+CUDA_TEST_P(CvtColor, YUV42BGR)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2YUV);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YUV42BGRA)
+CUDA_TEST_P(CvtColor, YUV42BGRA)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2YUV);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, YUV2RGB)
+CUDA_TEST_P(CvtColor, YUV2RGB)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_RGB2YUV);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2YUV4)
+CUDA_TEST_P(CvtColor, BGR2YUV4)
{
cv::Mat src = img;
EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}
-GPU_TEST_P(CvtColor, RGBA2YUV4)
+CUDA_TEST_P(CvtColor, RGBA2YUV4)
{
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2Lab)
+CUDA_TEST_P(CvtColor, BGR2Lab)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, RGB2Lab)
+CUDA_TEST_P(CvtColor, RGB2Lab)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, BGRA2Lab4)
+CUDA_TEST_P(CvtColor, BGRA2Lab4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, LBGR2Lab)
+CUDA_TEST_P(CvtColor, LBGR2Lab)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, LRGB2Lab)
+CUDA_TEST_P(CvtColor, LRGB2Lab)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, LBGRA2Lab4)
+CUDA_TEST_P(CvtColor, LBGRA2Lab4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, Lab2BGR)
+CUDA_TEST_P(CvtColor, Lab2BGR)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
-GPU_TEST_P(CvtColor, Lab2RGB)
+CUDA_TEST_P(CvtColor, Lab2RGB)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
-GPU_TEST_P(CvtColor, Lab2BGRA)
+CUDA_TEST_P(CvtColor, Lab2BGRA)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
-GPU_TEST_P(CvtColor, Lab2LBGR)
+CUDA_TEST_P(CvtColor, Lab2LBGR)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
-GPU_TEST_P(CvtColor, Lab2LRGB)
+CUDA_TEST_P(CvtColor, Lab2LRGB)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
-GPU_TEST_P(CvtColor, Lab2LRGBA)
+CUDA_TEST_P(CvtColor, Lab2LRGBA)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
-GPU_TEST_P(CvtColor, BGR2Luv)
+CUDA_TEST_P(CvtColor, BGR2Luv)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, RGB2Luv)
+CUDA_TEST_P(CvtColor, RGB2Luv)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, BGRA2Luv4)
+CUDA_TEST_P(CvtColor, BGRA2Luv4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, LBGR2Luv)
+CUDA_TEST_P(CvtColor, LBGR2Luv)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, LRGB2Luv)
+CUDA_TEST_P(CvtColor, LRGB2Luv)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, LBGRA2Luv4)
+CUDA_TEST_P(CvtColor, LBGRA2Luv4)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
-GPU_TEST_P(CvtColor, Luv2BGR)
+CUDA_TEST_P(CvtColor, Luv2BGR)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
-GPU_TEST_P(CvtColor, Luv2RGB)
+CUDA_TEST_P(CvtColor, Luv2RGB)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
-GPU_TEST_P(CvtColor, Luv2BGRA)
+CUDA_TEST_P(CvtColor, Luv2BGRA)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
-GPU_TEST_P(CvtColor, Luv2LBGR)
+CUDA_TEST_P(CvtColor, Luv2LBGR)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
-GPU_TEST_P(CvtColor, Luv2LRGB)
+CUDA_TEST_P(CvtColor, Luv2LRGB)
{
if (depth == CV_16U)
return;
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
-GPU_TEST_P(CvtColor, Luv2LRGBA)
+CUDA_TEST_P(CvtColor, Luv2LRGBA)
{
if (depth == CV_16U)
return;
#if defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
-GPU_TEST_P(CvtColor, RGBA2mRGBA)
+CUDA_TEST_P(CvtColor, RGBA2mRGBA)
{
if (depth != CV_8U)
return;
#endif // defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
-GPU_TEST_P(CvtColor, BayerBG2BGR)
+CUDA_TEST_P(CvtColor, BayerBG2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerBG2BGR4)
+CUDA_TEST_P(CvtColor, BayerBG2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerGB2BGR)
+CUDA_TEST_P(CvtColor, BayerGB2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerGB2BGR4)
+CUDA_TEST_P(CvtColor, BayerGB2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerRG2BGR)
+CUDA_TEST_P(CvtColor, BayerRG2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerRG2BGR4)
+CUDA_TEST_P(CvtColor, BayerRG2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerGR2BGR)
+CUDA_TEST_P(CvtColor, BayerGR2BGR)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerGR2BGR4)
+CUDA_TEST_P(CvtColor, BayerGR2BGR4)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst3(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 0);
}
-GPU_TEST_P(CvtColor, BayerBG2Gray)
+CUDA_TEST_P(CvtColor, BayerBG2Gray)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}
-GPU_TEST_P(CvtColor, BayerGB2Gray)
+CUDA_TEST_P(CvtColor, BayerGB2Gray)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}
-GPU_TEST_P(CvtColor, BayerRG2Gray)
+CUDA_TEST_P(CvtColor, BayerRG2Gray)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}
-GPU_TEST_P(CvtColor, BayerGR2Gray)
+CUDA_TEST_P(CvtColor, BayerGR2Gray)
{
if ((depth != CV_8U && depth != CV_16U) || useRoi)
return;
EXPECT_MAT_NEAR(dst_gold(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), dst(cv::Rect(1, 1, dst.cols - 2, dst.rows - 2)), 2);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CvtColor, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, CvtColor, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32F)),
}
};
-GPU_TEST_P(Demosaicing, BayerBG2BGR)
+CUDA_TEST_P(Demosaicing, BayerBG2BGR)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}
-GPU_TEST_P(Demosaicing, BayerGB2BGR)
+CUDA_TEST_P(Demosaicing, BayerGB2BGR)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}
-GPU_TEST_P(Demosaicing, BayerRG2BGR)
+CUDA_TEST_P(Demosaicing, BayerRG2BGR)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}
-GPU_TEST_P(Demosaicing, BayerGR2BGR)
+CUDA_TEST_P(Demosaicing, BayerGR2BGR)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 2e-2);
}
-GPU_TEST_P(Demosaicing, BayerBG2BGR_MHT)
+CUDA_TEST_P(Demosaicing, BayerBG2BGR_MHT)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}
-GPU_TEST_P(Demosaicing, BayerGB2BGR_MHT)
+CUDA_TEST_P(Demosaicing, BayerGB2BGR_MHT)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}
-GPU_TEST_P(Demosaicing, BayerRG2BGR_MHT)
+CUDA_TEST_P(Demosaicing, BayerRG2BGR_MHT)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}
-GPU_TEST_P(Demosaicing, BayerGR2BGR_MHT)
+CUDA_TEST_P(Demosaicing, BayerGR2BGR_MHT)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
EXPECT_MAT_SIMILAR(img, dst, 5e-3);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Demosaicing, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, Demosaicing, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// swapChannels
}
};
-GPU_TEST_P(SwapChannels, Accuracy)
+CUDA_TEST_P(SwapChannels, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", CV_8UC4);
ASSERT_FALSE(src.empty());
EXPECT_MAT_NEAR(dst_gold, d_src, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, SwapChannels, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, SwapChannels, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
}
};
-GPU_TEST_P(CornerHarris, Accuracy)
+CUDA_TEST_P(CornerHarris, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerHarris, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, CornerHarris, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
}
};
-GPU_TEST_P(CornerMinEigen, Accuracy)
+CUDA_TEST_P(CornerMinEigen, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerMinEigen, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, CornerMinEigen, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
}
};
-GPU_TEST_P(GoodFeaturesToTrack, Accuracy)
+CUDA_TEST_P(GoodFeaturesToTrack, Accuracy)
{
cv::Mat image = readImage("opticalflow/frame0.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
ASSERT_LE(bad_ratio, 0.01);
}
-GPU_TEST_P(GoodFeaturesToTrack, EmptyCorners)
+CUDA_TEST_P(GoodFeaturesToTrack, EmptyCorners)
{
int maxCorners = 1000;
double qualityLevel = 0.01;
ASSERT_TRUE(corners.empty());
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, GoodFeaturesToTrack, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, GoodFeaturesToTrack, testing::Combine(
ALL_DEVICES,
testing::Values(MinDistance(0.0), MinDistance(3.0))));
{
cv::cuda::DeviceInfo devInfo;
- cv::Size size;
-
virtual void SetUp()
{
devInfo = GET_PARAM(0);
}
};
-GPU_TEST_P(HistEven, Accuracy)
+CUDA_TEST_P(HistEven, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, HistEven, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
}
};
-GPU_TEST_P(CalcHist, Accuracy)
+CUDA_TEST_P(CalcHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CalcHist, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, CalcHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
}
};
-GPU_TEST_P(EqualizeHist, Accuracy)
+CUDA_TEST_P(EqualizeHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
EXPECT_MAT_NEAR(dst_gold, dst, 3.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, EqualizeHist, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, EqualizeHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
}
};
-GPU_TEST_P(CLAHE, Accuracy)
+CUDA_TEST_P(CLAHE, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
ASSERT_MAT_NEAR(dst_gold, dst, 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CLAHE, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, CLAHE, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(0.0, 40.0)));
}
};
-GPU_TEST_P(HoughLines, Accuracy)
+CUDA_TEST_P(HoughLines, Accuracy)
{
const cv::cuda::DeviceInfo devInfo = GET_PARAM(0);
cv::cuda::setDevice(devInfo.deviceID());
ASSERT_MAT_NEAR(src, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HoughLines, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, HoughLines, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
}
};
-GPU_TEST_P(HoughCircles, Accuracy)
+CUDA_TEST_P(HoughCircles, Accuracy)
{
const cv::cuda::DeviceInfo devInfo = GET_PARAM(0);
cv::cuda::setDevice(devInfo.deviceID());
}
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HoughCircles, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, HoughCircles, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));
{
};
-GPU_TEST_P(GeneralizedHough, Ballard)
+CUDA_TEST_P(GeneralizedHough, Ballard)
{
const cv::cuda::DeviceInfo devInfo = GET_PARAM(0);
cv::cuda::setDevice(devInfo.deviceID());
}
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, GeneralizedHough, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, GeneralizedHough, testing::Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
}
};
-GPU_TEST_P(MatchTemplate8U, Accuracy)
+CUDA_TEST_P(MatchTemplate8U, Accuracy)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_8U, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_8U, cn));
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate8U, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, MatchTemplate8U, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
}
};
-GPU_TEST_P(MatchTemplate32F, Regression)
+CUDA_TEST_P(MatchTemplate32F, Regression)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_32F, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_32F, cn));
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate32F, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, MatchTemplate32F, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
}
};
-GPU_TEST_P(MatchTemplateBlackSource, Accuracy)
+CUDA_TEST_P(MatchTemplateBlackSource, Accuracy)
{
cv::Mat image = readImage("matchtemplate/black.png");
ASSERT_FALSE(image.empty());
ASSERT_EQ(maxLocGold, maxLoc);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplateBlackSource, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, MatchTemplateBlackSource, testing::Combine(
ALL_DEVICES,
testing::Values(TemplateMethod(cv::TM_CCOEFF_NORMED), TemplateMethod(cv::TM_CCORR_NORMED))));
}
};
-GPU_TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
+CUDA_TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
{
cv::Mat image = readImage(imageName);
ASSERT_FALSE(image.empty());
ASSERT_GE(minVal, -1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CCOEF_NORMED, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, MatchTemplate_CCOEF_NORMED, testing::Combine(
ALL_DEVICES,
testing::Values(std::make_pair(std::string("matchtemplate/source-0.png"), std::string("matchtemplate/target-0.png")))));
}
};
-GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
+CUDA_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
ASSERT_EQ(0, minLoc.y);
}
-GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
+CUDA_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
ASSERT_EQ(0, minLoc.y);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);
#endif // HAVE_CUDA
}
};
-GPU_TEST_P(MeanShift, Filtering)
+CUDA_TEST_P(MeanShift, Filtering)
{
cv::Mat img_template;
if (supportFeature(devInfo, cv::cuda::FEATURE_SET_COMPUTE_20))
EXPECT_MAT_NEAR(img_template, result, 0.0);
}
-GPU_TEST_P(MeanShift, Proc)
+CUDA_TEST_P(MeanShift, Proc)
{
cv::FileStorage fs;
if (supportFeature(devInfo, cv::cuda::FEATURE_SET_COMPUTE_20))
EXPECT_MAT_NEAR(spmap_template, spmap, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShift, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, MeanShift, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////////
// MeanShiftSegmentation
}
};
-GPU_TEST_P(MeanShiftSegmentation, Regression)
+CUDA_TEST_P(MeanShiftSegmentation, Regression)
{
cv::Mat img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
EXPECT_MAT_SIMILAR(dst_gold, dst_rgb, 1e-3);
}
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShiftSegmentation, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_ImgProc, MeanShiftSegmentation, testing::Combine(
ALL_DEVICES,
testing::Values(MinSize(0), MinSize(4), MinSize(20), MinSize(84), MinSize(340), MinSize(1364))));
struct NPPST : NVidiaTest {};
struct NCV : NVidiaTest {};
-GPU_TEST_P(NPPST, Integral)
+CUDA_TEST_P(NPPST, Integral)
{
bool res = nvidia_NPPST_Integral_Image(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NPPST, SquaredIntegral)
+CUDA_TEST_P(NPPST, SquaredIntegral)
{
bool res = nvidia_NPPST_Squared_Integral_Image(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NPPST, RectStdDev)
+CUDA_TEST_P(NPPST, RectStdDev)
{
bool res = nvidia_NPPST_RectStdDev(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NPPST, Resize)
+CUDA_TEST_P(NPPST, Resize)
{
bool res = nvidia_NPPST_Resize(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NPPST, VectorOperations)
+CUDA_TEST_P(NPPST, VectorOperations)
{
bool res = nvidia_NPPST_Vector_Operations(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NPPST, Transpose)
+CUDA_TEST_P(NPPST, Transpose)
{
bool res = nvidia_NPPST_Transpose(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NCV, VectorOperations)
+CUDA_TEST_P(NCV, VectorOperations)
{
bool res = nvidia_NCV_Vector_Operations(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NCV, HaarCascadeLoader)
+CUDA_TEST_P(NCV, HaarCascadeLoader)
{
bool res = nvidia_NCV_Haar_Cascade_Loader(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NCV, HaarCascadeApplication)
+CUDA_TEST_P(NCV, HaarCascadeApplication)
{
bool res = nvidia_NCV_Haar_Cascade_Application(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NCV, HypothesesFiltration)
+CUDA_TEST_P(NCV, HypothesesFiltration)
{
bool res = nvidia_NCV_Hypotheses_Filtration(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-GPU_TEST_P(NCV, Visualization)
+CUDA_TEST_P(NCV, Visualization)
{
bool res = nvidia_NCV_Visualization(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
-INSTANTIATE_TEST_CASE_P(GPU_Legacy, NPPST, ALL_DEVICES);
-INSTANTIATE_TEST_CASE_P(GPU_Legacy, NCV, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Legacy, NPPST, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Legacy, NCV, ALL_DEVICES);
#endif // HAVE_CUDA
// see reference:
// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
-class CV_EXPORTS OpticalFlowDual_TVL1_GPU
+class CV_EXPORTS OpticalFlowDual_TVL1_CUDA
{
public:
- OpticalFlowDual_TVL1_GPU();
+ OpticalFlowDual_TVL1_CUDA();
void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy);
frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
TEST_CYCLE() cv::cuda::interpolateFrames(d_frame0, d_frame1, d_fu, d_fv, d_bu, d_bv, 0.5f, newFrame, d_buf);
- GPU_SANITY_CHECK(newFrame, 1e-4);
+ CUDA_SANITY_CHECK(newFrame, 1e-4);
}
else
{
frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
TEST_CYCLE() cv::cuda::createOpticalFlowNeedleMap(u, v, vertex, colors);
- GPU_SANITY_CHECK(vertex, 1e-6);
- GPU_SANITY_CHECK(colors);
+ CUDA_SANITY_CHECK(vertex, 1e-6);
+ CUDA_SANITY_CHECK(colors);
}
else
{
frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
TEST_CYCLE() d_flow(d_frame0, d_frame1, u, v);
- GPU_SANITY_CHECK(u, 1e-1);
- GPU_SANITY_CHECK(v, 1e-1);
+ CUDA_SANITY_CHECK(u, 1e-1);
+ CUDA_SANITY_CHECK(v, 1e-1);
}
else
{
cv::Mat pts;
cv::goodFeaturesToTrack(gray_frame, pts, points, 0.01, 0.0);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_pts(pts.reshape(2, 1));
TEST_CYCLE() d_pyrLK.sparse(d_frame0, d_frame1, d_pts, nextPts, status);
- GPU_SANITY_CHECK(nextPts);
- GPU_SANITY_CHECK(status);
+ CUDA_SANITY_CHECK(nextPts);
+ CUDA_SANITY_CHECK(status);
}
else
{
const cv::Mat frame1 = readImage(imagePair.second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
TEST_CYCLE() d_pyrLK.dense(d_frame0, d_frame1, u, v);
- GPU_SANITY_CHECK(u);
- GPU_SANITY_CHECK(v);
+ CUDA_SANITY_CHECK(u);
+ CUDA_SANITY_CHECK(v);
}
else
{
const double polySigma = 1.1;
const int flags = 0;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
TEST_CYCLE() d_farneback(d_frame0, d_frame1, u, v);
- GPU_SANITY_CHECK(u, 1e-4);
- GPU_SANITY_CHECK(v, 1e-4);
+ CUDA_SANITY_CHECK(u, 1e-4);
+ CUDA_SANITY_CHECK(v, 1e-4);
}
else
{
const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u;
cv::cuda::GpuMat v;
- cv::cuda::OpticalFlowDual_TVL1_GPU d_alg;
+ cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg;
TEST_CYCLE() d_alg(d_frame0, d_frame1, u, v);
- GPU_SANITY_CHECK(u, 1e-2);
- GPU_SANITY_CHECK(v, 1e-2);
+ CUDA_SANITY_CHECK(u, 1e-2);
+ CUDA_SANITY_CHECK(v, 1e-2);
}
else
{
const cv::Size shift_size(1, 1);
const cv::Size max_range(16, 16);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
TEST_CYCLE() cv::cuda::calcOpticalFlowBM(d_frame0, d_frame1, block_size, shift_size, max_range, false, u, v, buf);
- GPU_SANITY_CHECK(u);
- GPU_SANITY_CHECK(v);
+ CUDA_SANITY_CHECK(u);
+ CUDA_SANITY_CHECK(v);
}
else
{
const cv::Size shift_size(1, 1);
const cv::Size max_range(16, 16);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
TEST_CYCLE() fastBM(d_frame0, d_frame1, u, v, max_range.width, block_size.width);
- GPU_SANITY_CHECK(u, 2);
- GPU_SANITY_CHECK(v, 2);
+ CUDA_SANITY_CHECK(u, 2);
+ CUDA_SANITY_CHECK(v, 2);
}
else
{
#define S(x) StreamAccessor::getStream(x)
-// GPU resize() is fast, but it differs from the CPU analog. Disabling this flag
+// CUDA resize() is fast, but it differs from the CPU analog. Disabling this flag
// leads to an inefficient code. It's for debug purposes only.
-#define ENABLE_GPU_RESIZE 1
+#define ENABLE_CUDA_RESIZE 1
using namespace cv;
using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
-cv::cuda::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU() { throw_no_cuda(); }
-void cv::cuda::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::OpticalFlowDual_TVL1_GPU::collectGarbage() {}
-void cv::cuda::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
+cv::cuda::OpticalFlowDual_TVL1_CUDA::OpticalFlowDual_TVL1_CUDA() { throw_no_cuda(); }
+void cv::cuda::OpticalFlowDual_TVL1_CUDA::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
+void cv::cuda::OpticalFlowDual_TVL1_CUDA::collectGarbage() {}
+void cv::cuda::OpticalFlowDual_TVL1_CUDA::procOneScale(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
#else
using namespace cv;
using namespace cv::cuda;
-cv::cuda::OpticalFlowDual_TVL1_GPU::OpticalFlowDual_TVL1_GPU()
+cv::cuda::OpticalFlowDual_TVL1_CUDA::OpticalFlowDual_TVL1_CUDA()
{
tau = 0.25;
lambda = 0.15;
useInitialFlow = false;
}
-void cv::cuda::OpticalFlowDual_TVL1_GPU::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy)
+void cv::cuda::OpticalFlowDual_TVL1_CUDA::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy)
{
CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 );
CV_Assert( I0.size() == I1.size() );
void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, float taut);
}
-void cv::cuda::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2)
+void cv::cuda::OpticalFlowDual_TVL1_CUDA::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2)
{
using namespace tvl1flow;
}
}
-void cv::cuda::OpticalFlowDual_TVL1_GPU::collectGarbage()
+void cv::cuda::OpticalFlowDual_TVL1_CUDA::collectGarbage()
{
I0s.clear();
I1s.clear();
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
}
};
-GPU_TEST_P(BroxOpticalFlow, Regression)
+CUDA_TEST_P(BroxOpticalFlow, Regression)
{
cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
ASSERT_FALSE(frame0.empty());
#endif
}
-GPU_TEST_P(BroxOpticalFlow, OpticalFlowNan)
+CUDA_TEST_P(BroxOpticalFlow, OpticalFlowNan)
{
cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
ASSERT_FALSE(frame0.empty());
EXPECT_TRUE(cv::checkRange(h_v));
};
-INSTANTIATE_TEST_CASE_P(GPU_OptFlow, BroxOpticalFlow, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, BroxOpticalFlow, ALL_DEVICES);
//////////////////////////////////////////////////////
// PyrLKOpticalFlow
}
};
-GPU_TEST_P(PyrLKOpticalFlow, Sparse)
+CUDA_TEST_P(PyrLKOpticalFlow, Sparse)
{
cv::Mat frame0 = readImage("opticalflow/frame0.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
ASSERT_FALSE(frame0.empty());
ASSERT_LE(bad_ratio, 0.01);
}
-INSTANTIATE_TEST_CASE_P(GPU_OptFlow, PyrLKOpticalFlow, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, PyrLKOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(UseGray(true), UseGray(false))));
}
};
-GPU_TEST_P(FarnebackOpticalFlow, Accuracy)
+CUDA_TEST_P(FarnebackOpticalFlow, Accuracy)
{
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1);
}
-INSTANTIATE_TEST_CASE_P(GPU_OptFlow, FarnebackOpticalFlow, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FarnebackOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(PyrScale(0.3), PyrScale(0.5), PyrScale(0.8)),
testing::Values(PolyN(5), PolyN(7)),
}
};
-GPU_TEST_P(OpticalFlowDual_TVL1, Accuracy)
+CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy)
{
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
- cv::cuda::OpticalFlowDual_TVL1_GPU d_alg;
+ cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg;
cv::cuda::GpuMat d_flowx = createMat(frame0.size(), CV_32FC1, useRoi);
cv::cuda::GpuMat d_flowy = createMat(frame0.size(), CV_32FC1, useRoi);
d_alg(loadMat(frame0, useRoi), loadMat(frame1, useRoi), d_flowx, d_flowy);
EXPECT_MAT_SIMILAR(gold[1], d_flowy, 4e-3);
}
-INSTANTIATE_TEST_CASE_P(GPU_OptFlow, OpticalFlowDual_TVL1, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowDual_TVL1, testing::Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
{
};
-GPU_TEST_P(OpticalFlowBM, Accuracy)
+CUDA_TEST_P(OpticalFlowBM, Accuracy)
{
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
EXPECT_MAT_NEAR(vely, d_vely, 0);
}
-INSTANTIATE_TEST_CASE_P(GPU_OptFlow, OpticalFlowBM, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowBM, ALL_DEVICES);
//////////////////////////////////////////////////////
// FastOpticalFlowBM
{
};
-GPU_TEST_P(FastOpticalFlowBM, Accuracy)
+CUDA_TEST_P(FastOpticalFlowBM, Accuracy)
{
const double MAX_RMSE = 0.6;
EXPECT_LE(err, MAX_RMSE);
}
-INSTANTIATE_TEST_CASE_P(GPU_OptFlow, FastOpticalFlowBM, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FastOpticalFlowBM, ALL_DEVICES);
#endif // HAVE_CUDA
const int ndisp = 256;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::StereoBM> d_bm = cv::cuda::createStereoBM(ndisp);
TEST_CYCLE() d_bm->compute(d_imgLeft, d_imgRight, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
const int ndisp = 64;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cuda::StereoBeliefPropagation> d_bp = cv::cuda::createStereoBeliefPropagation(ndisp);
TEST_CYCLE() d_bp->compute(d_imgLeft, d_imgRight, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
const int ndisp = 128;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cuda::StereoConstantSpaceBP> d_csbp = cv::cuda::createStereoConstantSpaceBP(ndisp);
TEST_CYCLE() d_csbp->compute(d_imgLeft, d_imgRight, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
const int ndisp = 128;
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::Ptr<cv::cuda::DisparityBilateralFilter> d_filter = cv::cuda::createDisparityBilateralFilter(ndisp);
TEST_CYCLE() d_filter->apply(d_disp, d_img, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// ReprojectImageTo3D
PERF_TEST_P(Sz_Depth, ReprojectImageTo3D,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16S)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat Q(4, 4, CV_32FC1);
cv::randu(Q, 0.1, 1.0);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::reprojectImageTo3D(d_src, dst, Q);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// DrawColorDisp
PERF_TEST_P(Sz_Depth, DrawColorDisp,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16S)))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::drawColorDisp(d_src, dst, 255);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
};
const int calles_num = sizeof(callers)/sizeof(callers[0]);
- void stereoBM_GPU(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, int winsz, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t& stream)
+ void stereoBM_CUDA(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, int winsz, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t& stream)
{
int winsz2 = winsz >> 1;
{
namespace stereobm
{
- void stereoBM_GPU(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int ndisp, int winsz, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t & stream);
+ void stereoBM_CUDA(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int ndisp, int winsz, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t & stream);
void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap /*= 31*/, cudaStream_t & stream);
void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, cudaStream_t & stream);
}
ri_for_bm = riBuf_;
}
- stereoBM_GPU(le_for_bm, ri_for_bm, disparity, ndisp_, winSize_, minSSD_, stream);
+ stereoBM_CUDA(le_for_bm, ri_for_bm, disparity, ndisp_, winSize_, minSSD_, stream);
if (avergeTexThreshold_ > 0)
postfilter_textureness(le_for_bm, winSize_, avergeTexThreshold_, disparity, stream);
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
}
};
-GPU_TEST_P(StereoBM, Regression)
+CUDA_TEST_P(StereoBM, Regression)
{
cv::Mat left_image = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
cv::Mat right_image = readImage("stereobm/aloe-R.png", cv::IMREAD_GRAYSCALE);
EXPECT_MAT_NEAR(disp_gold, disp, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Stereo, StereoBM, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Stereo, StereoBM, ALL_DEVICES);
//////////////////////////////////////////////////////////////////////////
// StereoBeliefPropagation
}
};
-GPU_TEST_P(StereoBeliefPropagation, Regression)
+CUDA_TEST_P(StereoBeliefPropagation, Regression)
{
cv::Mat left_image = readImage("stereobp/aloe-L.png");
cv::Mat right_image = readImage("stereobp/aloe-R.png");
EXPECT_MAT_NEAR(disp_gold, h_disp, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Stereo, StereoBeliefPropagation, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Stereo, StereoBeliefPropagation, ALL_DEVICES);
//////////////////////////////////////////////////////////////////////////
// StereoConstantSpaceBP
}
};
-GPU_TEST_P(StereoConstantSpaceBP, Regression)
+CUDA_TEST_P(StereoConstantSpaceBP, Regression)
{
cv::Mat left_image = readImage("csstereobp/aloe-L.png");
cv::Mat right_image = readImage("csstereobp/aloe-R.png");
EXPECT_MAT_NEAR(disp_gold, h_disp, 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Stereo, StereoConstantSpaceBP, ALL_DEVICES);
+INSTANTIATE_TEST_CASE_P(CUDA_Stereo, StereoConstantSpaceBP, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////////
// reprojectImageTo3D
}
};
-GPU_TEST_P(ReprojectImageTo3D, Accuracy)
+CUDA_TEST_P(ReprojectImageTo3D, Accuracy)
{
cv::Mat disp = randomMat(size, depth, 5.0, 30.0);
cv::Mat Q = randomMat(cv::Size(4, 4), CV_32FC1, 0.1, 1.0);
EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}
-INSTANTIATE_TEST_CASE_P(GPU_Stereo, ReprojectImageTo3D, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Stereo, ReprojectImageTo3D, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatDepth(CV_8U), MatDepth(CV_16S)),
DEF_PARAM_TEST(Sz_Depth_Cn_Inter_Border_Mode, cv::Size, MatDepth, MatCn, Interpolation, BorderMode, RemapMode);
PERF_TEST_P(Sz_Depth_Cn_Inter_Border_Mode, Remap,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
ALL_BORDER_MODES,
RemapMode::all()))
cv::Mat ymap(size, CV_32FC1);
generateMap(xmap, ymap, remapMode);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
const cv::cuda::GpuMat d_xmap(xmap);
TEST_CYCLE() cv::cuda::remap(d_src, dst, d_xmap, d_ymap, interpolation, borderMode);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Cn_Inter_Scale, cv::Size, MatDepth, MatCn, Interpolation, double);
PERF_TEST_P(Sz_Depth_Cn_Inter_Scale, Resize,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
Values(0.5, 0.3, 2.0)))
{
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::resize(d_src, dst, cv::Size(), f, f, interpolation);
- GPU_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Cn_Scale, cv::Size, MatDepth, MatCn, double);
PERF_TEST_P(Sz_Depth_Cn_Scale, ResizeArea,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(0.2, 0.1, 0.05)))
{
declare.time(1.0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::resize(d_src, dst, cv::Size(), f, f, interpolation);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Cn_Inter_Border, cv::Size, MatDepth, MatCn, Interpolation, BorderMode);
PERF_TEST_P(Sz_Depth_Cn_Inter_Border, WarpAffine,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
ALL_BORDER_MODES))
{
};
const cv::Mat M(2, 3, CV_64F, (void*) mat);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::warpAffine(d_src, dst, M, size, interpolation, borderMode);
- GPU_SANITY_CHECK(dst, 1);
+ CUDA_SANITY_CHECK(dst, 1);
}
else
{
// WarpPerspective
PERF_TEST_P(Sz_Depth_Cn_Inter_Border, WarpPerspective,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
ALL_BORDER_MODES))
{
{0.0, 0.0, 1.0}};
const cv::Mat M(3, 3, CV_64F, (void*) mat);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::warpPerspective(d_src, dst, M, size, interpolation, borderMode);
- GPU_SANITY_CHECK(dst, 1);
+ CUDA_SANITY_CHECK(dst, 1);
}
else
{
// BuildWarpPlaneMaps
PERF_TEST_P(Sz, BuildWarpPlaneMaps,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
const cv::Mat R = cv::Mat::ones(3, 3, CV_32FC1);
const cv::Mat T = cv::Mat::zeros(1, 3, CV_32F);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat map_x;
cv::cuda::GpuMat map_y;
TEST_CYCLE() cv::cuda::buildWarpPlaneMaps(size, cv::Rect(0, 0, size.width, size.height), K, R, T, 1.0, map_x, map_y);
- GPU_SANITY_CHECK(map_x);
- GPU_SANITY_CHECK(map_y);
+ CUDA_SANITY_CHECK(map_x);
+ CUDA_SANITY_CHECK(map_y);
}
else
{
// BuildWarpCylindricalMaps
PERF_TEST_P(Sz, BuildWarpCylindricalMaps,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
const cv::Mat K = cv::Mat::eye(3, 3, CV_32FC1);
const cv::Mat R = cv::Mat::ones(3, 3, CV_32FC1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat map_x;
cv::cuda::GpuMat map_y;
TEST_CYCLE() cv::cuda::buildWarpCylindricalMaps(size, cv::Rect(0, 0, size.width, size.height), K, R, 1.0, map_x, map_y);
- GPU_SANITY_CHECK(map_x);
- GPU_SANITY_CHECK(map_y);
+ CUDA_SANITY_CHECK(map_x);
+ CUDA_SANITY_CHECK(map_y);
}
else
{
// BuildWarpSphericalMaps
PERF_TEST_P(Sz, BuildWarpSphericalMaps,
- GPU_TYPICAL_MAT_SIZES)
+ CUDA_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
const cv::Mat K = cv::Mat::eye(3, 3, CV_32FC1);
const cv::Mat R = cv::Mat::ones(3, 3, CV_32FC1);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat map_x;
cv::cuda::GpuMat map_y;
TEST_CYCLE() cv::cuda::buildWarpSphericalMaps(size, cv::Rect(0, 0, size.width, size.height), K, R, 1.0, map_x, map_y);
- GPU_SANITY_CHECK(map_x);
- GPU_SANITY_CHECK(map_y);
+ CUDA_SANITY_CHECK(map_x);
+ CUDA_SANITY_CHECK(map_y);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Cn_Inter, cv::Size, MatDepth, MatCn, Interpolation);
PERF_TEST_P(Sz_Depth_Cn_Inter, Rotate,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4,
+ CUDA_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC))))
{
const cv::Size size = GET_PARAM(0);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::rotate(d_src, dst, size, 30.0, 0, 0, interpolation);
- GPU_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
+ CUDA_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
}
else
{
// PyrDown
PERF_TEST_P(Sz_Depth_Cn, PyrDown,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::pyrDown(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// PyrUp
PERF_TEST_P(Sz_Depth_Cn, PyrUp,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::pyrUp(d_src, dst);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
// ImagePyramidGetLayer
PERF_TEST_P(Sz_Depth_Cn, ImagePyramidGetLayer,
- Combine(GPU_TYPICAL_MAT_SIZES,
+ Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
- GPU_CHANNELS_1_3_4))
+ CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const int nLayers = 3;
const cv::Size dstSize(size.width / 2 + 10, size.height / 2 + 10);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() d_pyr->getLayer(dst, dstSize);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
}
};
- #define OPENCV_GPU_IMPLEMENT_REMAP_TEX(type) \
+ #define OPENCV_CUDA_IMPLEMENT_REMAP_TEX(type) \
texture< type , cudaTextureType2D> tex_remap_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
struct tex_remap_ ## type ## _reader \
{ \
} \
};
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(uchar)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(uchar2)
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(uchar4)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar2)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar4)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(schar)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(char2)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(char4)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(schar)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char2)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char4)
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(ushort)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(ushort2)
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(ushort4)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort2)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort4)
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(short)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(short2)
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(short4)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short2)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short4)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(int)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(int2)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(int4)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int2)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int4)
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(float)
- //OPENCV_GPU_IMPLEMENT_REMAP_TEX(float2)
- OPENCV_GPU_IMPLEMENT_REMAP_TEX(float4)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float)
+ //OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float2)
+ OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float4)
- #undef OPENCV_GPU_IMPLEMENT_REMAP_TEX
+ #undef OPENCV_CUDA_IMPLEMENT_REMAP_TEX
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcher
{
template <typename T> struct TextureAccessor;
- #define OPENCV_GPU_IMPLEMENT_RESIZE_TEX(type) \
+ #define OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(type) \
texture<type, cudaTextureType2D, cudaReadModeElementType> tex_resize_##type (0, cudaFilterModePoint, cudaAddressModeClamp); \
template <> struct TextureAccessor<type> \
{ \
} \
};
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(uchar)
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(uchar4)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(uchar)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(uchar4)
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(ushort)
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(ushort4)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(ushort)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(ushort4)
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(short)
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(short4)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(short)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(short4)
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(float)
- OPENCV_GPU_IMPLEMENT_RESIZE_TEX(float4)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(float)
+ OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(float4)
- #undef OPENCV_GPU_IMPLEMENT_RESIZE_TEX
+ #undef OPENCV_CUDA_IMPLEMENT_RESIZE_TEX
template <typename T>
TextureAccessor<T> texAccessor(const PtrStepSz<T>& mat, int yoff, int xoff)
}
};
- #define OPENCV_GPU_IMPLEMENT_WARP_TEX(type) \
+ #define OPENCV_CUDA_IMPLEMENT_WARP_TEX(type) \
texture< type , cudaTextureType2D > tex_warp_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
struct tex_warp_ ## type ## _reader \
{ \
} \
};
- OPENCV_GPU_IMPLEMENT_WARP_TEX(uchar)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(uchar2)
- OPENCV_GPU_IMPLEMENT_WARP_TEX(uchar4)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar2)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar4)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(schar)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(char2)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(char4)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(schar)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(char2)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(char4)
- OPENCV_GPU_IMPLEMENT_WARP_TEX(ushort)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(ushort2)
- OPENCV_GPU_IMPLEMENT_WARP_TEX(ushort4)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort2)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort4)
- OPENCV_GPU_IMPLEMENT_WARP_TEX(short)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(short2)
- OPENCV_GPU_IMPLEMENT_WARP_TEX(short4)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(short)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(short2)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(short4)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(int)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(int2)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(int4)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(int)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(int2)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(int4)
- OPENCV_GPU_IMPLEMENT_WARP_TEX(float)
- //OPENCV_GPU_IMPLEMENT_WARP_TEX(float2)
- OPENCV_GPU_IMPLEMENT_WARP_TEX(float4)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(float)
+ //OPENCV_CUDA_IMPLEMENT_WARP_TEX(float2)
+ OPENCV_CUDA_IMPLEMENT_WARP_TEX(float4)
- #undef OPENCV_GPU_IMPLEMENT_WARP_TEX
+ #undef OPENCV_CUDA_IMPLEMENT_WARP_TEX
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcher
{
#include "test_precomp.hpp"
-CV_GPU_TEST_MAIN("gpu")
+CV_CUDA_TEST_MAIN("gpu")
}
};
-GPU_TEST_P(PyrDown, Accuracy)
+CUDA_TEST_P(PyrDown, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-4 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, PyrDown, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, PyrDown, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(PyrUp, Accuracy)
+CUDA_TEST_P(PyrUp, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-4 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, PyrUp, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, PyrUp, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(Remap, Accuracy)
+CUDA_TEST_P(Remap, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Scalar val = randomScalar(0.0, 255.0);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-3 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, Remap, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, Remap, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(Resize, Accuracy)
+CUDA_TEST_P(Resize, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-2 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, Resize, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, Resize, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
};
// downscaling only: used for classifiers
-GPU_TEST_P(ResizeSameAsHost, Accuracy)
+CUDA_TEST_P(ResizeSameAsHost, Accuracy)
{
cv::Mat src = randomMat(size, type);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-2 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, ResizeSameAsHost, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, ResizeSameAsHost, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(BuildWarpAffineMaps, Accuracy)
+CUDA_TEST_P(BuildWarpAffineMaps, Accuracy)
{
cv::Mat M = createTransfomMatrix(size, CV_PI / 4);
cv::Mat src = randomMat(randomSize(200, 400), CV_8UC1);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, BuildWarpAffineMaps, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, BuildWarpAffineMaps, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DIRECT_INVERSE));
}
};
-GPU_TEST_P(WarpAffine, Accuracy)
+CUDA_TEST_P(WarpAffine, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat M = createTransfomMatrix(size, CV_PI / 3);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-1 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, WarpAffine, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, WarpAffine, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(WarpAffineNPP, Accuracy)
+CUDA_TEST_P(WarpAffineNPP, Accuracy)
{
cv::Mat src = readImageType("stereobp/aloe-L.png", type);
ASSERT_FALSE(src.empty());
EXPECT_MAT_SIMILAR(dst_gold, dst, 2e-2);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, WarpAffineNPP, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, WarpAffineNPP, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
DIRECT_INVERSE,
}
};
-GPU_TEST_P(BuildWarpPerspectiveMaps, Accuracy)
+CUDA_TEST_P(BuildWarpPerspectiveMaps, Accuracy)
{
cv::Mat M = createTransfomMatrix(size, CV_PI / 4);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, BuildWarpPerspectiveMaps, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, BuildWarpPerspectiveMaps, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
DIRECT_INVERSE));
}
};
-GPU_TEST_P(WarpPerspective, Accuracy)
+CUDA_TEST_P(WarpPerspective, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat M = createTransfomMatrix(size, CV_PI / 3);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-1 : 1.0);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, WarpPerspective, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, WarpPerspective, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
}
};
-GPU_TEST_P(WarpPerspectiveNPP, Accuracy)
+CUDA_TEST_P(WarpPerspectiveNPP, Accuracy)
{
cv::Mat src = readImageType("stereobp/aloe-L.png", type);
ASSERT_FALSE(src.empty());
EXPECT_MAT_SIMILAR(dst_gold, dst, 2e-2);
}
-INSTANTIATE_TEST_CASE_P(GPU_Warping, WarpPerspectiveNPP, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Warping, WarpPerspectiveNPP, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
DIRECT_INVERSE,
namespace cv { namespace cuda {
-class CV_EXPORTS SURF_GPU
+class CV_EXPORTS SURF_CUDA
{
public:
enum KeypointLayout
};
//! the default constructor
- SURF_GPU();
+ SURF_CUDA();
//! the full constructor taking all the necessary parameters
- explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4,
+ explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
//! returns the descriptor size in float's (64 or 128)
{
namespace ocl
{
- //! Speeded up robust features, port from GPU module.
+ //! Speeded up robust features, port from CUDA module.
////////////////////////////////// SURF //////////////////////////////////////////
class CV_EXPORTS SURF_OCL
DEF_PARAM_TEST_1(Image, string);
-PERF_TEST_P(Image, GPU_SURF,
+PERF_TEST_P(Image, CUDA_SURF,
Values<std::string>("gpu/perf/aloe.png"))
{
declare.time(50.0);
const cv::Mat img = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- cv::cuda::SURF_GPU d_surf;
+ cv::cuda::SURF_CUDA d_surf;
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat d_keypoints, d_descriptors;
#if !defined (HAVE_CUDA) || !defined (HAVE_OPENCV_CUDAARITHM)
-cv::cuda::SURF_GPU::SURF_GPU() { throw_no_cuda(); }
-cv::cuda::SURF_GPU::SURF_GPU(double, int, int, bool, float, bool) { throw_no_cuda(); }
-int cv::cuda::SURF_GPU::descriptorSize() const { throw_no_cuda(); return 0;}
-void cv::cuda::SURF_GPU::uploadKeypoints(const std::vector<KeyPoint>&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::downloadKeypoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::downloadDescriptors(const GpuMat&, std::vector<float>&) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&, bool) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, std::vector<float>&, bool) { throw_no_cuda(); }
-void cv::cuda::SURF_GPU::releaseMemory() { throw_no_cuda(); }
+cv::cuda::SURF_CUDA::SURF_CUDA() { throw_no_cuda(); }
+cv::cuda::SURF_CUDA::SURF_CUDA(double, int, int, bool, float, bool) { throw_no_cuda(); }
+int cv::cuda::SURF_CUDA::descriptorSize() const { throw_no_cuda(); return 0;}
+void cv::cuda::SURF_CUDA::uploadKeypoints(const std::vector<KeyPoint>&, GpuMat&) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::downloadKeypoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::downloadDescriptors(const GpuMat&, std::vector<float>&) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&, bool) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, std::vector<float>&, bool) { throw_no_cuda(); }
+void cv::cuda::SURF_CUDA::releaseMemory() { throw_no_cuda(); }
#else // !defined (HAVE_CUDA)
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
- class SURF_GPU_Invoker
+ class SURF_CUDA_Invoker
{
public:
- SURF_GPU_Invoker(SURF_GPU& surf, const GpuMat& img, const GpuMat& mask) :
+ SURF_CUDA_Invoker(SURF_CUDA& surf, const GpuMat& img, const GpuMat& mask) :
surf_(surf),
img_cols(img.cols), img_rows(img.rows),
use_mask(!mask.empty())
ensureSizeIsEnough(img_rows * (surf_.nOctaveLayers + 2), img_cols, CV_32FC1, surf_.trace);
ensureSizeIsEnough(1, maxCandidates, CV_32SC4, surf_.maxPosBuffer);
- ensureSizeIsEnough(SURF_GPU::ROWS_COUNT, maxFeatures, CV_32FC1, keypoints);
+ ensureSizeIsEnough(SURF_CUDA::ROWS_COUNT, maxFeatures, CV_32FC1, keypoints);
keypoints.setTo(Scalar::all(0));
for (int octave = 0; octave < surf_.nOctaves; ++octave)
if (maxCounter > 0)
{
icvInterpolateKeypoint_gpu(surf_.det, surf_.maxPosBuffer.ptr<int4>(), maxCounter,
- keypoints.ptr<float>(SURF_GPU::X_ROW), keypoints.ptr<float>(SURF_GPU::Y_ROW),
- keypoints.ptr<int>(SURF_GPU::LAPLACIAN_ROW), keypoints.ptr<int>(SURF_GPU::OCTAVE_ROW),
- keypoints.ptr<float>(SURF_GPU::SIZE_ROW), keypoints.ptr<float>(SURF_GPU::HESSIAN_ROW),
+ keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
+ keypoints.ptr<int>(SURF_CUDA::LAPLACIAN_ROW), keypoints.ptr<int>(SURF_CUDA::OCTAVE_ROW),
+ keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::HESSIAN_ROW),
counters.ptr<unsigned int>());
}
}
keypoints.cols = featureCounter;
if (surf_.upright)
- keypoints.row(SURF_GPU::ANGLE_ROW).setTo(Scalar::all(360.0 - 90.0));
+ keypoints.row(SURF_CUDA::ANGLE_ROW).setTo(Scalar::all(360.0 - 90.0));
else
findOrientation(keypoints);
}
const int nFeatures = keypoints.cols;
if (nFeatures > 0)
{
- icvCalcOrientation_gpu(keypoints.ptr<float>(SURF_GPU::X_ROW), keypoints.ptr<float>(SURF_GPU::Y_ROW),
- keypoints.ptr<float>(SURF_GPU::SIZE_ROW), keypoints.ptr<float>(SURF_GPU::ANGLE_ROW), nFeatures);
+ icvCalcOrientation_gpu(keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
+ keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::ANGLE_ROW), nFeatures);
}
}
if (nFeatures > 0)
{
ensureSizeIsEnough(nFeatures, descriptorSize, CV_32F, descriptors);
- compute_descriptors_gpu(descriptors, keypoints.ptr<float>(SURF_GPU::X_ROW), keypoints.ptr<float>(SURF_GPU::Y_ROW),
- keypoints.ptr<float>(SURF_GPU::SIZE_ROW), keypoints.ptr<float>(SURF_GPU::ANGLE_ROW), nFeatures);
+ compute_descriptors_gpu(descriptors, keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
+ keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::ANGLE_ROW), nFeatures);
}
}
private:
- SURF_GPU_Invoker(const SURF_GPU_Invoker&);
- SURF_GPU_Invoker& operator =(const SURF_GPU_Invoker&);
+ SURF_CUDA_Invoker(const SURF_CUDA_Invoker&);
+ SURF_CUDA_Invoker& operator =(const SURF_CUDA_Invoker&);
- SURF_GPU& surf_;
+ SURF_CUDA& surf_;
int img_cols, img_rows;
};
}
-cv::cuda::SURF_GPU::SURF_GPU()
+cv::cuda::SURF_CUDA::SURF_CUDA()
{
hessianThreshold = 100;
extended = true;
upright = false;
}
-cv::cuda::SURF_GPU::SURF_GPU(double _threshold, int _nOctaves, int _nOctaveLayers, bool _extended, float _keypointsRatio, bool _upright)
+cv::cuda::SURF_CUDA::SURF_CUDA(double _threshold, int _nOctaves, int _nOctaveLayers, bool _extended, float _keypointsRatio, bool _upright)
{
hessianThreshold = _threshold;
extended = _extended;
upright = _upright;
}
-int cv::cuda::SURF_GPU::descriptorSize() const
+int cv::cuda::SURF_CUDA::descriptorSize() const
{
return extended ? 128 : 64;
}
-void cv::cuda::SURF_GPU::uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU)
+void cv::cuda::SURF_CUDA::uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU)
{
if (keypoints.empty())
keypointsGPU.release();
else
{
- Mat keypointsCPU(SURF_GPU::ROWS_COUNT, static_cast<int>(keypoints.size()), CV_32FC1);
+ Mat keypointsCPU(SURF_CUDA::ROWS_COUNT, static_cast<int>(keypoints.size()), CV_32FC1);
- float* kp_x = keypointsCPU.ptr<float>(SURF_GPU::X_ROW);
- float* kp_y = keypointsCPU.ptr<float>(SURF_GPU::Y_ROW);
- int* kp_laplacian = keypointsCPU.ptr<int>(SURF_GPU::LAPLACIAN_ROW);
- int* kp_octave = keypointsCPU.ptr<int>(SURF_GPU::OCTAVE_ROW);
- float* kp_size = keypointsCPU.ptr<float>(SURF_GPU::SIZE_ROW);
- float* kp_dir = keypointsCPU.ptr<float>(SURF_GPU::ANGLE_ROW);
- float* kp_hessian = keypointsCPU.ptr<float>(SURF_GPU::HESSIAN_ROW);
+ float* kp_x = keypointsCPU.ptr<float>(SURF_CUDA::X_ROW);
+ float* kp_y = keypointsCPU.ptr<float>(SURF_CUDA::Y_ROW);
+ int* kp_laplacian = keypointsCPU.ptr<int>(SURF_CUDA::LAPLACIAN_ROW);
+ int* kp_octave = keypointsCPU.ptr<int>(SURF_CUDA::OCTAVE_ROW);
+ float* kp_size = keypointsCPU.ptr<float>(SURF_CUDA::SIZE_ROW);
+ float* kp_dir = keypointsCPU.ptr<float>(SURF_CUDA::ANGLE_ROW);
+ float* kp_hessian = keypointsCPU.ptr<float>(SURF_CUDA::HESSIAN_ROW);
for (size_t i = 0, size = keypoints.size(); i < size; ++i)
{
}
}
-void cv::cuda::SURF_GPU::downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints)
+void cv::cuda::SURF_CUDA::downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints)
{
const int nFeatures = keypointsGPU.cols;
keypoints.resize(nFeatures);
- float* kp_x = keypointsCPU.ptr<float>(SURF_GPU::X_ROW);
- float* kp_y = keypointsCPU.ptr<float>(SURF_GPU::Y_ROW);
- int* kp_laplacian = keypointsCPU.ptr<int>(SURF_GPU::LAPLACIAN_ROW);
- int* kp_octave = keypointsCPU.ptr<int>(SURF_GPU::OCTAVE_ROW);
- float* kp_size = keypointsCPU.ptr<float>(SURF_GPU::SIZE_ROW);
- float* kp_dir = keypointsCPU.ptr<float>(SURF_GPU::ANGLE_ROW);
- float* kp_hessian = keypointsCPU.ptr<float>(SURF_GPU::HESSIAN_ROW);
+ float* kp_x = keypointsCPU.ptr<float>(SURF_CUDA::X_ROW);
+ float* kp_y = keypointsCPU.ptr<float>(SURF_CUDA::Y_ROW);
+ int* kp_laplacian = keypointsCPU.ptr<int>(SURF_CUDA::LAPLACIAN_ROW);
+ int* kp_octave = keypointsCPU.ptr<int>(SURF_CUDA::OCTAVE_ROW);
+ float* kp_size = keypointsCPU.ptr<float>(SURF_CUDA::SIZE_ROW);
+ float* kp_dir = keypointsCPU.ptr<float>(SURF_CUDA::ANGLE_ROW);
+ float* kp_hessian = keypointsCPU.ptr<float>(SURF_CUDA::HESSIAN_ROW);
for (int i = 0; i < nFeatures; ++i)
{
}
}
-void cv::cuda::SURF_GPU::downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors)
+void cv::cuda::SURF_CUDA::downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors)
{
if (descriptorsGPU.empty())
descriptors.clear();
}
}
-void cv::cuda::SURF_GPU::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints)
+void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints)
{
if (!img.empty())
{
- SURF_GPU_Invoker surf(*this, img, mask);
+ SURF_CUDA_Invoker surf(*this, img, mask);
surf.detectKeypoints(keypoints);
}
}
-void cv::cuda::SURF_GPU::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
+void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints)
{
if (!img.empty())
{
- SURF_GPU_Invoker surf(*this, img, mask);
+ SURF_CUDA_Invoker surf(*this, img, mask);
if (!useProvidedKeypoints)
surf.detectKeypoints(keypoints);
}
}
-void cv::cuda::SURF_GPU::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
+void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
{
GpuMat keypointsGPU;
downloadKeypoints(keypointsGPU, keypoints);
}
-void cv::cuda::SURF_GPU::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
+void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
GpuMat& descriptors, bool useProvidedKeypoints)
{
GpuMat keypointsGPU;
downloadKeypoints(keypointsGPU, keypoints);
}
-void cv::cuda::SURF_GPU::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
+void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
std::vector<float>& descriptors, bool useProvidedKeypoints)
{
GpuMat descriptorsGPU;
downloadDescriptors(descriptorsGPU, descriptors);
}
-void cv::cuda::SURF_GPU::releaseMemory()
+void cv::cuda::SURF_CUDA::releaseMemory()
{
sum.release();
mask1.release();
}
};
-GPU_TEST_P(SURF, Detector)
+CUDA_TEST_P(SURF, Detector)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
- cv::cuda::SURF_GPU surf;
+ cv::cuda::SURF_CUDA surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
EXPECT_GT(matchedRatio, 0.95);
}
-GPU_TEST_P(SURF, Detector_Masked)
+CUDA_TEST_P(SURF, Detector_Masked)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
- cv::cuda::SURF_GPU surf;
+ cv::cuda::SURF_CUDA surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
EXPECT_GT(matchedRatio, 0.95);
}
-GPU_TEST_P(SURF, Descriptor)
+CUDA_TEST_P(SURF, Descriptor)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
- cv::cuda::SURF_GPU surf;
+ cv::cuda::SURF_CUDA surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
EXPECT_GT(matchedRatio, 0.6);
}
-INSTANTIATE_TEST_CASE_P(GPU_Features2D, SURF, testing::Combine(
+INSTANTIATE_TEST_CASE_P(CUDA_Features2D, SURF, testing::Combine(
testing::Values(SURF_HessianThreshold(100.0), SURF_HessianThreshold(500.0), SURF_HessianThreshold(1000.0)),
testing::Values(SURF_Octaves(3), SURF_Octaves(4)),
testing::Values(SURF_OctaveLayers(2), SURF_OctaveLayers(3)),
using namespace testing;
using namespace perf;
-#define GPU_DENOISING_IMAGE_SIZES testing::Values(perf::szVGA, perf::sz720p)
+#define CUDA_DENOISING_IMAGE_SIZES testing::Values(perf::szVGA, perf::sz720p)
//////////////////////////////////////////////////////////////////////
// nonLocalMeans
DEF_PARAM_TEST(Sz_Depth_Cn_WinSz_BlockSz, cv::Size, MatDepth, MatCn, int, int);
-PERF_TEST_P(Sz_Depth_Cn_WinSz_BlockSz, GPU_NonLocalMeans,
- Combine(GPU_DENOISING_IMAGE_SIZES,
+PERF_TEST_P(Sz_Depth_Cn_WinSz_BlockSz, CUDA_NonLocalMeans,
+ Combine(CUDA_DENOISING_IMAGE_SIZES,
Values<MatDepth>(CV_8U),
- GPU_CHANNELS_1_3,
+ CUDA_CHANNELS_1_3,
Values(21),
Values(5)))
{
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::nonLocalMeans(d_src, dst, h, search_widow_size, block_size, borderMode);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_Cn_WinSz_BlockSz, cv::Size, MatDepth, MatCn, int, int);
-PERF_TEST_P(Sz_Depth_Cn_WinSz_BlockSz, GPU_FastNonLocalMeans,
- Combine(GPU_DENOISING_IMAGE_SIZES,
+PERF_TEST_P(Sz_Depth_Cn_WinSz_BlockSz, CUDA_FastNonLocalMeans,
+ Combine(CUDA_DENOISING_IMAGE_SIZES,
Values<MatDepth>(CV_8U),
- GPU_CHANNELS_1_3,
+ CUDA_CHANNELS_1_3,
Values(21),
Values(7)))
{
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::FastNonLocalMeansDenoising fnlmd;
TEST_CYCLE() fnlmd.simpleMethod(d_src, dst, h, search_widow_size, block_size);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
DEF_PARAM_TEST(Sz_Depth_WinSz_BlockSz, cv::Size, MatDepth, int, int);
-PERF_TEST_P(Sz_Depth_WinSz_BlockSz, GPU_FastNonLocalMeansColored,
- Combine(GPU_DENOISING_IMAGE_SIZES,
+PERF_TEST_P(Sz_Depth_WinSz_BlockSz, CUDA_FastNonLocalMeansColored,
+ Combine(CUDA_DENOISING_IMAGE_SIZES,
Values<MatDepth>(CV_8U),
Values(21),
Values(7)))
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
cv::cuda::FastNonLocalMeansDenoising fnlmd;
TEST_CYCLE() fnlmd.labMethod(d_src, dst, h, h, search_widow_size, block_size);
- GPU_SANITY_CHECK(dst);
+ CUDA_SANITY_CHECK(dst);
}
else
{
////////////////////////////////////////////////////////
// Brute Force Non local means
-TEST(GPU_BruteForceNonLocalMeans, Regression)
+TEST(CUDA_BruteForceNonLocalMeans, Regression)
{
using cv::cuda::GpuMat;
////////////////////////////////////////////////////////
// Fast Force Non local means
-TEST(GPU_FastNonLocalMeans, Regression)
+TEST(CUDA_FastNonLocalMeans, Regression)
{
using cv::cuda::GpuMat;
CV_EXPORTS bool initModule_softcascade(void);
-// ======================== GPU version for soft cascade ===================== //
+// ======================== CUDA version for soft cascade ==================== //
class CV_EXPORTS ChannelsProcessor
{
};\
TEST_P(fixture##_##name, name /*perf*/){ RunPerfTestBody(); }\
INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\
- void fixture##_##name::PerfTestBody() { if (PERF_RUN_GPU()) __gpu(); else __cpu(); }
+ void fixture##_##name::PerfTestBody() { if (PERF_RUN_CUDA()) __gpu(); else __cpu(); }
#define RUN_CPU(fixture, name)\
void fixture##_##name::__cpu()
-#define RUN_GPU(fixture, name)\
+#define RUN_CUDA(fixture, name)\
void fixture##_##name::__gpu()
#define NO_CPU(fixture, name)\
std::string("cv/cascadeandhog/cascades/sc_cvpr_2012_to_opencv_new_format.xml")),
testing::Values(std::string("cv/cascadeandhog/images/image_00000000_0.png"))))
-RUN_GPU(SCascadeTest, detect)
+RUN_CUDA(SCascadeTest, detect)
{
cv::Mat cpu = cv::imread(getDataPath(get<1>(GetParam())));;
ASSERT_FALSE(cpu.empty());
testing::Values(std::string("cv/cascadeandhog/images/image_00000000_0.png")),
testing::Range(0, 5)))
-RUN_GPU(SCascadeTestRoi, detectInRoi)
+RUN_CUDA(SCascadeTestRoi, detectInRoi)
{
cv::Mat cpu = cv::imread(getDataPath(get<1>(GetParam())));
ASSERT_FALSE(cpu.empty());
testing::Values(std::string("cv/cascadeandhog/images/image_00000000_0.png")),
testing::Range(0, 10)))
-RUN_GPU(SCascadeTestRoi, detectEachRoi)
+RUN_CUDA(SCascadeTestRoi, detectEachRoi)
{
cv::Mat cpu = cv::imread(getDataPath(get<1>(GetParam())));
ASSERT_FALSE(cpu.empty());
std::string("cv/cascadeandhog/cascades/sc_cvpr_2012_to_opencv_new_format.xml")),
testing::Values(std::string("cv/cascadeandhog/images/image_00000000_0.png"))))
-RUN_GPU(SCascadeTest, detectStream)
+RUN_CUDA(SCascadeTest, detectStream)
{
cv::Mat cpu = cv::imread(getDataPath(get<1>(GetParam())));
ASSERT_FALSE(cpu.empty());
if (!info.isCompatible())
{
- msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current GPU module build";
+ msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current CUDA module build";
CV_Error(cv::Error::StsBadArg, msg.str());
}
# define ALL_DEVICES testing::ValuesIn(std::vector<cv::cuda::DeviceInfo>())
#endif
-#endif // __OPENCV_GPU_TEST_UTILITY_HPP__
+#endif // __OPENCV_CUDA_TEST_UTILITY_HPP__
cuda::GpuMat image_;
cuda::GpuMat gray_image_;
- cuda::SURF_GPU surf_;
+ cuda::SURF_CUDA surf_;
cuda::GpuMat keypoints_;
cuda::GpuMat descriptors_;
int num_octaves_, num_layers_;
descriptors1_.upload(features1.descriptors);
descriptors2_.upload(features2.descriptors);
- BFMatcher_GPU matcher(NORM_L2);
+ BFMatcher_CUDA matcher(NORM_L2);
MatchesSet matches;
// Find 1->2 matches
CV_EXPORTS Ptr<FrameSource> createFrameSource_Empty();
CV_EXPORTS Ptr<FrameSource> createFrameSource_Video(const String& fileName);
- CV_EXPORTS Ptr<FrameSource> createFrameSource_Video_GPU(const String& fileName);
+ CV_EXPORTS Ptr<FrameSource> createFrameSource_Video_CUDA(const String& fileName);
CV_EXPORTS Ptr<FrameSource> createFrameSource_Camera(int deviceId = 0);
// S. Farsiu , D. Robinson, M. Elad, P. Milanfar. Fast and robust multiframe super resolution.
// Dennis Mitzel, Thomas Pock, Thomas Schoenemann, Daniel Cremers. Video Super Resolution using Duality Based TV-L1 Optical Flow.
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();
- CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_GPU();
+ CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_CUDA();
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_OCL();
}
}
};
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback();
- CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_GPU();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_OCL();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1();
- CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_GPU();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_OCL();
- CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_GPU();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_CUDA();
- CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_GPU();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_OCL();
}
}
Mat frame_;
};
- class OneFrameSource_GPU : public FrameSource
+ class OneFrameSource_CUDA : public FrameSource
{
public:
- explicit OneFrameSource_GPU(const GpuMat& frame) : frame_(frame) {}
+ explicit OneFrameSource_CUDA(const GpuMat& frame) : frame_(frame) {}
void nextFrame(OutputArray frame)
{
const int temporalAreaRadius = 1;
Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
- if (PERF_RUN_GPU())
+ if (PERF_RUN_CUDA())
{
- Ptr<SuperResolution> superRes = createSuperResolution_BTVL1_GPU();
+ Ptr<SuperResolution> superRes = createSuperResolution_BTVL1_CUDA();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
- superRes->setInput(new OneFrameSource_GPU(GpuMat(frame)));
+ superRes->setInput(new OneFrameSource_CUDA(GpuMat(frame)));
GpuMat dst;
superRes->nextFrame(dst);
TEST_CYCLE_N(10) superRes->nextFrame(dst);
- GPU_SANITY_CHECK(dst, 2);
+ CUDA_SANITY_CHECK(dst, 2);
}
else
{
#if !defined(HAVE_CUDA) || !defined(HAVE_OPENCV_CUDAARITHM) || !defined(HAVE_OPENCV_CUDAWARPING) || !defined(HAVE_OPENCV_CUDAFILTERS)
-Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_GPU()
+Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_CUDA()
{
CV_Error(Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<SuperResolution>();
funcs[src.channels()](src, dst, ksize);
}
- class BTVL1_GPU_Base
+ class BTVL1_CUDA_Base
{
public:
- BTVL1_GPU_Base();
+ BTVL1_CUDA_Base();
void process(const std::vector<GpuMat>& src, GpuMat& dst,
const std::vector<std::pair<GpuMat, GpuMat> >& forwardMotions, const std::vector<std::pair<GpuMat, GpuMat> >& backwardMotions,
GpuMat regTerm_;
};
- BTVL1_GPU_Base::BTVL1_GPU_Base()
+ BTVL1_CUDA_Base::BTVL1_CUDA_Base()
{
scale_ = 4;
iterations_ = 180;
blurSigma_ = 0.0;
#ifdef HAVE_OPENCV_CUDAOPTFLOW
- opticalFlow_ = createOptFlow_Farneback_GPU();
+ opticalFlow_ = createOptFlow_Farneback_CUDA();
#else
opticalFlow_ = createOptFlow_Farneback();
#endif
curAlpha_ = -1.0;
}
- void BTVL1_GPU_Base::process(const std::vector<GpuMat>& src, GpuMat& dst,
+ void BTVL1_CUDA_Base::process(const std::vector<GpuMat>& src, GpuMat& dst,
const std::vector<std::pair<GpuMat, GpuMat> >& forwardMotions, const std::vector<std::pair<GpuMat, GpuMat> >& backwardMotions,
int baseIdx)
{
highRes_(inner).copyTo(dst);
}
- void BTVL1_GPU_Base::collectGarbage()
+ void BTVL1_CUDA_Base::collectGarbage()
{
filters_.clear();
////////////////////////////////////////////////////////////
- class BTVL1_GPU : public SuperResolution, private BTVL1_GPU_Base
+ class BTVL1_CUDA : public SuperResolution, private BTVL1_CUDA_Base
{
public:
AlgorithmInfo* info() const;
- BTVL1_GPU();
+ BTVL1_CUDA();
void collectGarbage();
GpuMat finalOutput_;
};
- CV_INIT_ALGORITHM(BTVL1_GPU, "SuperResolution.BTVL1_GPU",
+ CV_INIT_ALGORITHM(BTVL1_CUDA, "SuperResolution.BTVL1_CUDA",
obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor.");
obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count.");
obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method.");
obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area.");
obj.info()->addParam<DenseOpticalFlowExt>(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm."));
- BTVL1_GPU::BTVL1_GPU()
+ BTVL1_CUDA::BTVL1_CUDA()
{
temporalAreaRadius_ = 4;
}
- void BTVL1_GPU::collectGarbage()
+ void BTVL1_CUDA::collectGarbage()
{
curFrame_.release();
prevFrame_.release();
finalOutput_.release();
SuperResolution::collectGarbage();
- BTVL1_GPU_Base::collectGarbage();
+ BTVL1_CUDA_Base::collectGarbage();
}
- void BTVL1_GPU::initImpl(Ptr<FrameSource>& frameSource)
+ void BTVL1_CUDA::initImpl(Ptr<FrameSource>& frameSource)
{
const int cacheSize = 2 * temporalAreaRadius_ + 1;
outPos_ = -1;
}
- void BTVL1_GPU::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
+ void BTVL1_CUDA::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
{
if (outPos_ >= storePos_)
{
}
}
- void BTVL1_GPU::readNextFrame(Ptr<FrameSource>& frameSource)
+ void BTVL1_CUDA::readNextFrame(Ptr<FrameSource>& frameSource)
{
frameSource->nextFrame(curFrame_);
curFrame_.copyTo(prevFrame_);
}
- void BTVL1_GPU::processFrame(int idx)
+ void BTVL1_CUDA::processFrame(int idx)
{
const int startIdx = std::max(idx - temporalAreaRadius_, 0);
const int procIdx = idx;
}
}
-Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_GPU()
+Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_CUDA()
{
- return new BTVL1_GPU;
+ return new BTVL1_CUDA;
}
#endif // HAVE_CUDA
#endif // HAVE_OPENCV_HIGHGUI
//////////////////////////////////////////////////////
-// VideoFrameSource_GPU
+// VideoFrameSource_CUDA
#ifndef HAVE_OPENCV_CUDACODEC
-Ptr<FrameSource> cv::superres::createFrameSource_Video_GPU(const String& fileName)
+Ptr<FrameSource> cv::superres::createFrameSource_Video_CUDA(const String& fileName)
{
(void) fileName;
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
namespace
{
- class VideoFrameSource_GPU : public FrameSource
+ class VideoFrameSource_CUDA : public FrameSource
{
public:
- VideoFrameSource_GPU(const String& fileName);
+ VideoFrameSource_CUDA(const String& fileName);
void nextFrame(OutputArray frame);
void reset();
GpuMat frame_;
};
- VideoFrameSource_GPU::VideoFrameSource_GPU(const String& fileName) : fileName_(fileName)
+ VideoFrameSource_CUDA::VideoFrameSource_CUDA(const String& fileName) : fileName_(fileName)
{
reset();
}
- void VideoFrameSource_GPU::nextFrame(OutputArray _frame)
+ void VideoFrameSource_CUDA::nextFrame(OutputArray _frame)
{
if (_frame.kind() == _InputArray::GPU_MAT)
{
}
}
- void VideoFrameSource_GPU::reset()
+ void VideoFrameSource_CUDA::reset()
{
reader_ = cudacodec::createVideoReader(fileName_);
}
}
-Ptr<FrameSource> cv::superres::createFrameSource_Video_GPU(const String& fileName)
+Ptr<FrameSource> cv::superres::createFrameSource_Video_CUDA(const String& fileName)
{
return new VideoFrameSource(fileName);
}
#ifndef HAVE_OPENCV_CUDAOPTFLOW
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
///////////////////////////////////////////////////////////////////
-// Brox_GPU
+// Brox_CUDA
namespace
{
- class Brox_GPU : public GpuOpticalFlow
+ class Brox_CUDA : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
- Brox_GPU();
+ Brox_CUDA();
void collectGarbage();
BroxOpticalFlow alg_;
};
- CV_INIT_ALGORITHM(Brox_GPU, "DenseOpticalFlowExt.Brox_GPU",
+ CV_INIT_ALGORITHM(Brox_CUDA, "DenseOpticalFlowExt.Brox_CUDA",
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Flow smoothness");
obj.info()->addParam(obj, "gamma", obj.gamma_, false, 0, 0, "Gradient constancy importance");
obj.info()->addParam(obj, "scaleFactor", obj.scaleFactor_, false, 0, 0, "Pyramid scale factor");
obj.info()->addParam(obj, "outerIterations", obj.outerIterations_, false, 0, 0, "Number of warping iterations (number of pyramid levels)");
obj.info()->addParam(obj, "solverIterations", obj.solverIterations_, false, 0, 0, "Number of linear system solver iterations"));
- Brox_GPU::Brox_GPU() : GpuOpticalFlow(CV_32FC1), alg_(0.197f, 50.0f, 0.8f, 10, 77, 10)
+ Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1), alg_(0.197f, 50.0f, 0.8f, 10, 77, 10)
{
alpha_ = alg_.alpha;
gamma_ = alg_.gamma;
solverIterations_ = alg_.solver_iterations;
}
- void Brox_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
+ void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.alpha = static_cast<float>(alpha_);
alg_.gamma = static_cast<float>(gamma_);
alg_(input0, input1, dst1, dst2);
}
- void Brox_GPU::collectGarbage()
+ void Brox_CUDA::collectGarbage()
{
alg_.buf.release();
GpuOpticalFlow::collectGarbage();
}
}
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
{
- return new Brox_GPU;
+ return new Brox_CUDA;
}
///////////////////////////////////////////////////////////////////
-// PyrLK_GPU
+// PyrLK_CUDA
namespace
{
- class PyrLK_GPU : public GpuOpticalFlow
+ class PyrLK_CUDA : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
- PyrLK_GPU();
+ PyrLK_CUDA();
void collectGarbage();
PyrLKOpticalFlow alg_;
};
- CV_INIT_ALGORITHM(PyrLK_GPU, "DenseOpticalFlowExt.PyrLK_GPU",
+ CV_INIT_ALGORITHM(PyrLK_CUDA, "DenseOpticalFlowExt.PyrLK_CUDA",
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "maxLevel", obj.maxLevel_);
obj.info()->addParam(obj, "iterations", obj.iterations_));
- PyrLK_GPU::PyrLK_GPU() : GpuOpticalFlow(CV_8UC1)
+ PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1)
{
winSize_ = alg_.winSize.width;
maxLevel_ = alg_.maxLevel;
iterations_ = alg_.iters;
}
- void PyrLK_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
+ void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.winSize.width = winSize_;
alg_.winSize.height = winSize_;
alg_.dense(input0, input1, dst1, dst2);
}
- void PyrLK_GPU::collectGarbage()
+ void PyrLK_CUDA::collectGarbage()
{
alg_.releaseMemory();
GpuOpticalFlow::collectGarbage();
}
}
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
{
- return new PyrLK_GPU;
+ return new PyrLK_CUDA;
}
///////////////////////////////////////////////////////////////////
-// Farneback_GPU
+// Farneback_CUDA
namespace
{
- class Farneback_GPU : public GpuOpticalFlow
+ class Farneback_CUDA : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
- Farneback_GPU();
+ Farneback_CUDA();
void collectGarbage();
FarnebackOpticalFlow alg_;
};
- CV_INIT_ALGORITHM(Farneback_GPU, "DenseOpticalFlowExt.Farneback_GPU",
+ CV_INIT_ALGORITHM(Farneback_CUDA, "DenseOpticalFlowExt.Farneback_CUDA",
obj.info()->addParam(obj, "pyrScale", obj.pyrScale_);
obj.info()->addParam(obj, "numLevels", obj.numLevels_);
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "polySigma", obj.polySigma_);
obj.info()->addParam(obj, "flags", obj.flags_));
- Farneback_GPU::Farneback_GPU() : GpuOpticalFlow(CV_8UC1)
+ Farneback_CUDA::Farneback_CUDA() : GpuOpticalFlow(CV_8UC1)
{
pyrScale_ = alg_.pyrScale;
numLevels_ = alg_.numLevels;
flags_ = alg_.flags;
}
- void Farneback_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
+ void Farneback_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.pyrScale = pyrScale_;
alg_.numLevels = numLevels_;
alg_(input0, input1, dst1, dst2);
}
- void Farneback_GPU::collectGarbage()
+ void Farneback_CUDA::collectGarbage()
{
alg_.releaseMemory();
GpuOpticalFlow::collectGarbage();
}
}
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
{
- return new Farneback_GPU;
+ return new Farneback_CUDA;
}
///////////////////////////////////////////////////////////////////
-// DualTVL1_GPU
+// DualTVL1_CUDA
namespace
{
- class DualTVL1_GPU : public GpuOpticalFlow
+ class DualTVL1_CUDA : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
- DualTVL1_GPU();
+ DualTVL1_CUDA();
void collectGarbage();
int iterations_;
bool useInitialFlow_;
- OpticalFlowDual_TVL1_GPU alg_;
+ OpticalFlowDual_TVL1_CUDA alg_;
};
- CV_INIT_ALGORITHM(DualTVL1_GPU, "DenseOpticalFlowExt.DualTVL1_GPU",
+ CV_INIT_ALGORITHM(DualTVL1_CUDA, "DenseOpticalFlowExt.DualTVL1_CUDA",
obj.info()->addParam(obj, "tau", obj.tau_);
obj.info()->addParam(obj, "lambda", obj.lambda_);
obj.info()->addParam(obj, "theta", obj.theta_);
obj.info()->addParam(obj, "iterations", obj.iterations_);
obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_));
- DualTVL1_GPU::DualTVL1_GPU() : GpuOpticalFlow(CV_8UC1)
+ DualTVL1_CUDA::DualTVL1_CUDA() : GpuOpticalFlow(CV_8UC1)
{
tau_ = alg_.tau;
lambda_ = alg_.lambda;
useInitialFlow_ = alg_.useInitialFlow;
}
- void DualTVL1_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
+ void DualTVL1_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.tau = tau_;
alg_.lambda = lambda_;
alg_(input0, input1, dst1, dst2);
}
- void DualTVL1_GPU::collectGarbage()
+ void DualTVL1_CUDA::collectGarbage()
{
alg_.collectGarbage();
GpuOpticalFlow::collectGarbage();
}
}
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_GPU()
+Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_CUDA()
{
- return new DualTVL1_GPU;
+ return new DualTVL1_CUDA;
}
#endif // HAVE_OPENCV_CUDAOPTFLOW
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) && defined(HAVE_OPENCV_CUDAFILTERS)
-TEST_F(SuperResolution, BTVL1_GPU)
+TEST_F(SuperResolution, BTVL1_CUDA)
{
- RunTest(cv::superres::createSuperResolution_BTVL1_GPU());
+ RunTest(cv::superres::createSuperResolution_BTVL1_CUDA());
}
#endif
enum { Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4 };
CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
- #define GPU_CHANNELS_1_3_4 testing::Values(MatCn(Gray), MatCn(BGR), MatCn(BGRA))
- #define GPU_CHANNELS_1_3 testing::Values(MatCn(Gray), MatCn(BGR))
+ #define CUDA_CHANNELS_1_3_4 testing::Values(MatCn(Gray), MatCn(BGR), MatCn(BGRA))
+ #define CUDA_CHANNELS_1_3 testing::Values(MatCn(Gray), MatCn(BGR))
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
DEF_PARAM_TEST(Sz_Depth, cv::Size, perf::MatDepth);
DEF_PARAM_TEST(Sz_Depth_Cn, cv::Size, perf::MatDepth, MatCn);
- #define GPU_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
+ #define CUDA_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
#define FAIL_NO_CPU() FAIL() << "No such CPU implementation analogy"
- #define GPU_SANITY_CHECK(mat, ...) \
+ #define CUDA_SANITY_CHECK(mat, ...) \
do{ \
cv::Mat gpu_##mat(mat); \
SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
- #define GPU_TEST_P(test_case_name, test_name) \
+ #define CUDA_TEST_P(test_case_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
: public test_case_name { \
public: \
#ifdef HAVE_CUDA
-#define CV_GPU_TEST_MAIN(resourcesubdir) \
+#define CV_CUDA_TEST_MAIN(resourcesubdir) \
int main(int argc, char* argv[]) \
{ \
try \
#else // HAVE_CUDA
-#define CV_GPU_TEST_MAIN(resourcesubdir) \
+#define CV_CUDA_TEST_MAIN(resourcesubdir) \
int main() \
{ \
printf("OpenCV was built without CUDA support\n"); \
static bool targetDevice();
};
-#define PERF_RUN_GPU() ::perf::GpuPerf::targetDevice()
+#define PERF_RUN_CUDA() ::perf::GpuPerf::targetDevice()
/*****************************************************************************************\
* Container for performance metrics *
printf("[----------]\n"), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
- printf("[ GPU INFO ] \tGPU module was compiled for the following GPU archs.\n"), fflush(stdout);
+ printf("[ GPU INFO ] \tCUDA module was compiled for the following GPU archs.\n"), fflush(stdout);
printf("[ BIN ] \t%s.\n", CUDA_ARCH_BIN), fflush(stdout);
printf("[ PTX ] \t%s.\n", CUDA_ARCH_PTX), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
printf("[ ] \tTotal memory: %d Mb\n", static_cast<int>(static_cast<int>(info.totalMemory() / 1024.0) / 1024.0)), fflush(stdout);
printf("[ ] \tFree memory: %d Mb\n", static_cast<int>(static_cast<int>(info.freeMemory() / 1024.0) / 1024.0)), fflush(stdout);
if (!info.isCompatible())
- printf("[ GPU INFO ] \tThis device is NOT compatible with current GPU module build\n");
+ printf("[ GPU INFO ] \tThis device is NOT compatible with current CUDA module build\n");
printf("[----------]\n"), fflush(stdout);
}
if (!info.isCompatible())
{
- msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current GPU module build";
+ msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current CUDA module build";
throw std::runtime_error(msg.str());
}
"{ perf_max_deviation |1.0 |}"
"{ help h |false |print help info}"
#ifdef HAVE_CUDA
- "{ perf_cuda_device |0 |run GPU test suite onto specific CUDA capable device}"
+ "{ perf_cuda_device |0 |run CUDA test suite onto specific CUDA capable device}"
"{ perf_cuda_info_only |false |print an information about system and an available CUDA devices and then exit.}"
#endif
;
cv::cuda::DeviceInfo info(param_cuda_device);
if (!info.isCompatible())
{
- printf("[----------]\n[ FAILURE ] \tDevice %s is NOT compatible with current GPU module build.\n[----------]\n", info.name()), fflush(stdout);
+ printf("[----------]\n[ FAILURE ] \tDevice %s is NOT compatible with current CUDA module build.\n[----------]\n", info.name()), fflush(stdout);
exit(-1);
}
#ifdef HAVE_OPENCV_CUDAOPTFLOW
setOptFlowEstimator(new DensePyrLkOptFlowEstimatorGpu());
#else
- CV_Error(Error::StsNotImplemented, "Current implementation of MotionInpainter requires GPU");
+ CV_Error(Error::StsNotImplemented, "Current implementation of MotionInpainter requires CUDA");
#endif
setFlowErrorThreshold(1e-4f);
setDistThreshold(5.f);
#include <opencv2/imgproc.hpp>// Image processing methods for the CPU
#include <opencv2/highgui.hpp>// Read images
-// GPU structures and methods
+// CUDA structures and methods
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudafilters.hpp>
double getPSNR(const Mat& I1, const Mat& I2); // CPU versions
Scalar getMSSIM( const Mat& I1, const Mat& I2);
-double getPSNR_GPU(const Mat& I1, const Mat& I2); // Basic GPU versions
-Scalar getMSSIM_GPU( const Mat& I1, const Mat& I2);
+double getPSNR_CUDA(const Mat& I1, const Mat& I2); // Basic CUDA versions
+Scalar getMSSIM_CUDA( const Mat& I1, const Mat& I2);
-struct BufferPSNR // Optimized GPU versions
-{ // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once reuse later.
+struct BufferPSNR // Optimized CUDA versions
+{ // Data allocations are very expensive on CUDA. Use a buffer to solve: allocate once reuse later.
cuda::GpuMat gI1, gI2, gs, t1,t2;
cuda::GpuMat buf;
};
-double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b);
+double getPSNR_CUDA_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b);
-struct BufferMSSIM // Optimized GPU versions
-{ // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once reuse later.
+struct BufferMSSIM // Optimized CUDA versions
+{ // Data allocations are very expensive on CUDA. Use a buffer to solve: allocate once reuse later.
cuda::GpuMat gI1, gI2, gs, t1,t2;
cuda::GpuMat I1_2, I2_2, I1_I2;
cuda::GpuMat buf;
};
-Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b);
+Scalar getMSSIM_CUDA_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b);
static void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
- << "This program shows how to port your CPU code to GPU or write that from scratch." << endl
+ << "This program shows how to port your CPU code to CUDA or write that from scratch." << endl
<< "You can see the performance improvement for the similarity check methods (PSNR and SSIM)." << endl
<< "Usage:" << endl
<< "./gpu-basics-similarity referenceImage comparedImage numberOfTimesToRunTest(like 10)." << endl
cout << "Time of PSNR CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of: " << result << endl;
- //------------------------------- PSNR GPU ----------------------------------------------------
+ //------------------------------- PSNR CUDA ----------------------------------------------------
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
- result = getPSNR_GPU(I1,I2);
+ result = getPSNR_CUDA(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
- cout << "Time of PSNR GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
+ cout << "Time of PSNR CUDA (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of: " << result << endl;
- //------------------------------- PSNR GPU Optimized--------------------------------------------
+ //------------------------------- PSNR CUDA Optimized--------------------------------------------
time = (double)getTickCount(); // Initial call
- result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
+ result = getPSNR_CUDA_optimized(I1, I2, bufferPSNR);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
- cout << "Initial call GPU optimized: " << time <<" milliseconds."
+ cout << "Initial call CUDA optimized: " << time <<" milliseconds."
<< " With result of: " << result << endl;
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
- result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
+ result = getPSNR_CUDA_optimized(I1, I2, bufferPSNR);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
- cout << "Time of PSNR GPU OPTIMIZED ( / " << TIMES << " runs): " << time
+ cout << "Time of PSNR CUDA OPTIMIZED ( / " << TIMES << " runs): " << time
<< " milliseconds." << " With result of: " << result << endl << endl;
cout << "Time of MSSIM CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
- //------------------------------- SSIM GPU -----------------------------------------------------
+ //------------------------------- SSIM CUDA -----------------------------------------------------
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
- x = getMSSIM_GPU(I1,I2);
+ x = getMSSIM_CUDA(I1,I2);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
- cout << "Time of MSSIM GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
+ cout << "Time of MSSIM CUDA (averaged for " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
- //------------------------------- SSIM GPU Optimized--------------------------------------------
+ //------------------------------- SSIM CUDA Optimized--------------------------------------------
time = (double)getTickCount();
- x = getMSSIM_GPU_optimized(I1,I2, bufferMSSIM);
+ x = getMSSIM_CUDA_optimized(I1,I2, bufferMSSIM);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
- cout << "Time of MSSIM GPU Initial Call " << time << " milliseconds."
+ cout << "Time of MSSIM CUDA Initial Call " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;
time = (double)getTickCount();
for (int i = 0; i < TIMES; ++i)
- x = getMSSIM_GPU_optimized(I1,I2, bufferMSSIM);
+ x = getMSSIM_CUDA_optimized(I1,I2, bufferMSSIM);
time = 1000*((double)getTickCount() - time)/getTickFrequency();
time /= TIMES;
- cout << "Time of MSSIM GPU OPTIMIZED ( / " << TIMES << " runs): " << time << " milliseconds."
+ cout << "Time of MSSIM CUDA OPTIMIZED ( / " << TIMES << " runs): " << time << " milliseconds."
<< " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl << endl;
return 0;
}
-double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
+double getPSNR_CUDA_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
{
b.gI1.upload(I1);
b.gI2.upload(I2);
}
}
-double getPSNR_GPU(const Mat& I1, const Mat& I2)
+double getPSNR_CUDA(const Mat& I1, const Mat& I2)
{
cuda::GpuMat gI1, gI2, gs, t1,t2;
return mssim;
}
-Scalar getMSSIM_GPU( const Mat& i1, const Mat& i2)
+Scalar getMSSIM_CUDA( const Mat& i1, const Mat& i2)
{
const float C1 = 6.5025f, C2 = 58.5225f;
/***************************** INITS **********************************/
return mssim;
}
-Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b)
+Scalar getMSSIM_CUDA_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b)
{
const float C1 = 6.5025f, C2 = 58.5225f;
/***************************** INITS **********************************/
" --mosaic-stdev=<float_number>\n"
" Consistent mosaicing stdev threshold. The default is 10.0.\n\n"
" -mi, --motion-inpaint=(yes|no)\n"
- " Do motion inpainting (requires GPU support). The default is no.\n"
+ " Do motion inpainting (requires CUDA support). The default is no.\n"
" --mi-dist-thresh=<float_number>\n"
" Estimated flow distance threshold for motion inpainting. The default is 5.0.\n\n"
" -ci, --color-inpaint=(no|average|ns|telea)\n"
" -lm2, --load-motions2=(<file_path>|no)\n"
" Load motions for wobble suppression from file. The default is no.\n\n"
" -gpu=(yes|no)\n"
- " Use GPU optimization whenever possible. The default is no.\n\n"
+ " Use CUDA optimization whenever possible. The default is no.\n\n"
" -o, --output=(no|<file_path>)\n"
" Set output file path explicitely. The default is stabilized.avi.\n"
" --fps=(<float_number>|auto)\n"
#ifdef HAVE_OPENCV_CUDA
ws = new MoreAccurateMotionWobbleSuppressorGpu();
#else
- throw runtime_error("OpenCV is built without GPU support");
+ throw runtime_error("OpenCV is built without CUDA support");
#endif
ws->setMotionEstimator(wsMotionEstBuilder->build());
-SET(OPENCV_GPU_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui
+SET(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui
opencv_ml opencv_video opencv_objdetect opencv_features2d
opencv_calib3d opencv_legacy opencv_contrib opencv_cuda
opencv_nonfree opencv_softcascade opencv_superres
opencv_cudafeatures2d opencv_cudaoptflow opencv_cudabgsegm
opencv_cudastereo opencv_cudalegacy)
-ocv_check_dependencies(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
+ocv_check_dependencies(${OPENCV_CUDA_SAMPLES_REQUIRED_DEPS})
if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
set(project "gpu")
project("${project}_samples")
- ocv_include_modules(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
+ ocv_include_modules(${OPENCV_CUDA_SAMPLES_REQUIRED_DEPS})
ocv_include_directories(
"${OpenCV_SOURCE_DIR}/modules/gpu/src/nvidia"
"${OpenCV_SOURCE_DIR}/modules/gpu/src/nvidia/core"
# ---------------------------------------------
# Define executable targets
# ---------------------------------------------
- MACRO(OPENCV_DEFINE_GPU_EXAMPLE name srcs)
+ MACRO(OPENCV_DEFINE_CUDA_EXAMPLE name srcs)
set(the_target "example_${project}_${name}")
add_executable(${the_target} ${srcs})
- target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
+ target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_CUDA_SAMPLES_REQUIRED_DEPS})
if(HAVE_CUDA)
target_link_libraries(${the_target} ${CUDA_CUDA_LIBRARY})
foreach(sample_filename ${all_samples})
get_filename_component(sample ${sample_filename} NAME_WE)
file(GLOB sample_srcs RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${sample}.*)
- OPENCV_DEFINE_GPU_EXAMPLE(${sample} ${sample_srcs})
+ OPENCV_DEFINE_CUDA_EXAMPLE(${sample} ${sample_srcs})
endforeach()
include("performance/CMakeLists.txt")
if (getCudaEnabledDeviceCount() == 0)
{
- return cerr << "No GPU found or the library is compiled without GPU support" << endl, -1;
+ return cerr << "No GPU found or the library is compiled without CUDA support" << endl, -1;
}
cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());
}
}
- CascadeClassifier_GPU cascade_gpu;
+ CascadeClassifier_CUDA cascade_gpu;
if (!cascade_gpu.load(cascadeName))
{
return cerr << "ERROR: Could not load cascade classifier \"" << cascadeName << "\"" << endl, help(), -1;
cout << "Syntax: exename <cascade_file> <image_or_video_or_cameraid>" << endl;
cout << "=========================================" << endl;
- ncvAssertPrintReturn(cv::cuda::getCudaEnabledDeviceCount() != 0, "No GPU found or the library is compiled without GPU support", -1);
+ ncvAssertPrintReturn(cv::cuda::getCudaEnabledDeviceCount() != 0, "No GPU found or the library is compiled without CUDA support", -1);
ncvAssertPrintReturn(argc == 3, "Invalid number of arguments", -1);
cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());
DeviceInfo dev_info(i);
if (!dev_info.isCompatible())
{
- std::cout << "GPU module isn't built for GPU #" << i << " ("
+ std::cout << "CUDA module isn't built for GPU #" << i << " ("
<< dev_info.name() << ", CC " << dev_info.majorVersion()
<< dev_info.minorVersion() << "\n";
return -1;
DeviceInfo dev_info(i);
if (!dev_info.isCompatible())
{
- std::cout << "GPU module isn't built for GPU #" << i << " ("
+ std::cout << "CUDA module isn't built for GPU #" << i << " ("
<< dev_info.name() << ", CC " << dev_info.majorVersion()
<< dev_info.minorVersion() << "\n";
return -1;
BroxOpticalFlow brox(0.197f, 50.0f, 0.8f, 10, 77, 10);
PyrLKOpticalFlow lk; lk.winSize = Size(7, 7);
FarnebackOpticalFlow farn;
- OpticalFlowDual_TVL1_GPU tvl1;
+ OpticalFlowDual_TVL1_CUDA tvl1;
FastOpticalFlowBM fastBM;
{
endif()
add_executable(${the_target} ${sources} ${headers})
-target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
+target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_CUDA_SAMPLES_REQUIRED_DEPS})
if(HAVE_opencv_nonfree)
target_link_libraries(${the_target} opencv_nonfree)
set_target_properties(${the_target} PROPERTIES
OUTPUT_NAME "performance_gpu"
- PROJECT_LABEL "(EXAMPLE_GPU) performance")
+ PROJECT_LABEL "(EXAMPLE_CUDA) performance")
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${the_target} PROPERTIES FOLDER "samples//gpu")
endif()
if(INSTALL_C_EXAMPLES AND NOT WIN32)
- file(GLOB GPU_FILES performance/*.cpp performance/*.h)
- install(FILES ${GPU_FILES}
+ file(GLOB CUDA_FILES performance/*.cpp performance/*.h)
+ install(FILES ${CUDA_FILES}
DESTINATION share/OpenCV/samples/gpu/performance
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif()
int num_devices = getCudaEnabledDeviceCount();
if (num_devices == 0)
{
- cerr << "No GPU found or the library was compiled without GPU support";
+ cerr << "No GPU found or the library was compiled without CUDA support";
return -1;
}
DeviceInfo dev_info(device);
if (!dev_info.isCompatible())
{
- cerr << "GPU module isn't built for GPU #" << device << " " << dev_info.name() << ", CC " << dev_info.majorVersion() << '.' << dev_info.minorVersion() << endl;
+ cerr << "CUDA module isn't built for GPU #" << device << " " << dev_info.name() << ", CC " << dev_info.majorVersion() << '.' << dev_info.minorVersion() << endl;
return -1;
}
setDevice(device);
-#ifndef OPENCV_GPU_SAMPLE_PERFORMANCE_H_
-#define OPENCV_GPU_SAMPLE_PERFORMANCE_H_
+#ifndef OPENCV_CUDA_SAMPLE_PERFORMANCE_H_
+#define OPENCV_CUDA_SAMPLE_PERFORMANCE_H_
#include <iostream>
#include <cstdio>
TestSystem::instance().cpuOff(); \
} TestSystem::instance().cpuComplete()
-#define GPU_ON \
+#define CUDA_ON \
while (!TestSystem::instance().stop()) { \
TestSystem::instance().gpuOn()
-#define GPU_OFF \
+#define CUDA_OFF \
TestSystem::instance().gpuOff(); \
} TestSystem::instance().gpuComplete()
// Returns abs path taking into account test system working dir
std::string abspath(const std::string& relpath);
-#endif // OPENCV_GPU_SAMPLE_PERFORMANCE_H_
+#endif // OPENCV_CUDA_SAMPLE_PERFORMANCE_H_
d_templ.upload(templ);
alg->match(d_src, d_templ, d_dst);
- GPU_ON;
+ CUDA_ON;
alg->match(d_src, d_templ, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
d_src.upload(src);
- GPU_ON;
+ CUDA_ON;
cuda::minMaxLoc(d_src, &min_val, &max_val, &min_loc, &max_loc);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::remap(d_src, d_dst, d_xmap, d_ymap, interpolation, borderMode);
- GPU_ON;
+ CUDA_ON;
cuda::remap(d_src, d_dst, d_xmap, d_ymap, interpolation, borderMode);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::dft(d_src, d_dst, Size(size, size));
- GPU_ON;
+ CUDA_ON;
cuda::dft(d_src, d_dst, Size(size, size));
- GPU_OFF;
+ CUDA_OFF;
}
}
harris->compute(d_src, d_dst);
- GPU_ON;
+ CUDA_ON;
harris->compute(d_src, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::integralBuffered(d_src, d_sum, d_buf);
- GPU_ON;
+ CUDA_ON;
cuda::integralBuffered(d_src, d_sum, d_buf);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::norm(d_src, NORM_INF, d_buf);
- GPU_ON;
+ CUDA_ON;
cuda::norm(d_src, NORM_INF, d_buf);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::meanShiftFiltering(d_src, d_dst, sp, sr);
- GPU_ON;
+ CUDA_ON;
cuda::meanShiftFiltering(d_src, d_dst, sp, sr);
- GPU_OFF;
+ CUDA_OFF;
}
}
surf(src, Mat(), keypoints, descriptors);
CPU_OFF;
- cuda::SURF_GPU d_surf;
+ cuda::SURF_CUDA d_surf;
cuda::GpuMat d_src(src);
cuda::GpuMat d_keypoints;
cuda::GpuMat d_descriptors;
d_surf(d_src, cuda::GpuMat(), d_keypoints, d_descriptors);
- GPU_ON;
+ CUDA_ON;
d_surf(d_src, cuda::GpuMat(), d_keypoints, d_descriptors);
- GPU_OFF;
+ CUDA_OFF;
}
#endif
FAST(src, keypoints, 20);
CPU_OFF;
- cuda::FAST_GPU d_FAST(20);
+ cuda::FAST_CUDA d_FAST(20);
cuda::GpuMat d_src(src);
cuda::GpuMat d_keypoints;
d_FAST(d_src, cuda::GpuMat(), d_keypoints);
- GPU_ON;
+ CUDA_ON;
d_FAST(d_src, cuda::GpuMat(), d_keypoints);
- GPU_OFF;
+ CUDA_OFF;
}
orb(src, Mat(), keypoints, descriptors);
CPU_OFF;
- cuda::ORB_GPU d_orb;
+ cuda::ORB_CUDA d_orb;
cuda::GpuMat d_src(src);
cuda::GpuMat d_keypoints;
cuda::GpuMat d_descriptors;
d_orb(d_src, cuda::GpuMat(), d_keypoints, d_descriptors);
- GPU_ON;
+ CUDA_ON;
d_orb(d_src, cuda::GpuMat(), d_keypoints, d_descriptors);
- GPU_OFF;
+ CUDA_OFF;
}
Mat train;
gen(train, 3000, desc_len, CV_32F, 0, 1);
- // Init GPU matcher
+ // Init CUDA matcher
- cuda::BFMatcher_GPU d_matcher(NORM_L2);
+ cuda::BFMatcher_CUDA d_matcher(NORM_L2);
cuda::GpuMat d_query(query);
cuda::GpuMat d_train(train);
d_matcher.matchSingle(d_query, d_train, d_trainIdx, d_distance);
- GPU_ON;
+ CUDA_ON;
d_matcher.matchSingle(d_query, d_train, d_trainIdx, d_distance);
- GPU_OFF;
+ CUDA_OFF;
SUBTEST << "knnMatch";
d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 2);
- GPU_ON;
+ CUDA_ON;
d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 2);
- GPU_OFF;
+ CUDA_OFF;
SUBTEST << "radiusMatch";
d_matcher.radiusMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_nMatches, max_distance);
- GPU_ON;
+ CUDA_ON;
d_matcher.radiusMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_nMatches, max_distance);
- GPU_OFF;
+ CUDA_OFF;
}
cuda::magnitude(d_x, d_y, d_mag);
- GPU_ON;
+ CUDA_ON;
cuda::magnitude(d_x, d_y, d_mag);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::add(d_src1, d_src2, d_dst);
- GPU_ON;
+ CUDA_ON;
cuda::add(d_src1, d_src2, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::log(d_src, d_dst);
- GPU_ON;
+ CUDA_ON;
cuda::log(d_src, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::mulSpectrums(d_src1, d_src2, d_dst, 0, true);
- GPU_ON;
+ CUDA_ON;
cuda::mulSpectrums(d_src1, d_src2, d_dst, 0, true);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::resize(d_src, d_dst, Size(), 2.0, 2.0);
- GPU_ON;
+ CUDA_ON;
cuda::resize(d_src, d_dst, Size(), 2.0, 2.0);
- GPU_OFF;
+ CUDA_OFF;
}
for (int size = 1000; size <= 3000; size += 1000)
cuda::resize(d_src, d_dst, Size(), 0.5, 0.5);
- GPU_ON;
+ CUDA_ON;
cuda::resize(d_src, d_dst, Size(), 0.5, 0.5);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::cvtColor(d_src, d_dst, COLOR_GRAY2BGRA, 4);
- GPU_ON;
+ CUDA_ON;
cuda::cvtColor(d_src, d_dst, COLOR_GRAY2BGRA, 4);
- GPU_OFF;
+ CUDA_OFF;
cv::swap(src, dst);
d_src.swap(d_dst);
cuda::cvtColor(d_src, d_dst, COLOR_BGR2YCrCb, 4);
- GPU_ON;
+ CUDA_ON;
cuda::cvtColor(d_src, d_dst, COLOR_BGR2YCrCb, 4);
- GPU_OFF;
+ CUDA_OFF;
cv::swap(src, dst);
d_src.swap(d_dst);
cuda::cvtColor(d_src, d_dst, COLOR_YCrCb2BGR, 4);
- GPU_ON;
+ CUDA_ON;
cuda::cvtColor(d_src, d_dst, COLOR_YCrCb2BGR, 4);
- GPU_OFF;
+ CUDA_OFF;
cv::swap(src, dst);
d_src.swap(d_dst);
cuda::cvtColor(d_src, d_dst, COLOR_BGR2XYZ, 4);
- GPU_ON;
+ CUDA_ON;
cuda::cvtColor(d_src, d_dst, COLOR_BGR2XYZ, 4);
- GPU_OFF;
+ CUDA_OFF;
cv::swap(src, dst);
d_src.swap(d_dst);
cuda::cvtColor(d_src, d_dst, COLOR_XYZ2BGR, 4);
- GPU_ON;
+ CUDA_ON;
cuda::cvtColor(d_src, d_dst, COLOR_XYZ2BGR, 4);
- GPU_OFF;
+ CUDA_OFF;
cv::swap(src, dst);
d_src.swap(d_dst);
cuda::cvtColor(d_src, d_dst, COLOR_BGR2HSV, 4);
- GPU_ON;
+ CUDA_ON;
cuda::cvtColor(d_src, d_dst, COLOR_BGR2HSV, 4);
- GPU_OFF;
+ CUDA_OFF;
cv::swap(src, dst);
d_src.swap(d_dst);
cuda::cvtColor(d_src, d_dst, COLOR_HSV2BGR, 4);
- GPU_ON;
+ CUDA_ON;
cuda::cvtColor(d_src, d_dst, COLOR_HSV2BGR, 4);
- GPU_OFF;
+ CUDA_OFF;
cv::swap(src, dst);
d_src.swap(d_dst);
erode->apply(d_src, d_dst);
- GPU_ON;
+ CUDA_ON;
erode->apply(d_src, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::threshold(d_src, d_dst, 50.0, 0.0, THRESH_BINARY);
- GPU_ON;
+ CUDA_ON;
cuda::threshold(d_src, d_dst, 50.0, 0.0, THRESH_BINARY);
- GPU_OFF;
+ CUDA_OFF;
}
for (int size = 2000; size <= 4000; size += 1000)
cuda::threshold(d_src, d_dst, 50.0, 0.0, THRESH_TRUNC);
- GPU_ON;
+ CUDA_ON;
cuda::threshold(d_src, d_dst, 50.0, 0.0, THRESH_TRUNC);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::pow(d_src, -2.0, d_dst);
- GPU_ON;
+ CUDA_ON;
cuda::pow(d_src, -2.0, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::projectPoints(d_src, rvec, tvec, camera_mat, Mat(), d_dst);
- GPU_ON;
+ CUDA_ON;
cuda::projectPoints(d_src, rvec, tvec, camera_mat, Mat(), d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
max_dist, int(num_points * 0.05), inliers_cpu);
CPU_OFF;
- GPU_ON;
+ CUDA_ON;
cuda::solvePnPRansac(object, image, camera_mat, Mat::zeros(1, 8, CV_32F), rvec, tvec, false, num_iters,
max_dist, int(num_points * 0.05), &inliers_gpu);
- GPU_OFF;
+ CUDA_OFF;
}
}
gauss->apply(d_src, d_dst);
- GPU_ON;
+ CUDA_ON;
gauss->apply(d_src, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
Ptr<cuda::Filter> filter2D = cuda::createLinearFilter(d_src.type(), -1, kernel);
filter2D->apply(d_src, d_dst);
- GPU_ON;
+ CUDA_ON;
filter2D->apply(d_src, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
}
cuda::pyrDown(d_src, d_dst);
- GPU_ON;
+ CUDA_ON;
cuda::pyrDown(d_src, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::pyrUp(d_src, d_dst);
- GPU_ON;
+ CUDA_ON;
cuda::pyrUp(d_src, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::equalizeHist(d_src, d_dst, d_buf);
- GPU_ON;
+ CUDA_ON;
cuda::equalizeHist(d_src, d_dst, d_buf);
- GPU_OFF;
+ CUDA_OFF;
}
}
canny->detect(d_img, d_edges);
- GPU_ON;
+ CUDA_ON;
canny->detect(d_img, d_edges);
- GPU_OFF;
+ CUDA_OFF;
}
cuda::reduce(d_src, d_dst0, 0, REDUCE_MIN);
- GPU_ON;
+ CUDA_ON;
cuda::reduce(d_src, d_dst0, 0, REDUCE_MIN);
- GPU_OFF;
+ CUDA_OFF;
SUBTEST << size << 'x' << size << ", dim = 1";
cuda::reduce(d_src, d_dst1, 1, REDUCE_MIN);
- GPU_ON;
+ CUDA_ON;
cuda::reduce(d_src, d_dst1, 1, REDUCE_MIN);
- GPU_OFF;
+ CUDA_OFF;
}
}
cuda::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, d_dst);
- GPU_ON;
+ CUDA_ON;
cuda::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, d_dst);
- GPU_OFF;
+ CUDA_OFF;
}
}
detector->detect(d_src, d_pts);
- GPU_ON;
+ CUDA_ON;
detector->detect(d_src, d_pts);
- GPU_OFF;
+ CUDA_OFF;
}
TEST(PyrLKOpticalFlow)
d_pyrLK.sparse(d_frame0, d_frame1, d_pts, d_nextPts, d_status, &d_err);
- GPU_ON;
+ CUDA_ON;
d_pyrLK.sparse(d_frame0, d_frame1, d_pts, d_nextPts, d_status, &d_err);
- GPU_OFF;
+ CUDA_OFF;
}
}
calc.flags |= useGaussianBlur ? OPTFLOW_FARNEBACK_GAUSSIAN : 0;
cuda::GpuMat d_frame0(frame0), d_frame1(frame1), d_flowx, d_flowy;
- GPU_ON;
+ CUDA_ON;
calc(d_frame0, d_frame1, d_flowx, d_flowy);
- GPU_OFF;
+ CUDA_OFF;
Mat flow;
CPU_ON;
DeviceInfo dev_info(i);
if (!dev_info.isCompatible())
{
- std::cout << "GPU module isn't built for GPU #" << i << " ("
+ std::cout << "CUDA module isn't built for GPU #" << i << " ("
<< dev_info.name() << ", CC " << dev_info.majorVersion()
<< dev_info.minorVersion() << "\n";
return -1;
if (name == "farneback")
{
if (useGpu)
- return createOptFlow_Farneback_GPU();
+ return createOptFlow_Farneback_CUDA();
else
return createOptFlow_Farneback();
}
else if (name == "tvl1")
{
if (useGpu)
- return createOptFlow_DualTVL1_GPU();
+ return createOptFlow_DualTVL1_CUDA();
else
return createOptFlow_DualTVL1();
}
else if (name == "brox")
- return createOptFlow_Brox_GPU();
+ return createOptFlow_Brox_CUDA();
else if (name == "pyrlk")
- return createOptFlow_PyrLK_GPU();
+ return createOptFlow_PyrLK_CUDA();
else
{
cerr << "Incorrect Optical Flow algorithm - " << name << endl;
#endif
{
if (useCuda)
- superRes = createSuperResolution_BTVL1_GPU();
+ superRes = createSuperResolution_BTVL1_CUDA();
else
superRes = createSuperResolution_BTVL1();
// Try to use gpu Video Decoding
try
{
- frameSource = createFrameSource_Video_GPU(inputVideoName);
+ frameSource = createFrameSource_Video_CUDA(inputVideoName);
Mat frame;
frameSource->nextFrame(frame);
}
static void help()
{
- cout << "\nThis program demonstrates using SURF_GPU features detector, descriptor extractor and BruteForceMatcher_GPU" << endl;
+ cout << "\nThis program demonstrates using SURF_CUDA features detector, descriptor extractor and BruteForceMatcher_CUDA" << endl;
cout << "\nUsage:\n\tmatcher_simple_gpu --left <image1> --right <image2>" << endl;
}
cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());
- SURF_GPU surf;
+ SURF_CUDA surf;
// detecting keypoints & computing descriptors
GpuMat keypoints1GPU, keypoints2GPU;
cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
// matching descriptors
- BFMatcher_GPU matcher(NORM_L2);
+ BFMatcher_CUDA matcher(NORM_L2);
GpuMat trainIdx, distance;
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
surf.downloadKeypoints(keypoints2GPU, keypoints2);
surf.downloadDescriptors(descriptors1GPU, descriptors1);
surf.downloadDescriptors(descriptors2GPU, descriptors2);
- BFMatcher_GPU::matchDownload(trainIdx, distance, matches);
+ BFMatcher_CUDA::matchDownload(trainIdx, distance, matches);
// drawing the results
Mat img_matches;
int main()
{
- std::cout << "OpenCV was built without GPU Video decoding support\n" << std::endl;
+ std::cout << "OpenCV was built without CUDA Video decoding support\n" << std::endl;
return 0;
}
if (d_writer.empty())
{
- std::cout << "Open GPU Writer" << std::endl;
+ std::cout << "Open CUDA Writer" << std::endl;
d_writer = cv::cudacodec::createVideoWriter("output_gpu.avi", frame.size(), FPS);
}
int main()
{
- std::cout << "OpenCV was built without GPU Video encoding support\n" << std::endl;
+ std::cout << "OpenCV was built without CUDA Video encoding support\n" << std::endl;
return 0;
}