added BORDER_REFLECT and BORDER_WRAP support to gpu module
author	Vladislav Vinogradov <no@email>
Wed, 31 Aug 2011 13:21:14 +0000 (13:21 +0000)
committer	Vladislav Vinogradov <no@email>
Wed, 31 Aug 2011 13:21:14 +0000 (13:21 +0000)
switched to gpu::remap in opencv_stitching

modules/gpu/src/cuda/filters.cu
modules/gpu/src/cuda/imgproc.cu
modules/gpu/src/cuda/internal_shared.hpp
modules/gpu/src/filtering.cpp
modules/gpu/src/imgproc.cpp
modules/gpu/src/opencv2/gpu/device/border_interpolate.hpp
modules/gpu/test/test_imgproc.cpp
modules/stitching/matchers.cpp
modules/stitching/warpers.cpp
modules/stitching/warpers.hpp

index 3384249..41f403a 100644 (file)
@@ -151,7 +151,7 @@ namespace cv { namespace gpu { namespace filters
     void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream)\r
     {\r
         typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor, cudaStream_t stream);\r
-        static const caller_t callers[3][17] = \r
+        static const caller_t callers[5][17] = \r
         {\r
             {\r
                 0, \r
@@ -170,7 +170,7 @@ namespace cv { namespace gpu { namespace filters
                 linearRowFilter_caller<13, T, D, BrdRowReflect101>, \r
                 linearRowFilter_caller<14, T, D, BrdRowReflect101>,\r
                 linearRowFilter_caller<15, T, D, BrdRowReflect101>, \r
-                linearRowFilter_caller<16, T, D, BrdRowReflect101>,\r
+                linearRowFilter_caller<16, T, D, BrdRowReflect101>\r
             },\r
             {\r
                 0, \r
@@ -189,7 +189,7 @@ namespace cv { namespace gpu { namespace filters
                 linearRowFilter_caller<13, T, D, BrdRowReplicate>, \r
                 linearRowFilter_caller<14, T, D, BrdRowReplicate>,\r
                 linearRowFilter_caller<15, T, D, BrdRowReplicate>, \r
-                linearRowFilter_caller<16, T, D, BrdRowReplicate>,\r
+                linearRowFilter_caller<16, T, D, BrdRowReplicate>\r
             },\r
             {\r
                 0, \r
@@ -208,7 +208,45 @@ namespace cv { namespace gpu { namespace filters
                 linearRowFilter_caller<13, T, D, BrdRowConstant>,\r
                 linearRowFilter_caller<14, T, D, BrdRowConstant>,\r
                 linearRowFilter_caller<15, T, D, BrdRowConstant>, \r
-                linearRowFilter_caller<16, T, D, BrdRowConstant>,\r
+                linearRowFilter_caller<16, T, D, BrdRowConstant>\r
+            },\r
+            {\r
+                0, \r
+                linearRowFilter_caller<1 , T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<2 , T, D, BrdRowReflect>,\r
+                linearRowFilter_caller<3 , T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<4 , T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<5 , T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<6 , T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<7 , T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<8 , T, D, BrdRowReflect>,\r
+                linearRowFilter_caller<9 , T, D, BrdRowReflect>,\r
+                linearRowFilter_caller<10, T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<11, T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<12, T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<13, T, D, BrdRowReflect>,\r
+                linearRowFilter_caller<14, T, D, BrdRowReflect>,\r
+                linearRowFilter_caller<15, T, D, BrdRowReflect>, \r
+                linearRowFilter_caller<16, T, D, BrdRowReflect>\r
+            },\r
+            {\r
+                0, \r
+                linearRowFilter_caller<1 , T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<2 , T, D, BrdRowWrap>,\r
+                linearRowFilter_caller<3 , T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<4 , T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<5 , T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<6 , T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<7 , T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<8 , T, D, BrdRowWrap>,\r
+                linearRowFilter_caller<9 , T, D, BrdRowWrap>,\r
+                linearRowFilter_caller<10, T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<11, T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<12, T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<13, T, D, BrdRowWrap>,\r
+                linearRowFilter_caller<14, T, D, BrdRowWrap>,\r
+                linearRowFilter_caller<15, T, D, BrdRowWrap>, \r
+                linearRowFilter_caller<16, T, D, BrdRowWrap>\r
             }\r
         };\r
         \r
@@ -292,7 +330,7 @@ namespace cv { namespace gpu { namespace filters
     void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor, int brd_type, cudaStream_t stream)\r
     {\r
         typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor, cudaStream_t stream);\r
-        static const caller_t callers[3][17] = \r
+        static const caller_t callers[5][17] = \r
         {\r
             {\r
                 0, \r
@@ -311,7 +349,7 @@ namespace cv { namespace gpu { namespace filters
                 linearColumnFilter_caller<13, T, D, BrdColReflect101>, \r
                 linearColumnFilter_caller<14, T, D, BrdColReflect101>, \r
                 linearColumnFilter_caller<15, T, D, BrdColReflect101>, \r
-                linearColumnFilter_caller<16, T, D, BrdColReflect101>, \r
+                linearColumnFilter_caller<16, T, D, BrdColReflect101> \r
             },\r
             {\r
                 0, \r
@@ -330,7 +368,7 @@ namespace cv { namespace gpu { namespace filters
                 linearColumnFilter_caller<13, T, D, BrdColReplicate>, \r
                 linearColumnFilter_caller<14, T, D, BrdColReplicate>, \r
                 linearColumnFilter_caller<15, T, D, BrdColReplicate>, \r
-                linearColumnFilter_caller<16, T, D, BrdColReplicate>\r
+                linearColumnFilter_caller<16, T, D, BrdColReplicate>\r
             },\r
             {\r
                 0, \r
@@ -349,7 +387,45 @@ namespace cv { namespace gpu { namespace filters
                 linearColumnFilter_caller<13, T, D, BrdColConstant>, \r
                 linearColumnFilter_caller<14, T, D, BrdColConstant>, \r
                 linearColumnFilter_caller<15, T, D, BrdColConstant>, \r
-                linearColumnFilter_caller<16, T, D, BrdColConstant>, \r
+                linearColumnFilter_caller<16, T, D, BrdColConstant> \r
+            },\r
+            {\r
+                0, \r
+                linearColumnFilter_caller<1 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<2 , T, D, BrdColReflect>,\r
+                linearColumnFilter_caller<3 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<4 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<5 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<6 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<7 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<8 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<9 , T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<10, T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<11, T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<12, T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<13, T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<14, T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<15, T, D, BrdColReflect>, \r
+                linearColumnFilter_caller<16, T, D, BrdColReflect>\r
+            },\r
+            {\r
+                0, \r
+                linearColumnFilter_caller<1 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<2 , T, D, BrdColWrap>,\r
+                linearColumnFilter_caller<3 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<4 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<5 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<6 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<7 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<8 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<9 , T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<10, T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<11, T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<12, T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<13, T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<14, T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<15, T, D, BrdColWrap>, \r
+                linearColumnFilter_caller<16, T, D, BrdColWrap>\r
             }\r
         };\r
         \r
index b5f3b6b..40f0d99 100644 (file)
@@ -120,10 +120,10 @@ namespace cv { namespace gpu { namespace imgproc
     {\r
         typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2Df& xmap, const DevMem2Df& ymap, const DevMem2D_<T>& dst, T borderValue);\r
 \r
-        static const caller_t callers[2][3] = \r
+        static const caller_t callers[2][5] = \r
         {\r
-            { remap_caller<PointFilter, BrdReflect101>, remap_caller<PointFilter, BrdReplicate>, remap_caller<PointFilter, BrdConstant> },\r
-            { remap_caller<LinearFilter, BrdReflect101>, remap_caller<LinearFilter, BrdReplicate>, remap_caller<LinearFilter, BrdConstant> }\r
+            { remap_caller<PointFilter, BrdReflect101>, remap_caller<PointFilter, BrdReplicate>, remap_caller<PointFilter, BrdConstant>, remap_caller<PointFilter, BrdReflect>, remap_caller<PointFilter, BrdWrap> },\r
+            { remap_caller<LinearFilter, BrdReflect101>, remap_caller<LinearFilter, BrdReplicate>, remap_caller<LinearFilter, BrdConstant>, remap_caller<LinearFilter, BrdReflect>, remap_caller<LinearFilter, BrdWrap> }\r
         };\r
 \r
         typename VecTraits<T>::elem_type brd[] = {(typename VecTraits<T>::elem_type)borderValue[0], (typename VecTraits<T>::elem_type)borderValue[1], (typename VecTraits<T>::elem_type)borderValue[2], (typename VecTraits<T>::elem_type)borderValue[3]};\r
@@ -1089,7 +1089,7 @@ namespace cv { namespace gpu { namespace imgproc
 \r
         static const caller_t callers[] = \r
         {\r
-            pyrDown_caller<type, BrdReflect101>, pyrDown_caller<type, BrdReplicate>, pyrDown_caller<type, BrdConstant>\r
+            pyrDown_caller<type, BrdReflect101>, pyrDown_caller<type, BrdReplicate>, pyrDown_caller<type, BrdConstant>, pyrDown_caller<type, BrdReflect>, pyrDown_caller<type, BrdWrap>\r
         };\r
 \r
         callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);\r
@@ -1219,7 +1219,7 @@ namespace cv { namespace gpu { namespace imgproc
 \r
         static const caller_t callers[] = \r
         {\r
-            pyrUp_caller<type, BrdReflect101>, pyrUp_caller<type, BrdReplicate>, pyrUp_caller<type, BrdConstant>\r
+            pyrUp_caller<type, BrdReflect101>, pyrUp_caller<type, BrdReplicate>, pyrUp_caller<type, BrdConstant>, pyrUp_caller<type, BrdReflect>, pyrUp_caller<type, BrdWrap>\r
         };\r
 \r
         callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);\r
index 9a3086b..1d13735 100644 (file)
@@ -70,7 +70,9 @@ namespace cv
         {\r
             BORDER_REFLECT101_GPU = 0,\r
             BORDER_REPLICATE_GPU,\r
-            BORDER_CONSTANT_GPU\r
+            BORDER_CONSTANT_GPU,\r
+            BORDER_REFLECT_GPU,\r
+            BORDER_WRAP_GPU\r
         };\r
                 \r
         // Converts CPU border extrapolation mode into GPU internal analogue.\r
index 2d1a934..0144d4b 100644 (file)
@@ -718,7 +718,7 @@ Ptr<BaseRowFilter_GPU> cv::gpu::getLinearRowFilter_GPU(int srcType, int bufType,
             nppFilter1D_callers[CV_MAT_CN(srcType)]));\r
     }\r
     \r
-    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT);\r
+    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);\r
     int gpuBorderType;\r
     CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));\r
 \r
@@ -833,7 +833,7 @@ Ptr<BaseColumnFilter_GPU> cv::gpu::getLinearColumnFilter_GPU(int bufType, int ds
             nppFilter1D_callers[CV_MAT_CN(bufType)]));\r
     }\r
     \r
-    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT);\r
+    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);\r
     int gpuBorderType;\r
     CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));\r
    \r
index 20782c6..f65eee1 100644 (file)
@@ -133,7 +133,7 @@ void cv::gpu::remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const Gp
 \r
     CV_Assert(interpolation == INTER_NEAREST || interpolation == INTER_LINEAR);\r
 \r
-    CV_Assert(borderMode == BORDER_REFLECT101 || borderMode == BORDER_REPLICATE || borderMode == BORDER_CONSTANT);\r
+    CV_Assert(borderMode == BORDER_REFLECT101 || borderMode == BORDER_REPLICATE || borderMode == BORDER_CONSTANT || borderMode == BORDER_REFLECT || borderMode == BORDER_WRAP);\r
     int gpuBorderType;\r
     CV_Assert(tryConvertToGpuBorderType(borderMode, gpuBorderType));\r
 \r
@@ -1228,24 +1228,26 @@ namespace
 \r
 bool cv::gpu::tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType)\r
 {\r
-    if (cpuBorderType == cv::BORDER_REFLECT101)\r
+    switch (cpuBorderType)\r
     {\r
+    case cv::BORDER_REFLECT101:\r
         gpuBorderType = cv::gpu::BORDER_REFLECT101_GPU;\r
         return true;\r
-    }\r
-\r
-    if (cpuBorderType == cv::BORDER_REPLICATE)\r
-    {\r
+    case cv::BORDER_REPLICATE:\r
         gpuBorderType = cv::gpu::BORDER_REPLICATE_GPU;\r
         return true;\r
-    }\r
-    \r
-    if (cpuBorderType == cv::BORDER_CONSTANT)\r
-    {\r
+    case cv::BORDER_CONSTANT:\r
         gpuBorderType = cv::gpu::BORDER_CONSTANT_GPU;\r
         return true;\r
-    }\r
-\r
+    case cv::BORDER_REFLECT:\r
+        gpuBorderType = cv::gpu::BORDER_REFLECT_GPU;\r
+        return true;\r
+    case cv::BORDER_WRAP:\r
+        gpuBorderType = cv::gpu::BORDER_WRAP_GPU;\r
+        return true;\r
+    default:\r
+        return false;\r
+    }\r
     return false;\r
 }\r
 \r
@@ -1647,7 +1649,7 @@ void cv::gpu::pyrDown(const GpuMat& src, GpuMat& dst, int borderType, Stream& st
 \r
     CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);\r
 \r
-    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT);\r
+    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);\r
     int gpuBorderType;\r
     CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));\r
 \r
@@ -1683,7 +1685,7 @@ void cv::gpu::pyrUp(const GpuMat& src, GpuMat& dst, int borderType, Stream& stre
 \r
     CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);\r
 \r
-    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT);\r
+    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);\r
     int gpuBorderType;\r
     CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));\r
 \r
index 2ac09a0..53b8189 100644 (file)
@@ -246,55 +246,31 @@ namespace cv { namespace gpu { namespace device
         {\r
             return ::max(y, 0);\r
         }\r
-        __device__ __forceinline__ float idx_row_low(float y) const\r
-        {\r
-            return ::fmax(y, 0.0f);\r
-        }\r
 \r
         __device__ __forceinline__ int idx_row_high(int y) const \r
         {\r
             return ::min(y, last_row);\r
         }\r
-        __device__ __forceinline__ float idx_row_high(float y) const \r
-        {\r
-            return ::fmin(y, last_row);\r
-        }\r
 \r
         __device__ __forceinline__ int idx_row(int y) const\r
         {\r
             return idx_row_low(idx_row_high(y));\r
         }\r
-        __device__ __forceinline__ float idx_row(float y) const\r
-        {\r
-            return idx_row_low(idx_row_high(y));\r
-        }\r
 \r
         __device__ __forceinline__ int idx_col_low(int x) const\r
         {\r
             return ::max(x, 0);\r
         }\r
-        __device__ __forceinline__ float idx_col_low(float x) const\r
-        {\r
-            return ::fmax(x, 0);\r
-        }\r
 \r
         __device__ __forceinline__ int idx_col_high(int x) const \r
         {\r
             return ::min(x, last_col);\r
         }\r
-        __device__ __forceinline__ float idx_col_high(float x) const \r
-        {\r
-            return ::fmin(x, last_col);\r
-        }\r
 \r
         __device__ __forceinline__ int idx_col(int x) const\r
         {\r
             return idx_col_low(idx_col_high(x));\r
         }\r
-        __device__ __forceinline__ float idx_col(float x) const\r
-        {\r
-            return idx_col_low(idx_col_high(x));\r
-        }\r
 \r
         template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const \r
         {\r
@@ -421,52 +397,179 @@ namespace cv { namespace gpu { namespace device
         {\r
             return ::abs(y);\r
         }\r
-        __device__ __forceinline__ float idx_row_low(float y) const\r
-        {\r
-            return ::fabs(y);\r
-        }\r
 \r
         __device__ __forceinline__ int idx_row_high(int y) const \r
         {\r
             return last_row - ::abs(last_row - y);\r
         }\r
-        __device__ __forceinline__ float idx_row_high(float y) const \r
-        {\r
-            return last_row - ::fabs(last_row - y);\r
-        }\r
 \r
         __device__ __forceinline__ int idx_row(int y) const\r
         {\r
             return idx_row_low(idx_row_high(y));\r
         }\r
-        __device__ __forceinline__ float idx_row(float y) const\r
-        {\r
-            return idx_row_low(idx_row_high(y));\r
-        }\r
 \r
         __device__ __forceinline__ int idx_col_low(int x) const\r
         {\r
             return ::abs(x);\r
         }\r
-        __device__ __forceinline__ float idx_col_low(float x) const\r
-        {\r
-            return ::fabs(x);\r
-        }\r
 \r
         __device__ __forceinline__ int idx_col_high(int x) const \r
         {\r
             return last_col - ::abs(last_col - x);\r
         }\r
-        __device__ __forceinline__ float idx_col_high(float x) const \r
+\r
+        __device__ __forceinline__ int idx_col(int x) const\r
+        {\r
+            return idx_col_low(idx_col_high(x));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);\r
+        }\r
+\r
+        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const \r
+        {\r
+            return saturate_cast<D>(src(idx_row(y), idx_col(x)));\r
+        }\r
+\r
+        const int last_row;\r
+        const int last_col;\r
+    };\r
+\r
+    //////////////////////////////////////////////////////////////\r
+    // BrdReflect\r
+\r
+    template <typename D> struct BrdRowReflect\r
+    {\r
+        typedef D result_type;\r
+\r
+        explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {}\r
+        template <typename U> __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {}\r
+\r
+        __device__ __forceinline__ int idx_col_low(int x) const\r
         {\r
-            return last_col - ::fabs(last_col - x);\r
+            return ::abs(x) - (x < 0);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col_high(int x) const \r
+        {\r
+            return last_col - ::abs(last_col - x) + (x > last_col);\r
         }\r
 \r
         __device__ __forceinline__ int idx_col(int x) const\r
         {\r
             return idx_col_low(idx_col_high(x));\r
         }\r
-        __device__ __forceinline__ float idx_col(float x) const\r
+\r
+        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const \r
+        {\r
+            return saturate_cast<D>(data[idx_col_low(x)]);\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const \r
+        {\r
+            return saturate_cast<D>(data[idx_col_high(x)]);\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const \r
+        {\r
+            return saturate_cast<D>(data[idx_col(x)]);\r
+        }\r
+\r
+        __host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const \r
+        {\r
+            return -last_col <= mini && maxi <= 2 * last_col;\r
+        }\r
+\r
+        const int last_col;\r
+    };\r
+\r
+    template <typename D> struct BrdColReflect\r
+    {\r
+        typedef D result_type;\r
+\r
+        explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {}\r
+        template <typename U> __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {}\r
+\r
+        __device__ __forceinline__ int idx_row_low(int y) const\r
+        {\r
+            return ::abs(y) - (y < 0);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row_high(int y) const \r
+        {\r
+            return last_row - ::abs(last_row - y) + (y > last_row);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row(int y) const\r
+        {\r
+            return idx_row_low(idx_row_high(y));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));\r
+        }\r
+\r
+        __host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const \r
+        {\r
+            return -last_row <= mini && maxi <= 2 * last_row;\r
+        }\r
+\r
+        const int last_row;\r
+    };\r
+\r
+    template <typename D> struct BrdReflect\r
+    {\r
+        typedef D result_type;\r
+\r
+        __host__ __device__ __forceinline__ BrdReflect(int height, int width) : \r
+            last_row(height - 1), last_col(width - 1) \r
+        {\r
+        }\r
+        template <typename U> \r
+        __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : \r
+            last_row(height - 1), last_col(width - 1) \r
+        {\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row_low(int y) const\r
+        {\r
+            return ::abs(y) - (y < 0);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row_high(int y) const \r
+        {\r
+            return last_row - ::abs(last_row - y) + (y > last_row);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row(int y) const\r
+        {\r
+            return idx_row_low(idx_row_high(y));\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col_low(int x) const\r
+        {\r
+            return ::abs(x) - (x < 0);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col_high(int x) const \r
+        {\r
+            return last_col - ::abs(last_col - x) + (x > last_col);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col(int x) const\r
         {\r
             return idx_col_low(idx_col_high(x));\r
         }\r
@@ -486,6 +589,157 @@ namespace cv { namespace gpu { namespace device
     };\r
 \r
     //////////////////////////////////////////////////////////////\r
+    // BrdWrap\r
+\r
+    template <typename D> struct BrdRowWrap\r
+    {\r
+        typedef D result_type;\r
+\r
+        explicit __host__ __device__ __forceinline__ BrdRowWrap(int width_) : width(width_) {}\r
+        template <typename U> __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {}\r
+\r
+        __device__ __forceinline__ int idx_col_low(int x) const\r
+        {\r
+            return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col_high(int x) const \r
+        {\r
+            return (x < width) * x + (x >= width) * (x % width);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col(int x) const\r
+        {\r
+            return idx_col_high(idx_col_low(x));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const \r
+        {\r
+            return saturate_cast<D>(data[idx_col_low(x)]);\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const \r
+        {\r
+            return saturate_cast<D>(data[idx_col_high(x)]);\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const \r
+        {\r
+            return saturate_cast<D>(data[idx_col(x)]);\r
+        }\r
+\r
+        __host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const \r
+        {\r
+            return true;\r
+        }\r
+\r
+        const int width;\r
+    };\r
+\r
+    template <typename D> struct BrdColWrap\r
+    {\r
+        typedef D result_type;\r
+\r
+        explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {}\r
+        template <typename U> __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {}\r
+\r
+        __device__ __forceinline__ int idx_row_low(int y) const\r
+        {\r
+            return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row_high(int y) const \r
+        {\r
+            return (y < height) * y + (y >= height) * (y % height);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row(int y) const\r
+        {\r
+            return idx_row_high(idx_row_low(y));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));\r
+        }\r
+\r
+        __host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const \r
+        {\r
+            return true;\r
+        }\r
+\r
+        const int height;\r
+    };\r
+\r
+    template <typename D> struct BrdWrap\r
+    {\r
+        typedef D result_type;\r
+\r
+        __host__ __device__ __forceinline__ BrdWrap(int height_, int width_) : \r
+            height(height_), width(width_) \r
+        {\r
+        }\r
+        template <typename U> \r
+        __host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) : \r
+            height(height_), width(width_) \r
+        {\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row_low(int y) const\r
+        {\r
+            return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row_high(int y) const \r
+        {\r
+            return (y < height) * y + (y >= height) * (y % height);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_row(int y) const\r
+        {\r
+            return idx_row_high(idx_row_low(y));\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col_low(int x) const\r
+        {\r
+            return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col_high(int x) const \r
+        {\r
+            return (x < width) * x + (x >= width) * (x % width);\r
+        }\r
+\r
+        __device__ __forceinline__ int idx_col(int x) const\r
+        {\r
+            return idx_col_high(idx_col_low(x));\r
+        }\r
+\r
+        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const \r
+        {\r
+            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);\r
+        }\r
+\r
+        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const \r
+        {\r
+            return saturate_cast<D>(src(idx_row(y), idx_col(x)));\r
+        }\r
+\r
+        const int height;\r
+        const int width;\r
+    };\r
+\r
+    //////////////////////////////////////////////////////////////\r
     // BorderReader\r
 \r
     template <typename Ptr2D, typename B> struct BorderReader\r
index 8236c16..5e26581 100644 (file)
@@ -192,7 +192,6 @@ struct Remap : testing::TestWithParam< std::tr1::tuple<cv::gpu::DeviceInfo, int,
     cv::Mat src;\r
     cv::Mat xmap;\r
     cv::Mat ymap;\r
-    cv::Scalar borderValue;\r
 \r
     cv::Mat dst_gold;\r
     \r
@@ -221,17 +220,12 @@ struct Remap : testing::TestWithParam< std::tr1::tuple<cv::gpu::DeviceInfo, int,
 \r
             for (int x = 0; x < src.cols; ++x)\r
             {\r
-                xmap_row[x] = src.cols - 1 - x;\r
-                ymap_row[x] = src.rows - 1 - y;\r
+                xmap_row[x] = src.cols - 1 - x + 10;\r
+                ymap_row[x] = src.rows - 1 - y + 10;\r
             }\r
         }\r
-\r
-        borderValue[0] = rng.uniform(0.0, 256.0);\r
-        borderValue[1] = rng.uniform(0.0, 256.0);\r
-        borderValue[2] = rng.uniform(0.0, 256.0);\r
-        borderValue[3] = rng.uniform(0.0, 256.0);\r
         \r
-        cv::remap(src, dst_gold, xmap, ymap, interpolation, borderType, borderValue);\r
+        cv::remap(src, dst_gold, xmap, ymap, interpolation, borderType);\r
     }\r
 };\r
 \r
@@ -248,18 +242,23 @@ TEST_P(Remap, Accuracy)
     PRINT_PARAM(interpolationStr);\r
     PRINT_PARAM(borderTypeStr);\r
     PRINT_PARAM(size);\r
-    PRINT_PARAM(borderValue);\r
 \r
     cv::Mat dst;\r
 \r
     ASSERT_NO_THROW(\r
         cv::gpu::GpuMat gpuRes;\r
         \r
-        cv::gpu::remap(cv::gpu::GpuMat(src), gpuRes, cv::gpu::GpuMat(xmap), cv::gpu::GpuMat(ymap), interpolation, borderType, borderValue);\r
+        cv::gpu::remap(cv::gpu::GpuMat(src), gpuRes, cv::gpu::GpuMat(xmap), cv::gpu::GpuMat(ymap), interpolation, borderType);\r
 \r
         gpuRes.download(dst);\r
     );\r
 \r
+    if (dst_gold.depth() == CV_32F)\r
+    {\r
+        dst_gold.convertTo(dst_gold, CV_8U);\r
+        dst.convertTo(dst, CV_8U);\r
+    }\r
+\r
     EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);\r
 }\r
 \r
@@ -274,7 +273,7 @@ INSTANTIATE_TEST_CASE_P
             CV_32FC1, CV_32FC3, CV_32FC4\r
         ),\r
         testing::Values(cv::INTER_NEAREST, cv::INTER_LINEAR),\r
-        testing::Values(cv::BORDER_REFLECT101, cv::BORDER_REPLICATE, cv::BORDER_CONSTANT)\r
+        testing::Values(cv::BORDER_REFLECT101, cv::BORDER_REPLICATE, cv::BORDER_CONSTANT, cv::BORDER_REFLECT, cv::BORDER_WRAP)\r
     )\r
 );\r
                         \r
index 6365c2b..6a61c4b 100644 (file)
@@ -411,15 +411,7 @@ namespace
 \r
 BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)\r
 {\r
-    bool use_gpu = false;\r
     if (try_use_gpu && getCudaEnabledDeviceCount() > 0)\r
-    {\r
-        DeviceInfo info;\r
-        if (info.majorVersion() >= 2 && cv::getNumberOfCPUs() < 4)\r
-            use_gpu = true;\r
-    }\r
-\r
-    if (use_gpu)\r
         impl_ = new GpuMatcher(match_conf);\r
     else\r
         impl_ = new CpuMatcher(match_conf);\r
index aceedac..e9d9cd9 100644 (file)
@@ -118,8 +118,14 @@ Point PlaneWarperGpu::warp(const Mat &src, float focal, const cv::Mat &R, cv::Ma
     gpu::buildWarpPlaneMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
                             R, focal, projector_.scale, projector_.plane_dist, d_xmap_, d_ymap_);\r
 \r
-    dst.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type());\r
-    remap(src, dst, Mat(d_xmap_), Mat(d_ymap_), interp_mode, border_mode);\r
+    gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
+    d_src_.upload(src);\r
+\r
+    gpu::ensureSizeIsEnough(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type(), d_dst_);\r
+\r
+    gpu::remap(d_src_, d_dst_, d_xmap_, d_ymap_, interp_mode, border_mode);\r
+\r
+    d_dst_.download(dst);\r
 \r
     return dst_tl;\r
 }\r
@@ -183,8 +189,14 @@ Point SphericalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &d
     gpu::buildWarpSphericalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
                                 R, focal, projector_.scale, d_xmap_, d_ymap_);\r
 \r
-    dst.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type());\r
-    remap(src, dst, Mat(d_xmap_), Mat(d_ymap_), interp_mode, border_mode);\r
+    gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
+    d_src_.upload(src);\r
+\r
+    gpu::ensureSizeIsEnough(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type(), d_dst_);\r
+\r
+    gpu::remap(d_src_, d_dst_, d_xmap_, d_ymap_, interp_mode, border_mode);\r
+\r
+    d_dst_.download(dst);\r
 \r
     return dst_tl;\r
 }\r
@@ -204,8 +216,14 @@ Point CylindricalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat
     gpu::buildWarpCylindricalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
                                   R, focal, projector_.scale, d_xmap_, d_ymap_);\r
 \r
-    dst.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type());\r
-    remap(src, dst, Mat(d_xmap_), Mat(d_ymap_), interp_mode, border_mode);\r
+    gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
+    d_src_.upload(src);\r
+\r
+    gpu::ensureSizeIsEnough(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type(), d_dst_);\r
+\r
+    gpu::remap(d_src_, d_dst_, d_xmap_, d_ymap_, interp_mode, border_mode);\r
+\r
+    d_dst_.download(dst);\r
 \r
     return dst_tl;\r
 }\r
index 47a162e..b39fb1a 100644 (file)
@@ -122,7 +122,7 @@ public:
                    int interp_mode, int border_mode);\r
 \r
 private:\r
-    cv::gpu::GpuMat d_xmap_, d_ymap_, d_dst_;\r
+    cv::gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
 };\r
 \r
 \r
@@ -153,7 +153,7 @@ public:
                    int interp_mode, int border_mode);\r
 \r
 private:\r
-    cv::gpu::GpuMat d_xmap_, d_ymap_, d_dst_;\r
+    cv::gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
 };\r
 \r
 \r
@@ -186,7 +186,7 @@ public:
                    int interp_mode, int border_mode);\r
 \r
 private:\r
-    cv::gpu::GpuMat d_xmap_, d_ymap_, d_dst_;\r
+    cv::gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
 };\r
 \r
 #include "warpers_inl.hpp"\r