\r
namespace filter_krnls\r
{\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int KERNEL_SIZE, int CN, typename T, typename D>\r
+ template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int KERNEL_SIZE, typename T, typename D>\r
__global__ void linearRowFilter(const T* src, size_t src_step, D* dst, size_t dst_step, int anchor, int width, int height)\r
{\r
__shared__ T smem[BLOCK_DIM_Y * BLOCK_DIM_X * 3];\r
\r
if (threadX < width)\r
{\r
- typedef typename TypeVec<float, CN>::vec_t sum_t;\r
+ typedef typename TypeVec<float, VecTraits<T>::cn>::vec_t sum_t;\r
sum_t sum = VecTraits<sum_t>::all(0);\r
\r
sDataRow += threadIdx.x + blockDim.x - anchor;\r
\r
namespace cv { namespace gpu { namespace filters\r
{\r
- template <int KERNEL_SIZE, int CN, typename T, typename D>
+ template <int KERNEL_SIZE, typename T, typename D>
// Host-side launcher for filter_krnls::linearRowFilter: picks the launch
// geometry and forwards the image dimensions and filter anchor to the kernel.
// The CN template parameter was removed; the kernel now derives the channel
// count from T itself (see TypeVec<float, VecTraits<T>::cn> in the kernel).
// NOTE(review): BLOCK_DIM_Y is used below but its declaration is not visible
// in this chunk -- presumably defined alongside BLOCK_DIM_X; confirm.
void linearRowFilter_caller(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor)
{
const int BLOCK_DIM_X = 16;
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
// One block per BLOCK_DIM_X x BLOCK_DIM_Y tile of the image (rounded up).
dim3 blocks(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y));

// Steps are converted from bytes to elements (step/elemSize) before launch.
- filter_krnls::linearRowFilter<BLOCK_DIM_X, BLOCK_DIM_Y, KERNEL_SIZE, CN><<<blocks, threads>>>(src.data, src.step/src.elemSize(), 
+ filter_krnls::linearRowFilter<BLOCK_DIM_X, BLOCK_DIM_Y, KERNEL_SIZE><<<blocks, threads>>>(src.data, src.step/src.elemSize(), 
dst.data, dst.step/dst.elemSize(), anchor, src.cols, src.rows);

// Blocking sync so kernel-execution errors surface here via cudaSafeCall.
cudaSafeCall( cudaThreadSynchronize() );
}
\r
- template <int CN, typename T, typename D>
- inline void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
+ template <typename T, typename D>
+ void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
// Public entry point for the separable row filter: uploads the 1-D kernel
// and dispatches to the caller instantiated for the requested kernel size.
// `inline` was dropped so the explicit instantiations below emit real symbols.
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor);
// Dispatch table indexed by kernel size; exactly 16 entries (sizes 0..15),
// so callers must guarantee ksize < 16 -- there is no bounds check here
// (a CV_Assert is performed at the call site elsewhere in this patch).
static const caller_t callers[] = 
- {linearRowFilter_caller<0 , CN, T, D>, linearRowFilter_caller<1 , CN, T, D>, 
- linearRowFilter_caller<2 , CN, T, D>, linearRowFilter_caller<3 , CN, T, D>, 
- linearRowFilter_caller<4 , CN, T, D>, linearRowFilter_caller<5 , CN, T, D>, 
- linearRowFilter_caller<6 , CN, T, D>, linearRowFilter_caller<7 , CN, T, D>, 
- linearRowFilter_caller<8 , CN, T, D>, linearRowFilter_caller<9 , CN, T, D>, 
- linearRowFilter_caller<10, CN, T, D>, linearRowFilter_caller<11, CN, T, D>, 
- linearRowFilter_caller<12, CN, T, D>, linearRowFilter_caller<13, CN, T, D>, 
- linearRowFilter_caller<14, CN, T, D>, linearRowFilter_caller<15, CN, T, D>};
+ {linearRowFilter_caller<0 , T, D>, linearRowFilter_caller<1 , T, D>, 
+ linearRowFilter_caller<2 , T, D>, linearRowFilter_caller<3 , T, D>, 
+ linearRowFilter_caller<4 , T, D>, linearRowFilter_caller<5 , T, D>, 
+ linearRowFilter_caller<6 , T, D>, linearRowFilter_caller<7 , T, D>, 
+ linearRowFilter_caller<8 , T, D>, linearRowFilter_caller<9 , T, D>, 
+ linearRowFilter_caller<10, T, D>, linearRowFilter_caller<11, T, D>, 
+ linearRowFilter_caller<12, T, D>, linearRowFilter_caller<13, T, D>, 
+ linearRowFilter_caller<14, T, D>, linearRowFilter_caller<15, T, D>};

// NOTE(review): loadLinearKernel presumably copies the host-side kernel
// coefficients to device (constant) memory before the launch -- its
// definition is not visible in this chunk; confirm.
loadLinearKernel(kernel, ksize);
+
// Untyped DevMem2D views are re-typed to the element types expected here.
callers[ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor);
}
\r
- template void linearRowFilter_gpu<4, uchar4, uchar4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<uchar4, uchar4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<uchar4, char4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<char4, uchar4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<char4, char4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
\r
- /* void linearRowFilter_gpu_8u_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<4, uchar4, uchar4>(src, dst, kernel, ksize, anchor);\r
- }*/\r
- void linearRowFilter_gpu_8u_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<4, uchar4, char4>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_8s_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<4, char4, uchar4>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_8s_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<4, char4, char4>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_16u_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<2, ushort2, ushort2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_16u_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<2, ushort2, short2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_16s_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<2, short2, ushort2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_16s_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<2, short2, short2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_32s_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<1, int, int>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_32s_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<1, int, float>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_32f_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<1, float, int>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearRowFilter_gpu_32f_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearRowFilter_gpu<1 ,float, float>(src, dst, kernel, ksize, anchor);\r
- }\r
+ template void linearRowFilter_gpu<ushort2, ushort2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<ushort2, short2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<short2, ushort2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<short2, short2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+\r
+ template void linearRowFilter_gpu<int, int>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<int, float>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<float, int>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearRowFilter_gpu<float, float>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
}}}\r
\r
namespace filter_krnls\r
{\r
- template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int KERNEL_SIZE, int CN, typename T, typename D>\r
+ template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int KERNEL_SIZE, typename T, typename D>\r
__global__ void linearColumnFilter(const T* src, size_t src_step, D* dst, size_t dst_step, int anchor, int width, int height)\r
{\r
__shared__ T smem[BLOCK_DIM_Y * BLOCK_DIM_X * 3];\r
\r
if (threadY < height)\r
{\r
- typedef typename TypeVec<float, CN>::vec_t sum_t;\r
+ typedef typename TypeVec<float, VecTraits<T>::cn>::vec_t sum_t;\r
sum_t sum = VecTraits<sum_t>::all(0);\r
\r
sDataColumn += (threadIdx.y + blockDim.y - anchor)* smem_step;\r
\r
namespace cv { namespace gpu { namespace filters\r
{\r
- template <int KERNEL_SIZE, int CN, typename T, typename D>
+ template <int KERNEL_SIZE, typename T, typename D>
// Host-side launcher for filter_krnls::linearColumnFilter; mirrors
// linearRowFilter_caller. The CN template parameter was removed because the
// kernel now derives the channel count from T (VecTraits<T>::cn).
// NOTE(review): BLOCK_DIM_Y is used below but its declaration is not visible
// in this chunk -- presumably defined alongside BLOCK_DIM_X; confirm.
void linearColumnFilter_caller(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor)
{
const int BLOCK_DIM_X = 16;
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
// One block per BLOCK_DIM_X x BLOCK_DIM_Y tile of the image (rounded up).
dim3 blocks(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y));

// Steps are converted from bytes to elements (step/elemSize) before launch.
- filter_krnls::linearColumnFilter<BLOCK_DIM_X, BLOCK_DIM_Y, KERNEL_SIZE, CN><<<blocks, threads>>>(src.data, src.step/src.elemSize(), 
+ filter_krnls::linearColumnFilter<BLOCK_DIM_X, BLOCK_DIM_Y, KERNEL_SIZE><<<blocks, threads>>>(src.data, src.step/src.elemSize(), 
dst.data, dst.step/dst.elemSize(), anchor, src.cols, src.rows);

// Blocking sync so kernel-execution errors surface here via cudaSafeCall.
cudaSafeCall( cudaThreadSynchronize() );
}
\r
- template <int CN, typename T, typename D>
- inline void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
+ template <typename T, typename D>
+ void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
// Public entry point for the separable column filter: uploads the 1-D kernel
// and dispatches to the caller instantiated for the requested kernel size.
// `inline` was dropped so the explicit instantiations below emit real symbols.
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor);
// Dispatch table indexed by kernel size; exactly 16 entries (sizes 0..15),
// so callers must guarantee ksize < 16 -- there is no bounds check here
// (a CV_Assert is performed at the call site elsewhere in this patch).
static const caller_t callers[] = 
- {linearColumnFilter_caller<0 , CN, T, D>, linearColumnFilter_caller<1 , CN, T, D>, 
- linearColumnFilter_caller<2 , CN, T, D>, linearColumnFilter_caller<3 , CN, T, D>, 
- linearColumnFilter_caller<4 , CN, T, D>, linearColumnFilter_caller<5 , CN, T, D>, 
- linearColumnFilter_caller<6 , CN, T, D>, linearColumnFilter_caller<7 , CN, T, D>, 
- linearColumnFilter_caller<8 , CN, T, D>, linearColumnFilter_caller<9 , CN, T, D>, 
- linearColumnFilter_caller<10, CN, T, D>, linearColumnFilter_caller<11, CN, T, D>, 
- linearColumnFilter_caller<12, CN, T, D>, linearColumnFilter_caller<13, CN, T, D>, 
- linearColumnFilter_caller<14, CN, T, D>, linearColumnFilter_caller<15, CN, T, D>};
+ {linearColumnFilter_caller<0 , T, D>, linearColumnFilter_caller<1 , T, D>, 
+ linearColumnFilter_caller<2 , T, D>, linearColumnFilter_caller<3 , T, D>, 
+ linearColumnFilter_caller<4 , T, D>, linearColumnFilter_caller<5 , T, D>, 
+ linearColumnFilter_caller<6 , T, D>, linearColumnFilter_caller<7 , T, D>, 
+ linearColumnFilter_caller<8 , T, D>, linearColumnFilter_caller<9 , T, D>, 
+ linearColumnFilter_caller<10, T, D>, linearColumnFilter_caller<11, T, D>, 
+ linearColumnFilter_caller<12, T, D>, linearColumnFilter_caller<13, T, D>, 
+ linearColumnFilter_caller<14, T, D>, linearColumnFilter_caller<15, T, D>};

// NOTE(review): loadLinearKernel presumably copies the host-side kernel
// coefficients to device (constant) memory before the launch -- its
// definition is not visible in this chunk; confirm.
loadLinearKernel(kernel, ksize);
+
// Untyped DevMem2D views are re-typed to the element types expected here.
callers[ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor);
}
\r
- void linearColumnFilter_gpu_8u_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<4, uchar4, uchar4>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_8u_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<4, uchar4, char4>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_8s_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<4, char4, uchar4>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_8s_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<4, char4, char4>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_16u_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<2, ushort2, ushort2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_16u_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<2, ushort2, short2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_16s_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<2, short2, ushort2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_16s_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<2, short2, short2>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_32s_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<1, int, int>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_32s_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<1, int, float>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_32f_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<1, float, int>(src, dst, kernel, ksize, anchor);\r
- }\r
- void linearColumnFilter_gpu_32f_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)\r
- {\r
- linearColumnFilter_gpu<1, float, float>(src, dst, kernel, ksize, anchor);\r
- }\r
+ template void linearColumnFilter_gpu<uchar4, uchar4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<uchar4, char4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<char4, uchar4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<char4, char4>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+\r
+ template void linearColumnFilter_gpu<ushort2, ushort2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<ushort2, short2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<short2, ushort2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<short2, short2>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+\r
+ template void linearColumnFilter_gpu<int, int>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<int, float>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<float, int>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
+ template void linearColumnFilter_gpu<float, float>(const DevMem2D&, const DevMem2D&, const float[], int , int);\r
}}}\r
\r
/////////////////////////////////////////////////////////////////////////////////////////////////\r
\r
namespace cv { namespace gpu { namespace filters\r
{\r
- template <int CN, typename T, typename D>\r
+ template <typename T, typename D>\r
void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
\r
- //void linearRowFilter_gpu_8u_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_8u_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_8s_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_8s_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_16u_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_16u_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_16s_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_16s_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_32s_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_32s_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_32f_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearRowFilter_gpu_32f_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
-\r
- void linearColumnFilter_gpu_8u_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_8u_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_8s_8u_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_8s_8s_c4(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_16u_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_16u_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_16s_16u_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_16s_16s_c2(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_32s_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_32s_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_32f_32s_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
- void linearColumnFilter_gpu_32f_32f_c1(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
+ template <typename T, typename D>\r
+ void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor);\r
}}}\r
\r
namespace\r
static const nppFilter1D_t nppFilter1D_callers[] = {0, nppiFilterRow_8u_C1R, 0, 0, nppiFilterRow_8u_C4R};\r
static const gpuFilter1D_t gpuFilter1D_callers[6][6] =\r
{\r
- {linearRowFilter_gpu<4, uchar4, uchar4>/*linearRowFilter_gpu_8u_8u_c4*/,linearRowFilter_gpu_8u_8s_c4,0,0,0,0},\r
-\r
- {linearRowFilter_gpu_8s_8u_c4,linearRowFilter_gpu_8s_8s_c4,0,0,0,0},\r
- {0,0,linearRowFilter_gpu_16u_16u_c2,linearRowFilter_gpu_16u_16s_c2,0,0},\r
- {0,0,linearRowFilter_gpu_16s_16u_c2,linearRowFilter_gpu_16s_16s_c2,0,0},\r
- {0,0,0,0,linearRowFilter_gpu_32s_32s_c1, linearRowFilter_gpu_32s_32f_c1},\r
- {0,0,0,0,linearRowFilter_gpu_32f_32s_c1, linearRowFilter_gpu_32f_32f_c1}\r
+ {linearRowFilter_gpu<uchar4, uchar4>,linearRowFilter_gpu<uchar4, char4>,0,0,0,0},\r
+ {linearRowFilter_gpu<char4, uchar4>,linearRowFilter_gpu<char4, char4>,0,0,0,0},\r
+ {0,0,linearRowFilter_gpu<ushort2, ushort2>,linearRowFilter_gpu<ushort2, short2>,0,0},\r
+ {0,0,linearRowFilter_gpu<short2, ushort2>,linearRowFilter_gpu<short2, short2>,0,0},\r
+ {0,0,0,0,linearRowFilter_gpu<int, int>, linearRowFilter_gpu<int, float>},\r
+ {0,0,0,0,linearRowFilter_gpu<float, int>, linearRowFilter_gpu<float, float>}\r
};\r
\r
if ((bufType == srcType) && (srcType == CV_8UC1 || srcType == CV_8UC4))\r
Mat cont_krnl = temp.reshape(1, 1);\r
\r
int ksize = cont_krnl.cols;\r
+\r
+ CV_Assert(ksize < 16);\r
+\r
normalizeAnchor(anchor, ksize);\r
\r
return Ptr<BaseRowFilter_GPU>(new GpuLinearRowFilter(ksize, anchor, cont_krnl, \r
static const nppFilter1D_t nppFilter1D_callers[] = {0, nppiFilterColumn_8u_C1R, 0, 0, nppiFilterColumn_8u_C4R};\r
static const gpuFilter1D_t gpuFilter1D_callers[6][6] =\r
{\r
- {linearColumnFilter_gpu_8u_8u_c4,linearColumnFilter_gpu_8u_8s_c4,0,0,0,0},\r
- {linearColumnFilter_gpu_8s_8u_c4,linearColumnFilter_gpu_8s_8s_c4,0,0,0,0},\r
- {0,0,linearColumnFilter_gpu_16u_16u_c2,linearColumnFilter_gpu_16u_16s_c2,0,0},\r
- {0,0,linearColumnFilter_gpu_16s_16u_c2,linearColumnFilter_gpu_16s_16s_c2,0,0},\r
- {0,0,0,0,linearColumnFilter_gpu_32s_32s_c1, linearColumnFilter_gpu_32s_32f_c1},\r
- {0,0,0,0,linearColumnFilter_gpu_32f_32s_c1, linearColumnFilter_gpu_32f_32f_c1}\r
+ {linearColumnFilter_gpu<uchar4, uchar4>,linearColumnFilter_gpu<uchar4, char4>,0,0,0,0},\r
+ {linearColumnFilter_gpu<char4, uchar4>,linearColumnFilter_gpu<char4, char4>,0,0,0,0},\r
+ {0,0,linearColumnFilter_gpu<ushort2, ushort2>,linearColumnFilter_gpu<ushort2, short2>,0,0},\r
+ {0,0,linearColumnFilter_gpu<short2, ushort2>,linearColumnFilter_gpu<short2, short2>,0,0},\r
+ {0,0,0,0,linearColumnFilter_gpu<int, int>, linearColumnFilter_gpu<int, float>},\r
+ {0,0,0,0,linearColumnFilter_gpu<float, int>, linearColumnFilter_gpu<float, float>}\r
};\r
\r
- double kernelMin;\r
- minMaxLoc(columnKernel, &kernelMin);\r
- \r
if ((bufType == dstType) && (bufType == CV_8UC1 || bufType == CV_8UC4))\r
{\r
GpuMat gpu_col_krnl;\r
Mat cont_krnl = temp.reshape(1, 1);\r
\r
int ksize = cont_krnl.cols;\r
+\r
+ CV_Assert(ksize < 16);\r
+\r
normalizeAnchor(anchor, ksize);\r
\r
return Ptr<BaseColumnFilter_GPU>(new GpuLinearColumnFilter(ksize, anchor, cont_krnl, \r