1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
41 // Copyright (c) 2010, Paul Furgale, Chi Hay Tong
43 // The original code was written by Paul Furgale and Chi Hay Tong
44 // and later optimized and prepared for integration into OpenCV by Itseez.
48 #if !defined CUDA_DISABLER
50 #include "opencv2/gpu/device/common.hpp"
51 #include "opencv2/gpu/device/utility.hpp"
52 #include "opencv2/gpu/device/functional.hpp"
53 #include "opencv2/gpu/device/limits.hpp"
54 #include "opencv2/gpu/device/vec_math.hpp"
55 #include "opencv2/gpu/device/reduce.hpp"
57 using namespace cv::gpu;
58 using namespace cv::gpu::device;
// Tracker parameters held in constant memory; uploaded from the host by
// loadConstants() below (window size, its half extents, LK iteration count).
62 __constant__ int c_winSize_x;
63 __constant__ int c_winSize_y;
64 __constant__ int c_halfWin_x;
65 __constant__ int c_halfWin_y;
66 __constant__ int c_iters;
// Texture references for the two input images: I (previous frame) and
// J (next frame), in 1-channel and 4-channel float variants.  Bilinear
// filtering + clamp addressing, so sub-pixel window samples interpolate and
// out-of-range reads return the border pixel.
68 texture<float, cudaTextureType2D, cudaReadModeElementType> tex_If(false, cudaFilterModeLinear, cudaAddressModeClamp);
69 texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_If4(false, cudaFilterModeLinear, cudaAddressModeClamp);
// 8-bit source image used by the dense kernel; point-sampled because the
// dense path does integer pixel arithmetic.
70 texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_Ib(false, cudaFilterModePoint, cudaAddressModeClamp);
72 texture<float, cudaTextureType2D, cudaReadModeElementType> tex_Jf(false, cudaFilterModeLinear, cudaAddressModeClamp);
73 texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_Jf4(false, cudaFilterModeLinear, cudaAddressModeClamp);
// Channel-count-dispatched reader for image I: Tex_I<1> samples the
// single-channel texture tex_If, Tex_I<4> the 4-channel texture tex_If4.
// NOTE(review): brace/close lines of these structs fall on lines elided
// from this listing.
75 template <int cn> struct Tex_I;
76 template <> struct Tex_I<1>
78 static __device__ __forceinline__ float read(float x, float y)
80 return tex2D(tex_If, x, y);
83 template <> struct Tex_I<4>
85 static __device__ __forceinline__ float4 read(float x, float y)
87 return tex2D(tex_If4, x, y);
// Channel-count-dispatched reader for image J, mirroring Tex_I: Tex_J<1>
// samples tex_Jf, Tex_J<4> samples tex_Jf4.
// NOTE(review): brace/close lines of these structs fall on lines elided
// from this listing.
91 template <int cn> struct Tex_J;
92 template <> struct Tex_J<1>
94 static __device__ __forceinline__ float read(float x, float y)
96 return tex2D(tex_Jf, x, y);
99 template <> struct Tex_J<4>
101 static __device__ __forceinline__ float4 read(float x, float y)
103 return tex2D(tex_Jf4, x, y);
// accum(): fold a per-pixel value into a scalar accumulator, summing over
// channels.  The float4 overload adds only x/y/z -- the fourth (alpha)
// channel is deliberately excluded from the sums.
// abs_(): per-type absolute value used for the error residual.
// NOTE(review): the scalar accum body, both abs_ bodies and the closing
// braces fall on lines elided from this listing (presumably `dst += val;`
// and component-wise fabsf) -- confirm against the original file.
107 __device__ __forceinline__ void accum(float& dst, float val)
111 __device__ __forceinline__ void accum(float& dst, const float4& val)
113 dst += val.x + val.y + val.z;
116 __device__ __forceinline__ float abs_(float a)
120 __device__ __forceinline__ float4 abs_(const float4& a)
// Sparse pyramidal Lucas-Kanade tracker: one thread block per feature point
// (blockIdx.x indexes prevPts/nextPts/status/err).  The block cooperatively
// builds the spatial-gradient matrix A = [A11 A12; A12 A22] over the window
// around the point, then runs up to c_iters iterations, each shifting nextPt
// by the solution of A * delta = b.  With calcErr it also writes the mean
// absolute I/J patch difference at the converged position.
// NOTE(review): this listing is elided -- brace lines, #else/#endif pairs,
// the A11/A12/A22 and b1/b2/delta/errval declarations, the sub-SM30
// shared-memory broadcast path and the scaling of delta by 1/D all fall on
// missing lines; confirm against the original file before editing.
125 template <int cn, int PATCH_X, int PATCH_Y, bool calcErr>
126 __global__ void sparseKernel(const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols)
// Smaller block on pre-SM12 parts; BLOCK_SIZE must match the launch
// configuration (block.x * block.y) for the reduce<> calls below.
128 #if __CUDA_ARCH__ <= 110
129 const int BLOCK_SIZE = 128;
131 const int BLOCK_SIZE = 256;
// Shared scratch for the 3-way (A11/A12/A22) and 2-way (b1/b2) reductions.
134 __shared__ float smem1[BLOCK_SIZE];
135 __shared__ float smem2[BLOCK_SIZE];
136 __shared__ float smem3[BLOCK_SIZE];
138 const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
// Scale the level-0 point coordinate down to this pyramid level.
140 float2 prevPt = prevPts[blockIdx.x];
141 prevPt.x *= (1.0f / (1 << level));
142 prevPt.y *= (1.0f / (1 << level));
// Point outside the image at this level: mark it lost.  status is written
// only at the finest level so coarser levels don't clobber the flag.
144 if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows)
146 if (tid == 0 && level == 0)
147 status[blockIdx.x] = 0;
// Move from the window center to the window's top-left corner.
152 prevPt.x -= c_halfWin_x;
153 prevPt.y -= c_halfWin_y;
155 // extract the patch from the first image, compute covariation matrix of derivatives
// Each thread caches its strided share of the window in registers;
// PATCH_X/PATCH_Y are the per-thread tile counts for the chosen block shape.
161 typedef typename TypeVec<float, cn>::vec_type work_type;
163 work_type I_patch [PATCH_Y][PATCH_X];
164 work_type dIdx_patch[PATCH_Y][PATCH_X];
165 work_type dIdy_patch[PATCH_Y][PATCH_X];
167 for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i)
169 for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j)
// +0.5f: sample at texel centers.
171 float x = prevPt.x + xBase + 0.5f;
172 float y = prevPt.y + yBase + 0.5f;
174 I_patch[i][j] = Tex_I<cn>::read(x, y);
// Scharr-style 3/10/3 cross-correlation taps for the x and y derivatives.
178 work_type dIdx = 3.0f * Tex_I<cn>::read(x+1, y-1) + 10.0f * Tex_I<cn>::read(x+1, y) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
179 (3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x-1, y) + 3.0f * Tex_I<cn>::read(x-1, y+1));
181 work_type dIdy = 3.0f * Tex_I<cn>::read(x-1, y+1) + 10.0f * Tex_I<cn>::read(x, y+1) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
182 (3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x, y-1) + 3.0f * Tex_I<cn>::read(x+1, y-1));
184 dIdx_patch[i][j] = dIdx;
185 dIdy_patch[i][j] = dIdy;
// Accumulate structure-tensor entries; accum() sums over channels.
187 accum(A11, dIdx * dIdx);
188 accum(A12, dIdx * dIdy);
189 accum(A22, dIdy * dIdy);
// Block-wide sum of the per-thread partial tensors.
193 reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>()));
// NOTE(review): on SM30+ the reduced values are presumably broadcast to all
// threads via shuffle; the shared-memory fallback branch is elided here.
195 #if __CUDA_ARCH__ >= 300
// Reject points with a (near-)singular gradient matrix.
210 float D = A11 * A22 - A12 * A12;
212 if (D < numeric_limits<float>::epsilon())
214 if (tid == 0 && level == 0)
215 status[blockIdx.x] = 0;
// Start from the estimate carried over from the coarser level (nextPts is
// presumably pre-scaled by the caller -- elided here; confirm), then move to
// the window's top-left corner.
226 float2 nextPt = nextPts[blockIdx.x];
230 nextPt.x -= c_halfWin_x;
231 nextPt.y -= c_halfWin_y;
233 for (int k = 0; k < c_iters; ++k)
// Window drifted outside the image during iteration: give up on the point.
235 if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows)
237 if (tid == 0 && level == 0)
238 status[blockIdx.x] = 0;
// Build the mismatch vector b = sum(diff * grad) over the window; the 32x
// scale on diff matches the magnitude of the 3/10/3 derivative taps
// (NOTE(review): confirm the exact scale rationale in the original file).
246 for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
248 for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
250 work_type I_val = I_patch[i][j];
251 work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
253 work_type diff = (J_val - I_val) * 32.0f;
255 accum(b1, diff * dIdx_patch[i][j]);
256 accum(b2, diff * dIdy_patch[i][j]);
260 reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>()));
262 #if __CUDA_ARCH__ >= 300
// Cramer's-rule solve of the 2x2 system; the division by the determinant D
// falls on elided lines.
276 delta.x = A12 * b2 - A22 * b1;
277 delta.y = A12 * b1 - A11 * b2;
// Converged once the update drops below 1/100 pixel.
282 if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
// Optional residual: block-reduced sum of absolute I/J differences at the
// final matched position.
289 for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
291 for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
293 work_type I_val = I_patch[i][j];
294 work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
296 work_type diff = J_val - I_val;
298 accum(errval, abs_(diff));
302 reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>());
// Convert back from top-left corner to window center and publish.
307 nextPt.x += c_halfWin_x;
308 nextPt.y += c_halfWin_y;
310 nextPts[blockIdx.x] = nextPt;
// Error is normalized by window area and channel count.
313 err[blockIdx.x] = static_cast<float>(errval) / (cn * c_winSize_x * c_winSize_y);
// Host-side launcher for sparseKernel: instantiates the calcErr=true variant
// only at the finest pyramid level when an error output buffer is supplied.
// NOTE(review): the grid setup (presumably one block per tracked point) and
// the `if (stream == 0)`-style guard around the final synchronize fall on
// elided lines.  Also note the kernel launches here do not pass `stream` in
// the launch configuration -- confirm whether that is intended.
317 template <int cn, int PATCH_X, int PATCH_Y>
318 void sparse_caller(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
319 int level, dim3 block, cudaStream_t stream)
323 if (level == 0 && err)
324 sparseKernel<cn, PATCH_X, PATCH_Y, true><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
326 sparseKernel<cn, PATCH_X, PATCH_Y, false><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
// Surface launch-configuration errors immediately.
328 cudaSafeCall( cudaGetLastError() );
331 cudaSafeCall( cudaDeviceSynchronize() );
// Dense Lucas-Kanade kernel: one thread per output pixel of the flow fields
// u/v.  The block first stages its padded source patch and the two 3/10/3
// derivative patches in dynamic shared memory (integer arithmetic on the
// 8-bit source image tex_Ib), then each thread iterates up to c_iters times
// against the float image tex_Jf.  prevU/prevV carry the coarser pyramid
// level's flow at half resolution.
// NOTE(review): this listing is elided -- I_patch's initialization from
// smem[], the __syncthreads() after the staging loop, the A11/A12/A22,
// b1/b2, nextPt, delta and errval declarations/accumulations, and the
// scaling of delta by 1/D all fall on missing lines; confirm against the
// original file before editing.
334 template <bool calcErr>
335 __global__ void denseKernel(PtrStepf u, PtrStepf v, const PtrStepf prevU, const PtrStepf prevV, PtrStepf err, const int rows, const int cols)
// Shared layout: [I_patch | dIdx_patch | dIdy_patch], each
// patchWidth * patchHeight ints; size supplied at launch by dense().
337 extern __shared__ int smem[];
339 const int patchWidth = blockDim.x + 2 * c_halfWin_x;
340 const int patchHeight = blockDim.y + 2 * c_halfWin_y;
343 int* dIdx_patch = I_patch + patchWidth * patchHeight;
344 int* dIdy_patch = dIdx_patch + patchWidth * patchHeight;
346 const int xBase = blockIdx.x * blockDim.x;
347 const int yBase = blockIdx.y * blockDim.y;
// Cooperative staging: every thread strides over the padded patch.
349 for (int i = threadIdx.y; i < patchHeight; i += blockDim.y)
351 for (int j = threadIdx.x; j < patchWidth; j += blockDim.x)
// +0.5f: sample at texel centers.
353 float x = xBase - c_halfWin_x + j + 0.5f;
354 float y = yBase - c_halfWin_y + i + 0.5f;
356 I_patch[i * patchWidth + j] = tex2D(tex_Ib, x, y);
// Integer 3/10/3 cross-correlation taps for the x and y derivatives.
360 dIdx_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x+1, y-1) + 10 * tex2D(tex_Ib, x+1, y) + 3 * tex2D(tex_Ib, x+1, y+1) -
361 (3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x-1, y) + 3 * tex2D(tex_Ib, x-1, y+1));
363 dIdy_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x-1, y+1) + 10 * tex2D(tex_Ib, x, y+1) + 3 * tex2D(tex_Ib, x+1, y+1) -
364 (3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x, y-1) + 3 * tex2D(tex_Ib, x+1, y-1));
// Out-of-image threads only helped stage shared memory; they stop here.
370 const int x = xBase + threadIdx.x;
371 const int y = yBase + threadIdx.y;
373 if (x >= cols || y >= rows)
// Per-pixel structure tensor summed over the tracking window, read from the
// staged derivative patches.
380 for (int i = 0; i < c_winSize_y; ++i)
382 for (int j = 0; j < c_winSize_x; ++j)
384 int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
385 int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
// Singular gradient matrix: no flow estimate for this pixel; flag the error
// plane with FLT_MAX when requested.
397 float D = A11 * A22 - A12 * A12;
399 if (D < numeric_limits<float>::epsilon())
402 err(y, x) = numeric_limits<float>::max();
// Seed from the coarser level: prev flow is half resolution, so sample at
// (y/2, x/2) and double the displacement.
414 nextPt.x = x + prevU(y/2, x/2) * 2.0f;
415 nextPt.y = y + prevV(y/2, x/2) * 2.0f;
417 for (int k = 0; k < c_iters; ++k)
// Estimate left the image: abandon this pixel.
419 if (nextPt.x < 0 || nextPt.x >= cols || nextPt.y < 0 || nextPt.y >= rows)
422 err(y, x) = numeric_limits<float>::max();
// Mismatch vector over the window; note the J sample is truncated to int,
// and diff carries the same 32x scale as the sparse kernel
// (NOTE(review): confirm scale rationale in the original file).
430 for (int i = 0; i < c_winSize_y; ++i)
432 for (int j = 0; j < c_winSize_x; ++j)
434 int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
435 int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
437 int diff = (J - I) * 32;
439 int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
440 int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
// Cramer's-rule solve; division by D falls on elided lines.
448 delta.x = A12 * b2 - A22 * b1;
449 delta.y = A12 * b1 - A11 * b2;
// Converged once the update drops below 1/100 pixel.
454 if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
// Flow = final matched position minus the pixel coordinate.
458 u(y, x) = nextPt.x - x;
459 v(y, x) = nextPt.y - y;
// Optional residual: mean absolute I/J difference over the window.
465 for (int i = 0; i < c_winSize_y; ++i)
467 for (int j = 0; j < c_winSize_x; ++j)
469 int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
470 int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
472 errval += ::abs(J - I);
476 err(y, x) = static_cast<float>(errval) / (c_winSize_x * c_winSize_y);
// Upload tracker parameters to the constant-memory symbols used by both
// kernels.  halfWin = (winSize - 1) / 2 is the window half-extent matching
// the corner offsets applied in the kernels.
// NOTE(review): the function's brace lines are elided from this listing.
480 void loadConstants(int2 winSize, int iters)
482 cudaSafeCall( cudaMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) );
483 cudaSafeCall( cudaMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) );
485 int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
486 cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) );
487 cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) );
489 cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) );
// Entry point for single-channel sparse tracking: binds the float textures
// for I and J, then dispatches to the sparse_caller instantiation matching
// the per-thread patch tile counts (patch.x, patch.y must be in [1, 5]).
492 void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
493 int level, dim3 block, dim3 patch, cudaStream_t stream)
495 typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
496 int level, dim3 block, cudaStream_t stream);
// funcs[PATCH_Y - 1][PATCH_X - 1] -> sparse_caller<1, PATCH_X, PATCH_Y>.
498 static const func_t funcs[5][5] =
500 {sparse_caller<1, 1, 1>, sparse_caller<1, 2, 1>, sparse_caller<1, 3, 1>, sparse_caller<1, 4, 1>, sparse_caller<1, 5, 1>},
501 {sparse_caller<1, 1, 2>, sparse_caller<1, 2, 2>, sparse_caller<1, 3, 2>, sparse_caller<1, 4, 2>, sparse_caller<1, 5, 2>},
502 {sparse_caller<1, 1, 3>, sparse_caller<1, 2, 3>, sparse_caller<1, 3, 3>, sparse_caller<1, 4, 3>, sparse_caller<1, 5, 3>},
503 {sparse_caller<1, 1, 4>, sparse_caller<1, 2, 4>, sparse_caller<1, 3, 4>, sparse_caller<1, 4, 4>, sparse_caller<1, 5, 4>},
504 {sparse_caller<1, 1, 5>, sparse_caller<1, 2, 5>, sparse_caller<1, 3, 5>, sparse_caller<1, 4, 5>, sparse_caller<1, 5, 5>}
507 bindTexture(&tex_If, I);
508 bindTexture(&tex_Jf, J);
510 funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
511 level, block, stream);
// Entry point for 4-channel sparse tracking; mirrors sparse1 but binds the
// float4 textures and dispatches to the cn=4 sparse_caller instantiations
// (patch.x, patch.y must be in [1, 5]).
514 void sparse4(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
515 int level, dim3 block, dim3 patch, cudaStream_t stream)
517 typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
518 int level, dim3 block, cudaStream_t stream);
// funcs[PATCH_Y - 1][PATCH_X - 1] -> sparse_caller<4, PATCH_X, PATCH_Y>.
520 static const func_t funcs[5][5] =
522 {sparse_caller<4, 1, 1>, sparse_caller<4, 2, 1>, sparse_caller<4, 3, 1>, sparse_caller<4, 4, 1>, sparse_caller<4, 5, 1>},
523 {sparse_caller<4, 1, 2>, sparse_caller<4, 2, 2>, sparse_caller<4, 3, 2>, sparse_caller<4, 4, 2>, sparse_caller<4, 5, 2>},
524 {sparse_caller<4, 1, 3>, sparse_caller<4, 2, 3>, sparse_caller<4, 3, 3>, sparse_caller<4, 4, 3>, sparse_caller<4, 5, 3>},
525 {sparse_caller<4, 1, 4>, sparse_caller<4, 2, 4>, sparse_caller<4, 3, 4>, sparse_caller<4, 4, 4>, sparse_caller<4, 5, 4>},
526 {sparse_caller<4, 1, 5>, sparse_caller<4, 2, 5>, sparse_caller<4, 3, 5>, sparse_caller<4, 4, 5>, sparse_caller<4, 5, 5>}
529 bindTexture(&tex_If4, I);
530 bindTexture(&tex_Jf4, J);
532 funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
533 level, block, stream);
// Dense-flow launcher: one thread per pixel; dynamic shared memory holds the
// block's padded image patch plus two derivative patches (3 int planes).
// NOTE(review): `block` is declared on an elided line (a fixed dim3), and
// the err-availability check selecting between the two launches plus the
// `if (stream == 0)`-style guard around the final synchronize are likewise
// elided -- confirm against the original file.
536 void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, PtrStepSzf err, int2 winSize, cudaStream_t stream)
539 dim3 grid(divUp(I.cols, block.x), divUp(I.rows, block.y));
541 bindTexture(&tex_Ib, I);
542 bindTexture(&tex_Jf, J);
// Shared-memory budget must match denseKernel's [I | dIdx | dIdy] layout.
544 int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
545 const int patchWidth = block.x + 2 * halfWin.x;
546 const int patchHeight = block.y + 2 * halfWin.y;
547 size_t smem_size = 3 * patchWidth * patchHeight * sizeof(int);
// calcErr=true path writes per-pixel residuals into err; the false path
// passes an empty PtrStepf placeholder instead.
551 denseKernel<true><<<grid, block, smem_size, stream>>>(u, v, prevU, prevV, err, I.rows, I.cols);
552 cudaSafeCall( cudaGetLastError() );
556 denseKernel<false><<<grid, block, smem_size, stream>>>(u, v, prevU, prevV, PtrStepf(), I.rows, I.cols);
557 cudaSafeCall( cudaGetLastError() );
561 cudaSafeCall( cudaDeviceSynchronize() );
565 #endif /* CUDA_DISABLER */