/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER

#include "internal_shared.hpp"

namespace cv { namespace gpu { namespace device
{
    namespace stereobm
    {
        //////////////////////////////////////////////////////////////////////////////////////////////////
        /////////////////////////////////////// Stereo BM ////////////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////////
        #define ROWSperTHREAD 21     // the number of rows a thread will process

        #define BLOCK_W 128          // the thread block width (464)
        #define N_DISPARITIES 8

        #define STEREO_MIND 0                    // the minimum disparity to check
        #define STEREO_DISP_STEP N_DISPARITIES   // the d-loop step: each pass scores N_DISPARITIES candidates at once
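
        // A worked example (illustrative only, not compiled) of the work one thread
        // block covers under the defaults above. EXAMPLE_MAXDISP is an assumption
        // for the example, not a value used anywhere in this file.
        #if 0
        enum { EXAMPLE_MAXDISP  = 64 };
        enum { PIXELS_PER_BLOCK = BLOCK_W * ROWSperTHREAD };             // 128 * 21 = 2688 output pixels
        enum { DISPARITY_PASSES = EXAMPLE_MAXDISP / STEREO_DISP_STEP };  // 8 passes of 8 candidates each
        #endif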
        __constant__ unsigned int* cminSSDImage;
        __constant__ size_t cminSSD_step;
        __constant__ int cwidth;
        __constant__ int cheight;
        __device__ __forceinline__ int SQ(int a)
        {
            return a * a;
        }
        template<int RADIUS>
        __device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd)
        {
            unsigned int cache = 0;
            unsigned int cache2 = 0;

            // partial sum of the RADIUS column SSDs to this thread's right
            for(int i = 1; i <= RADIUS; i++)
                cache += col_ssd[i];

            col_ssd_cache[0] = cache;

            __syncthreads();

            // reuse the neighbour thread's cached partial sum where one exists; the
            // last RADIUS threads of the block have no neighbour and sum the tail
            if (threadIdx.x < BLOCK_W - RADIUS)
                cache2 = col_ssd_cache[RADIUS];
            else
                for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++)
                    cache2 += col_ssd[i];

            return col_ssd[0] + cache + cache2;
        }
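
        // A scalar sketch (illustrative only, not compiled) of what CalcSSD returns
        // for one thread: the sum of 2*RADIUS+1 consecutive column SSDs. The kernel
        // version above merely computes the upper half via a neighbour's cached
        // partial sum instead of re-reading it.
        #if 0
        unsigned int calc_ssd_reference(const unsigned int* col_ssd, int radius)
        {
            unsigned int sum = 0;
            for (int i = 0; i <= 2 * radius; ++i)
                sum += col_ssd[i];
            return sum;
        }
        #endif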
        template<int RADIUS>
        __device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd)
        {
            unsigned int ssd[N_DISPARITIES];

            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS));

            int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7])));

            int bestIdx = 0;
            for (int i = 0; i < N_DISPARITIES; i++)
            {
                if (mssd == ssd[i])
                    bestIdx = i;
            }

            return make_uint2(mssd, bestIdx);
        }
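
        // The nested ::min calls above form a pairwise reduction tree, which keeps
        // the dependency chain short. An equivalent sequential form (illustrative
        // only, not compiled):
        #if 0
        unsigned int mssd = ssd[0];
        for (int i = 1; i < N_DISPARITIES; ++i)
            mssd = ::min(mssd, ssd[i]);
        #endif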
        template<int RADIUS>
        __device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
        {
            unsigned char leftPixel1;
            unsigned char leftPixel2;
            unsigned char rightPixel1[8];
            unsigned char rightPixel2[8];
            unsigned int diff1, diff2;

            leftPixel1 = imageL[idx1];
            leftPixel2 = imageL[idx2];

            // shift into the right image: the 8 candidate disparities are d .. d+7
            idx1 = idx1 - d;
            idx2 = idx2 - d;

            rightPixel1[7] = imageR[idx1 - 7];
            rightPixel1[0] = imageR[idx1 - 0];
            rightPixel1[1] = imageR[idx1 - 1];
            rightPixel1[2] = imageR[idx1 - 2];
            rightPixel1[3] = imageR[idx1 - 3];
            rightPixel1[4] = imageR[idx1 - 4];
            rightPixel1[5] = imageR[idx1 - 5];
            rightPixel1[6] = imageR[idx1 - 6];

            rightPixel2[7] = imageR[idx2 - 7];
            rightPixel2[0] = imageR[idx2 - 0];
            rightPixel2[1] = imageR[idx2 - 1];
            rightPixel2[2] = imageR[idx2 - 2];
            rightPixel2[3] = imageR[idx2 - 3];
            rightPixel2[4] = imageR[idx2 - 4];
            rightPixel2[5] = imageR[idx2 - 5];
            rightPixel2[6] = imageR[idx2 - 6];

            // roll each disparity's column SSD one row down: add the incoming
            // bottom-row term (idx2), drop the outgoing top-row term (idx1)
            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            diff1 = leftPixel1 - rightPixel1[0];
            diff2 = leftPixel2 - rightPixel2[0];
            col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[1];
            diff2 = leftPixel2 - rightPixel2[1];
            col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[2];
            diff2 = leftPixel2 - rightPixel2[2];
            col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[3];
            diff2 = leftPixel2 - rightPixel2[3];
            col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[4];
            diff2 = leftPixel2 - rightPixel2[4];
            col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[5];
            diff2 = leftPixel2 - rightPixel2[5];
            col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[6];
            diff2 = leftPixel2 - rightPixel2[6];
            col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[7];
            diff2 = leftPixel2 - rightPixel2[7];
            col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
        }
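
        // StepDown is the standard O(1) sliding-window recurrence: if colSSD(y) is
        // the column SSD over rows y-2*RADIUS .. y, then
        //   colSSD(y+1) = colSSD(y) + SQ(new bottom-row diff) - SQ(old top-row diff)
        // A scalar sketch of one such update (illustrative only, not compiled):
        #if 0
        unsigned int step_down_reference(unsigned int col_ssd, int top_diff, int bottom_diff)
        {
            return col_ssd + SQ(bottom_diff) - SQ(top_diff);
        }
        #endif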
        template<int RADIUS>
        __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
        {
            unsigned char leftPixel1;
            int idx;
            unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0};

            for(int i = 0; i < (2 * RADIUS + 1); i++)
            {
                idx = y_tex * im_pitch + x_tex;
                leftPixel1 = imageL[idx];
                idx = idx - d;

                diffa[0] += SQ(leftPixel1 - imageR[idx - 0]);
                diffa[1] += SQ(leftPixel1 - imageR[idx - 1]);
                diffa[2] += SQ(leftPixel1 - imageR[idx - 2]);
                diffa[3] += SQ(leftPixel1 - imageR[idx - 3]);
                diffa[4] += SQ(leftPixel1 - imageR[idx - 4]);
                diffa[5] += SQ(leftPixel1 - imageR[idx - 5]);
                diffa[6] += SQ(leftPixel1 - imageR[idx - 6]);
                diffa[7] += SQ(leftPixel1 - imageR[idx - 7]);

                y_tex += 1;
            }

            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0];
            col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1];
            col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2];
            col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3];
            col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4];
            col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5];
            col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6];
            col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7];
        }
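
        // A scalar reference (illustrative only, not compiled) for what one thread's
        // InitColSSD computes for a single disparity d: the squared differences down
        // one image column over the full window height. The function name and
        // parameters are hypothetical, for illustration only.
        #if 0
        unsigned int column_ssd_reference(const unsigned char* L, const unsigned char* R,
                                          int pitch, int x, int y0, int d, int radius)
        {
            unsigned int s = 0;
            for (int i = 0; i < 2 * radius + 1; ++i)
            {
                int diff = L[(y0 + i) * pitch + x] - R[(y0 + i) * pitch + x - d];
                s += diff * diff;
            }
            return s;
        }
        #endif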
        template<int RADIUS>
        __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp)
        {
            extern __shared__ unsigned int col_ssd_cache[];
            volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
            volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? col_ssd + BLOCK_W : 0; //#define N_DIRTY_PIXELS (2 * RADIUS)

            //#define X (blockIdx.x * BLOCK_W + threadIdx.x + STEREO_MAXD)
            int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS);
            //#define Y (__mul24(blockIdx.y, ROWSperTHREAD) + RADIUS)
            #define Y (blockIdx.y * ROWSperTHREAD + RADIUS)
            //int Y = blockIdx.y * ROWSperTHREAD + RADIUS;

            unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step;
            unsigned char* disparImage = disp.data + X + Y * disp.step;
            /* dead code kept for reference; the running-minimum buffer is already
               initialised host-side by cudaMemset2D in stereoBM_GPU:
            if (X < cwidth)
            {
                unsigned int *minSSDImage_end = minSSDImage + min(ROWSperTHREAD, cheight - Y) * cminSSD_step;
                for(uint *ptr = minSSDImage; ptr != minSSDImage_end; ptr += cminSSD_step)
                    *ptr = 0xFFFFFFFF;
            }
            */
            int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS);
            int y_tex;
            int x_tex = X - RADIUS;

            if (x_tex >= cwidth)
                return;

            for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP)
            {
                y_tex = Y - RADIUS;

                InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd);

                if (col_ssd_extra > 0)
                    if (x_tex + BLOCK_W < cwidth)
                        InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra);

                __syncthreads(); //before MinSSD function

                if (X < cwidth - RADIUS && Y < cheight - RADIUS)
                {
                    uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd);
                    if (minSSD.x < minSSDImage[0])
                    {
                        disparImage[0] = (unsigned char)(d + minSSD.y);
                        minSSDImage[0] = minSSD.x;
                    }
                }

                for(int row = 1; row < end_row; row++)
                {
                    int idx1 = y_tex * img_step + x_tex;
                    int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex;

                    __syncthreads();

                    StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd);

                    if (col_ssd_extra)
                        if (x_tex + BLOCK_W < cwidth)
                            StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra);

                    y_tex += 1;

                    __syncthreads(); //before MinSSD function

                    if (X < cwidth - RADIUS && row < cheight - RADIUS - Y)
                    {
                        int idx = row * cminSSD_step;
                        uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd);
                        if (minSSD.x < minSSDImage[idx])
                        {
                            disparImage[disp.step * row] = (unsigned char)(d + minSSD.y);
                            minSSDImage[idx] = minSSD.x;
                        }
                    }
                } // for row
            } // for d
        }
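
        // A scalar reference (illustrative only, not compiled) of what the kernel
        // computes per pixel: the disparity minimising the window SSD. The kernel
        // arrives at the same argmin incrementally, keeping the best SSD so far in
        // minSSD_buf across d-passes. All names below are hypothetical.
        #if 0
        void stereo_bm_reference(const unsigned char* L, const unsigned char* R,
                                 unsigned char* disp, int w, int h, int pitch,
                                 int maxdisp, int radius)
        {
            for (int y = radius; y < h - radius; ++y)
                for (int x = maxdisp + radius; x < w - radius; ++x)
                {
                    unsigned int best = 0xFFFFFFFF;
                    int bestd = 0;
                    for (int d = 0; d < maxdisp; ++d)
                    {
                        unsigned int ssd = 0;
                        for (int dy = -radius; dy <= radius; ++dy)
                            for (int dx = -radius; dx <= radius; ++dx)
                            {
                                int diff = L[(y + dy) * pitch + x + dx] - R[(y + dy) * pitch + x + dx - d];
                                ssd += diff * diff;
                            }
                        if (ssd < best) { best = ssd; bestd = d; }
                    }
                    disp[y * pitch + x] = (unsigned char)bestd;
                }
        }
        #endif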
        template<int RADIUS> void kernel_caller(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, cudaStream_t & stream)
        {
            dim3 grid(1, 1, 1);
            dim3 threads(BLOCK_W, 1, 1);

            grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W);
            grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD);

            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int);

            stereoKernel<RADIUS><<<grid, threads, smem_size, stream>>>(left.data, right.data, left.step, disp, maxdisp);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
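
        // A worked example (illustrative only, not compiled) of the shared-memory
        // budget above, for an assumed RADIUS of 10 (a 21x21 window): one BLOCK_W
        // cache row plus one (BLOCK_W + 2*RADIUS) column-SSD row per disparity.
        #if 0
        enum { EXAMPLE_RADIUS = 10 };
        enum { COL_SSD_SIZE   = BLOCK_W + 2 * EXAMPLE_RADIUS };            // 148 column sums per disparity
        enum { SMEM_WORDS     = BLOCK_W + N_DISPARITIES * COL_SSD_SIZE };  // 128 + 8 * 148 = 1312 words
        // 1312 * sizeof(unsigned int) = 5248 bytes of dynamic shared memory per block.
        #endif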
        typedef void (*kernel_caller_t)(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, cudaStream_t & stream);

        const static kernel_caller_t callers[] =
        {
            0,
            kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>,
            kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>,
            kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>,
            kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>,
            kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25>
        };

        const int callers_num = sizeof(callers)/sizeof(callers[0]);
        void stereoBM_GPU(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, int winsz, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t& stream)
        {
            int winsz2 = winsz >> 1;

            if (winsz2 == 0 || winsz2 >= callers_num)
                cv::gpu::error("Unsupported window size", __FILE__, __LINE__, "stereoBM_GPU");

            //cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferL1) );
            //cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferShared) );

            cudaSafeCall( cudaMemset2D(disp.data, disp.step, 0, disp.cols, disp.rows) );
            cudaSafeCall( cudaMemset2D(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows) );

            cudaSafeCall( cudaMemcpyToSymbol( cwidth, &left.cols, sizeof(left.cols) ) );
            cudaSafeCall( cudaMemcpyToSymbol( cheight, &left.rows, sizeof(left.rows) ) );
            cudaSafeCall( cudaMemcpyToSymbol( cminSSDImage, &minSSD_buf.data, sizeof(minSSD_buf.data) ) );

            size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize();
            cudaSafeCall( cudaMemcpyToSymbol( cminSSD_step, &minssd_step, sizeof(minssd_step) ) );

            callers[winsz2](left, right, disp, maxdisp, stream);
        }
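
        // A minimal host-side sketch (illustrative only, not compiled) of driving
        // stereoBM_GPU directly; in OpenCV proper this is wrapped by
        // cv::gpu::StereoBM_GPU::operator(). The buffer names and the maxdisp/winsz
        // values are assumptions for the example, not requirements.
        #if 0
        cv::gpu::GpuMat left, right;                    // CV_8UC1 rectified pair, already uploaded
        cv::gpu::GpuMat disp(left.size(), CV_8UC1);     // output: one disparity byte per pixel
        cv::gpu::GpuMat minSSD(left.size(), CV_32SC1);  // scratch: running minimum SSD per pixel
        cudaStream_t stream = 0;                        // default stream => synchronous call
        stereoBM_GPU(left, right, disp, 64, 19, minSSD, stream);
        #endif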
        //////////////////////////////////////////////////////////////////////////////////////////////////
        /////////////////////////////////////// Sobel Prefilter //////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////////

        texture<unsigned char, 2, cudaReadModeElementType> texForSobel;
        __global__ void prefilter_kernel(PtrStepSzb output, int prefilterCap)
        {
            int x = blockDim.x * blockIdx.x + threadIdx.x;
            int y = blockDim.y * blockIdx.y + threadIdx.y;

            if (x < output.cols && y < output.rows)
            {
                // horizontal Sobel response at (x, y)
                int conv = (int)tex2D(texForSobel, x - 1, y - 1) * (-1) + (int)tex2D(texForSobel, x + 1, y - 1) * (1) +
                           (int)tex2D(texForSobel, x - 1, y    ) * (-2) + (int)tex2D(texForSobel, x + 1, y    ) * (2) +
                           (int)tex2D(texForSobel, x - 1, y + 1) * (-1) + (int)tex2D(texForSobel, x + 1, y + 1) * (1);

                // clamp to [-prefilterCap, prefilterCap], then shift into [0, 2*prefilterCap]
                conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255);
                output.ptr(y)[x] = conv & 0xFF;
            }
        }
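
        // The response mapping above, unpacked (illustrative only, not compiled).
        // For a typical prefilterCap of 31: conv = -100 maps to 0, conv = 0 maps to
        // 31, conv = +100 maps to 62. The function name is hypothetical.
        #if 0
        int prefilter_remap_reference(int conv, int cap)
        {
            int v = ::min(::max(conv, -cap), cap) + cap;  // [-cap, cap] -> [0, 2*cap]
            return ::min(v, 255);                         // keep the result a byte
        }
        #endif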
        void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, cudaStream_t & stream)
        {
            cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
            cudaSafeCall( cudaBindTexture2D( 0, texForSobel, input.data, desc, input.cols, input.rows, input.step ) );

            dim3 threads(16, 16, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(input.cols, threads.x);
            grid.y = divUp(input.rows, threads.y);

            prefilter_kernel<<<grid, threads, 0, stream>>>(output, prefilterCap);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );

            cudaSafeCall( cudaUnbindTexture (texForSobel) );
        }
        //////////////////////////////////////////////////////////////////////////////////////////////////
        /////////////////////////////////// Textureness filtering ////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////////

        texture<unsigned char, 2, cudaReadModeNormalizedFloat> texForTF;
        __device__ __forceinline__ float sobel(int x, int y)
        {
            float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) +
                         tex2D(texForTF, x - 1, y    ) * (-2) + tex2D(texForTF, x + 1, y    ) * (2) +
                         tex2D(texForTF, x - 1, y + 1) * (-1) + tex2D(texForTF, x + 1, y + 1) * (1);
            return fabs(conv);
        }
        __device__ float CalcSums(float *cols, float *cols_cache, int winsz)
        {
            float cache = 0;
            float cache2 = 0;
            int winsz2 = winsz/2;

            for(int i = 1; i <= winsz2; i++)
                cache += cols[i];

            cols_cache[0] = cache;

            __syncthreads();

            if (threadIdx.x < blockDim.x - winsz2)
                cache2 = cols_cache[winsz2];
            else
                for(int i = winsz2 + 1; i < winsz; i++)
                    cache2 += cols[i];

            return cols[0] + cache + cache2;
        }
        #define RpT (2 * ROWSperTHREAD) // got experimentally

        __global__ void textureness_kernel(PtrStepSzb disp, int winsz, float threshold)
        {
            int winsz2 = winsz/2;
            int n_dirty_pixels = (winsz2) * 2;

            extern __shared__ float cols_cache[];
            float *cols = cols_cache + blockDim.x + threadIdx.x;
            float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0;

            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int beg_row = blockIdx.y * RpT;
            int end_row = ::min(beg_row + RpT, disp.rows);

            if (x < disp.cols)
            {
                int y = beg_row;
                float sum = 0;
                float sum_extra = 0;

                for(int i = y - winsz2; i <= y + winsz2; ++i)
                {
                    sum += sobel(x - winsz2, i);
                    if (cols_extra)
                        sum_extra += sobel(x + blockDim.x - winsz2, i);
                }
                *cols = sum;
                if (cols_extra)
                    *cols_extra = sum_extra;
                __syncthreads();

                // texForTF is a normalized-float texture, hence the 255 rescale
                float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
                if (sum_win < threshold)
                    disp.data[y * disp.step + x] = 0;
                __syncthreads();

                for(int y = beg_row + 1; y < end_row; ++y)
                {
                    sum = sum - sobel(x - winsz2, y - winsz2 - 1) + sobel(x - winsz2, y + winsz2);
                    *cols = sum;
                    if (cols_extra)
                    {
                        sum_extra = sum_extra - sobel(x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(x + blockDim.x - winsz2, y + winsz2);
                        *cols_extra = sum_extra;
                    }
                    __syncthreads();

                    float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
                    if (sum_win < threshold)
                        disp.data[y * disp.step + x] = 0;
                    __syncthreads();
                }
            }
        }
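
        // The per-row update above is the same O(1) rolling-window trick as
        // StepDown, applied vertically to Sobel magnitudes: moving the window one
        // row down drops its old top row and adds the new bottom row. A scalar
        // sketch (illustrative only, not compiled; g() is a hypothetical stand-in
        // for the fabs'd Sobel response):
        #if 0
        float rolling_sum_reference(float sum, int x, int y, int winsz2)
        {
            // window moves from rows [y-1-winsz2, y-1+winsz2] to [y-winsz2, y+winsz2]
            return sum - g(x, y - winsz2 - 1) + g(x, y + winsz2);
        }
        #endif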
        void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, cudaStream_t & stream)
        {
            avgTexturenessThreshold *= winsz * winsz;

            texForTF.filterMode     = cudaFilterModeLinear;
            texForTF.addressMode[0] = cudaAddressModeWrap;
            texForTF.addressMode[1] = cudaAddressModeWrap;

            cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
            cudaSafeCall( cudaBindTexture2D( 0, texForTF, input.data, desc, input.cols, input.rows, input.step ) );

            dim3 threads(128, 1, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(input.cols, threads.x);
            grid.y = divUp(input.rows, RpT);

            size_t smem_size = (threads.x + threads.x + (winsz/2) * 2) * sizeof(float);
            textureness_kernel<<<grid, threads, smem_size, stream>>>(disp, winsz, avgTexturenessThreshold);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );

            cudaSafeCall( cudaUnbindTexture (texForTF) );
        }
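
        // A worked example (illustrative only, not compiled) of the launch above,
        // assuming winsz = 21: shared memory holds one blockDim.x cache row, one
        // blockDim.x row of column sums, and 2*winsz2 extra "dirty" columns.
        #if 0
        enum { EXAMPLE_WINSZ = 21 };
        enum { SMEM_FLOATS   = 128 + 128 + (EXAMPLE_WINSZ / 2) * 2 };  // 276 floats
        // 276 * sizeof(float) = 1104 bytes of dynamic shared memory per block.
        #endif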
    } // namespace stereobm
}}} // namespace cv { namespace gpu { namespace device

#endif /* CUDA_DISABLER */