/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "internal_shared.hpp"

namespace cv { namespace gpu { namespace device
{
    namespace stereobm
    {
        //////////////////////////////////////////////////////////////////////////////////////////////////
        /////////////////////////////////////// Stereo BM ////////////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////////

        #define ROWSperTHREAD 21     // the number of rows a thread will process

        #define BLOCK_W 128          // the thread block width (464)
        #define N_DISPARITIES 8

        #define STEREO_MIND 0                    // The minimum d range to check
        #define STEREO_DISP_STEP N_DISPARITIES   // the d step, must be <= 1 to avoid aliasing

        __constant__ unsigned int* cminSSDImage;
        __constant__ size_t cminSSD_step;
        __constant__ int cwidth;
        __constant__ int cheight;
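        // The four symbols above are set by stereoBM_GPU() via cudaMemcpyToSymbol
        // before each launch: the image extents plus the global min-SSD scratch image
        // (one unsigned int per pixel) and its row step in elements.
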
        __device__ __forceinline__ int SQ(int a)
        {
            return a * a;
        }

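        // Assembles one (2 * RADIUS + 1)-column SSD window cooperatively: each thread
        // publishes the sum of its RADIUS right-hand columns through col_ssd_cache so
        // a neighbour can reuse it; threads near the block edge sum directly instead.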
        template<int RADIUS>
        __device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd)
        {
            unsigned int cache = 0;
            unsigned int cache2 = 0;

            for(int i = 1; i <= RADIUS; i++)
                cache += col_ssd[i];

            col_ssd_cache[0] = cache;

            __syncthreads();

            if (threadIdx.x < BLOCK_W - RADIUS)
                cache2 = col_ssd_cache[RADIUS];
            else
                for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++)
                    cache2 += col_ssd[i];

            return col_ssd[0] + cache + cache2;
        }

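        // Evaluates the window SSD for all N_DISPARITIES candidates of the current
        // pass and returns make_uint2(minimal SSD, index of the best disparity).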
        template<int RADIUS>
        __device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd)
        {
            unsigned int ssd[N_DISPARITIES];

            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS));
            __syncthreads();
            ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS));

            int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7])));

            int bestIdx = 0;
            for (int i = 0; i < N_DISPARITIES; i++)
            {
                if (mssd == ssd[i])
                    bestIdx = i;
            }

            return make_uint2(mssd, bestIdx);
        }

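        // Rolling-window update: when the window moves down one row, subtract the
        // squared differences of the departing row (idx1) and add those of the
        // entering row (idx2), for each of the eight disparity candidates.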
        template<int RADIUS>
        __device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
        {
            unsigned char leftPixel1;
            unsigned char leftPixel2;
            unsigned char rightPixel1[8];
            unsigned char rightPixel2[8];
            unsigned int diff1, diff2;

            leftPixel1 = imageL[idx1];
            leftPixel2 = imageL[idx2];

            idx1 = idx1 - d;
            idx2 = idx2 - d;

            rightPixel1[7] = imageR[idx1 - 7];
            rightPixel1[0] = imageR[idx1 - 0];
            rightPixel1[1] = imageR[idx1 - 1];
            rightPixel1[2] = imageR[idx1 - 2];
            rightPixel1[3] = imageR[idx1 - 3];
            rightPixel1[4] = imageR[idx1 - 4];
            rightPixel1[5] = imageR[idx1 - 5];
            rightPixel1[6] = imageR[idx1 - 6];

            rightPixel2[7] = imageR[idx2 - 7];
            rightPixel2[0] = imageR[idx2 - 0];
            rightPixel2[1] = imageR[idx2 - 1];
            rightPixel2[2] = imageR[idx2 - 2];
            rightPixel2[3] = imageR[idx2 - 3];
            rightPixel2[4] = imageR[idx2 - 4];
            rightPixel2[5] = imageR[idx2 - 5];
            rightPixel2[6] = imageR[idx2 - 6];

            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            diff1 = leftPixel1 - rightPixel1[0];
            diff2 = leftPixel2 - rightPixel2[0];
            col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[1];
            diff2 = leftPixel2 - rightPixel2[1];
            col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[2];
            diff2 = leftPixel2 - rightPixel2[2];
            col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[3];
            diff2 = leftPixel2 - rightPixel2[3];
            col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[4];
            diff2 = leftPixel2 - rightPixel2[4];
            col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[5];
            diff2 = leftPixel2 - rightPixel2[5];
            col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[6];
            diff2 = leftPixel2 - rightPixel2[6];
            col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);

            diff1 = leftPixel1 - rightPixel1[7];
            diff2 = leftPixel2 - rightPixel2[7];
            col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
        }

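        // Builds the initial column sums for the topmost window position: accumulates
        // squared differences over the first (2 * RADIUS + 1) rows for the eight
        // disparity candidates d..d+7 of this pass.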
        template<int RADIUS>
        __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
        {
            unsigned char leftPixel1;
            int idx;
            unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0};

            for(int i = 0; i < (2 * RADIUS + 1); i++)
            {
                idx = y_tex * im_pitch + x_tex;
                leftPixel1 = imageL[idx];
                idx = idx - d;

                diffa[0] += SQ(leftPixel1 - imageR[idx - 0]);
                diffa[1] += SQ(leftPixel1 - imageR[idx - 1]);
                diffa[2] += SQ(leftPixel1 - imageR[idx - 2]);
                diffa[3] += SQ(leftPixel1 - imageR[idx - 3]);
                diffa[4] += SQ(leftPixel1 - imageR[idx - 4]);
                diffa[5] += SQ(leftPixel1 - imageR[idx - 5]);
                diffa[6] += SQ(leftPixel1 - imageR[idx - 6]);
                diffa[7] += SQ(leftPixel1 - imageR[idx - 7]);

                y_tex += 1;
            }

            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0];
            col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1];
            col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2];
            col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3];
            col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4];
            col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5];
            col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6];
            col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7];
        }

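        // Main block-matching kernel. Each block processes a BLOCK_W-wide strip of up
        // to ROWSperTHREAD rows; the outer loop walks the disparity range in chunks of
        // N_DISPARITIES candidates, keeping per-column SSDs in shared memory and
        // updating them incrementally with StepDown as the window slides downwards.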
        template<int RADIUS>
        __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp)
        {
            extern __shared__ unsigned int col_ssd_cache[];
            volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
            volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? col_ssd + BLOCK_W : 0; //#define N_DIRTY_PIXELS (2 * RADIUS)

            //#define X (blockIdx.x * BLOCK_W + threadIdx.x + STEREO_MAXD)
            int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS);
            //#define Y (__mul24(blockIdx.y, ROWSperTHREAD) + RADIUS)
            #define Y (blockIdx.y * ROWSperTHREAD + RADIUS)
            //int Y = blockIdx.y * ROWSperTHREAD + RADIUS;

            unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step;
            unsigned char* disparImage = disp.data + X + Y * disp.step;
            /* the min-SSD image is cleared host-side (cudaMemset2D with 0xFF in
               stereoBM_GPU), so this per-thread initialisation stays disabled:
            if (X < cwidth)
            {
                unsigned int *minSSDImage_end = minSSDImage + min(ROWSperTHREAD, cheight - Y) * minssd_step;
                for(uint *ptr = minSSDImage; ptr != minSSDImage_end; ptr += minssd_step )
                    *ptr = 0xFFFFFFFF;
            }*/
            int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS);
            int y_tex;
            int x_tex = X - RADIUS;

            if (x_tex >= cwidth)
                return;

            for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP)
            {
                y_tex = Y - RADIUS;

                InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd);

                if (col_ssd_extra > 0)
                    if (x_tex + BLOCK_W < cwidth)
                        InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra);

                __syncthreads(); //before MinSSD function

                if (X < cwidth - RADIUS && Y < cheight - RADIUS)
                {
                    uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd);
                    if (minSSD.x < minSSDImage[0])
                    {
                        disparImage[0] = (unsigned char)(d + minSSD.y);
                        minSSDImage[0] = minSSD.x;
                    }
                }

                for(int row = 1; row < end_row; row++)
                {
                    int idx1 = y_tex * img_step + x_tex;
                    int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex;

                    __syncthreads();

                    StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd);

                    if (col_ssd_extra)
                        if (x_tex + BLOCK_W < cwidth)
                            StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra);

                    y_tex += 1;

                    __syncthreads(); //before MinSSD function

                    if (X < cwidth - RADIUS && row < cheight - RADIUS - Y)
                    {
                        int idx = row * cminSSD_step;
                        uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd);
                        if (minSSD.x < minSSDImage[idx])
                        {
                            disparImage[disp.step * row] = (unsigned char)(d + minSSD.y);
                            minSSDImage[idx] = minSSD.x;
                        }
                    }
                } // for row
            } // for d
        }

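        // Host-side launcher for one window radius. The dynamic shared memory covers
        // the BLOCK_W-word col_ssd_cache prologue plus N_DISPARITIES column-SSD rows
        // of (BLOCK_W + 2 * RADIUS) words each.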
        template<int RADIUS> void kernel_caller(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int maxdisp, cudaStream_t & stream)
        {
            dim3 grid(1, 1, 1);
            dim3 threads(BLOCK_W, 1, 1);

            grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W);
            grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD);

            //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
            size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int);

            stereoKernel<RADIUS><<<grid, threads, smem_size, stream>>>(left.data, right.data, left.step, disp, maxdisp);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        typedef void (*kernel_caller_t)(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int maxdisp, cudaStream_t & stream);

        const static kernel_caller_t callers[] =
        {
            0,
            kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>,
            kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>,
            kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>,
            kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>,
            kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25>

            //0,0,0, 0,0,0, 0,0,kernel_caller<9>
        };
        const int callers_num = sizeof(callers)/sizeof(callers[0]);

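        // Entry point: validates the window size, clears the disparity map, fills the
        // min-SSD buffer with 0xFF bytes (UINT_MAX per pixel), uploads the image
        // geometry to constant memory and dispatches the radius-specialised caller.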
        void stereoBM_GPU(const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& disp, int maxdisp, int winsz, const DevMem2D_<unsigned int>& minSSD_buf, cudaStream_t& stream)
        {
            int winsz2 = winsz >> 1;

            if (winsz2 == 0 || winsz2 >= callers_num)
                cv::gpu::error("Unsupported window size", __FILE__, __LINE__, "stereoBM_GPU");

            //cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferL1) );
            //cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferShared) );

            cudaSafeCall( cudaMemset2D(disp.data, disp.step, 0, disp.cols, disp.rows) );
            cudaSafeCall( cudaMemset2D(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows) );

            cudaSafeCall( cudaMemcpyToSymbol( cwidth, &left.cols, sizeof(left.cols) ) );
            cudaSafeCall( cudaMemcpyToSymbol( cheight, &left.rows, sizeof(left.rows) ) );
            cudaSafeCall( cudaMemcpyToSymbol( cminSSDImage, &minSSD_buf.data, sizeof(minSSD_buf.data) ) );

            size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize();
            cudaSafeCall( cudaMemcpyToSymbol( cminSSD_step, &minssd_step, sizeof(minssd_step) ) );

            callers[winsz2](left, right, disp, maxdisp, stream);
        }

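        // Illustrative call site (a sketch, not part of this file): the gpu module's
        // StereoBM wrapper is expected to pass 8-bit single-channel device images, a
        // same-sized 8-bit disparity image and an unsigned int min-SSD scratch buffer:
        //
        //   stereobm::stereoBM_GPU(left, right, disp, ndisp, winSize, minSSD_buf,
        //                          StreamAccessor::getStream(stream));
        //
        // winSize / 2 selects the template instantiation (RADIUS), so the effective
        // window sizes are the odd values 3..51.
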
        //////////////////////////////////////////////////////////////////////////////////////////////////
        /////////////////////////////////////// Sobel Prefilter //////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////////

        texture<unsigned char, 2, cudaReadModeElementType> texForSobel;

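        // Convolves each pixel with a horizontal Sobel kernel, clamps the response to
        // [-prefilterCap, prefilterCap] and shifts it by +prefilterCap, so the output
        // is centred at prefilterCap and saturated at 255.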
        __global__ void prefilter_kernel(DevMem2Db output, int prefilterCap)
        {
            int x = blockDim.x * blockIdx.x + threadIdx.x;
            int y = blockDim.y * blockIdx.y + threadIdx.y;

            if (x < output.cols && y < output.rows)
            {
                int conv = (int)tex2D(texForSobel, x - 1, y - 1) * (-1) + (int)tex2D(texForSobel, x + 1, y - 1) * (1) +
                           (int)tex2D(texForSobel, x - 1, y    ) * (-2) + (int)tex2D(texForSobel, x + 1, y    ) * (2) +
                           (int)tex2D(texForSobel, x - 1, y + 1) * (-1) + (int)tex2D(texForSobel, x + 1, y + 1) * (1);

                conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255);
                output.ptr(y)[x] = conv & 0xFF;
            }
        }

        void prefilter_xsobel(const DevMem2Db& input, const DevMem2Db& output, int prefilterCap, cudaStream_t & stream)
        {
            cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
            cudaSafeCall( cudaBindTexture2D( 0, texForSobel, input.data, desc, input.cols, input.rows, input.step ) );

            dim3 threads(16, 16, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(input.cols, threads.x);
            grid.y = divUp(input.rows, threads.y);

            prefilter_kernel<<<grid, threads, 0, stream>>>(output, prefilterCap);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );

            cudaSafeCall( cudaUnbindTexture (texForSobel) );
        }

        //////////////////////////////////////////////////////////////////////////////////////////////////
        /////////////////////////////////// Textureness filtering ////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////////

        texture<unsigned char, 2, cudaReadModeNormalizedFloat> texForTF;

        __device__ __forceinline__ float sobel(int x, int y)
        {
            float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) +
                         tex2D(texForTF, x - 1, y    ) * (-2) + tex2D(texForTF, x + 1, y    ) * (2) +
                         tex2D(texForTF, x - 1, y + 1) * (-1) + tex2D(texForTF, x + 1, y + 1) * (1);
            return fabs(conv);
        }

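        // Horizontal sliding-window sum over the per-thread column sums, analogous to
        // CalcSSD above: each thread caches the sum of its winsz/2 right-hand columns
        // in cols_cache so neighbours can reuse it; edge threads sum directly.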
        __device__ float CalcSums(float *cols, float *cols_cache, int winsz)
        {
            float cache = 0;
            float cache2 = 0;
            int winsz2 = winsz/2;

            for(int i = 1; i <= winsz2; i++)
                cache += cols[i];

            cols_cache[0] = cache;

            __syncthreads();

            if (threadIdx.x < blockDim.x - winsz2)
                cache2 = cols_cache[winsz2];
            else
                for(int i = winsz2 + 1; i < winsz; i++)
                    cache2 += cols[i];

            return cols[0] + cache + cache2;
        }

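        // Zeroes the disparity wherever the summed Sobel magnitude over the matching
        // window is below the threshold, i.e. where the image is too flat for the SSD
        // match to be trustworthy.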
        #define RpT (2 * ROWSperTHREAD) // got experimentally

        __global__ void textureness_kernel(DevMem2Db disp, int winsz, float threshold)
        {
            int winsz2 = winsz/2;
            int n_dirty_pixels = (winsz2) * 2;

            extern __shared__ float cols_cache[];
            float *cols = cols_cache + blockDim.x + threadIdx.x;
            float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0;

            int x = blockIdx.x * blockDim.x + threadIdx.x;
            int beg_row = blockIdx.y * RpT;
            int end_row = ::min(beg_row + RpT, disp.rows);

            if (x < disp.cols)
            {
                int y = beg_row;

                float sum = 0;
                float sum_extra = 0;

                for(int i = y - winsz2; i <= y + winsz2; ++i)
                {
                    sum += sobel(x - winsz2, i);
                    if (cols_extra)
                        sum_extra += sobel(x + blockDim.x - winsz2, i);
                }
                *cols = sum;
                if (cols_extra)
                    *cols_extra = sum_extra;

                __syncthreads();

                float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
                if (sum_win < threshold)
                    disp.data[y * disp.step + x] = 0;

                __syncthreads();

                for(int y = beg_row + 1; y < end_row; ++y)
                {
                    sum = sum - sobel(x - winsz2, y - winsz2 - 1) + sobel(x - winsz2, y + winsz2);
                    *cols = sum;

                    if (cols_extra)
                    {
                        sum_extra = sum_extra - sobel(x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(x + blockDim.x - winsz2, y + winsz2);
                        *cols_extra = sum_extra;
                    }

                    __syncthreads();

                    float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
                    if (sum_win < threshold)
                        disp.data[y * disp.step + x] = 0;

                    __syncthreads();
                }
            }
        }

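        // Host wrapper: converts the per-pixel average threshold into a window-total
        // threshold, binds the input to a normalised-float texture (hence the "* 255"
        // rescale inside the kernel) and launches textureness_kernel.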
        void postfilter_textureness(const DevMem2Db& input, int winsz, float avgTexturenessThreshold, const DevMem2Db& disp, cudaStream_t & stream)
        {
            avgTexturenessThreshold *= winsz * winsz;

            texForTF.filterMode     = cudaFilterModeLinear;
            texForTF.addressMode[0] = cudaAddressModeWrap;
            texForTF.addressMode[1] = cudaAddressModeWrap;

            cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
            cudaSafeCall( cudaBindTexture2D( 0, texForTF, input.data, desc, input.cols, input.rows, input.step ) );

            dim3 threads(128, 1, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(input.cols, threads.x);
            grid.y = divUp(input.rows, RpT);

            size_t smem_size = (threads.x + threads.x + (winsz/2) * 2) * sizeof(float);

            textureness_kernel<<<grid, threads, smem_size, stream>>>(disp, winsz, avgTexturenessThreshold);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );

            cudaSafeCall( cudaUnbindTexture (texForTF) );
        }

    } // namespace stereobm
}}} // namespace cv { namespace gpu { namespace device