--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.
+//\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "precomp.hpp"\r
+\r
+using namespace cv;\r
+using namespace cv::gpu;\r
+using namespace std;\r
+\r
+// Default truncation value for the discontinuity (smoothness) term.
+const float cv::gpu::StereoBeliefPropagation_GPU::DEFAULT_DISC_COST = 1.7f;
+// Default truncation value for the per-pixel data term.
+const float cv::gpu::StereoBeliefPropagation_GPU::DEFAULT_DATA_COST = 10.0f;
+// Default weight applied to the (truncated) data term when building the cost volume.
+const float cv::gpu::StereoBeliefPropagation_GPU::DEFAULT_LAMBDA_COST = 0.07f;
+\r
+#if !defined (HAVE_CUDA)\r
+\r
+// CPU-only build stubs: every public entry point just reports that CUDA
+// support is unavailable.
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int, int, int, float, float, float) { throw_nogpu(); }
+
+void cv::gpu::StereoBeliefPropagation_GPU::operator() (const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }
+\r
+#else /* !defined (HAVE_CUDA) */\r
+\r
+typedef DevMem2D_<float> DevMem2Df;  // 2D device-memory view over float data
+
+// Forward declarations of the CUDA-side entry points (implemented in the .cu file).
+namespace cv { namespace gpu { namespace impl {
+    // Uploads the BP parameters into device __constant__ memory.
+    extern "C" void load_constants(int ndisp, float disc_cost, float data_cost, float lambda);
+    // Fills mdata with the per-pixel, per-disparity data cost from the left/right images.
+    extern "C" void comp_data_caller(const DevMem2D& l, const DevMem2D& r, DevMem2Df mdata);
+    // Downsamples the data-cost volume to the next (coarser) pyramid level by 2x2 summation.
+    extern "C" void data_down_kernel_caller(int dst_cols, int dst_rows, int src_rows, const DevMem2Df& src, DevMem2Df dst);
+    // Upsamples the four message volumes from the coarser level into mu/md/ml/mr[dst_idx].
+    extern "C" void level_up(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Df* mu, DevMem2Df* md, DevMem2Df* ml, DevMem2Df* mr);
+    // Runs `iters` message-passing sweeps on one pyramid level.
+    extern "C" void call_all_iterations(int cols, int rows, int iters, DevMem2Df& u, DevMem2Df& d, DevMem2Df& l, DevMem2Df& r, const DevMem2Df& data);
+    // Selects the best disparity per pixel from the final messages + data cost.
+    extern "C" void output_caller(const DevMem2Df& u, const DevMem2Df& d, const DevMem2Df& l, const DevMem2Df& r, const DevMem2Df& data, DevMem2D disp);
+}}}
+\r
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, float disc_cost_, float data_cost_, float lambda_)
+    : ndisp(ndisp_), iters(iters_), levels(levels_), disc_cost(disc_cost_), data_cost(data_cost_), lambda(lambda_), datas(levels_)
+{
+    // Disparities are written into a CV_8U result, so at most 2^8 labels fit.
+    const int max_supported_ndisp = 1 << (sizeof(unsigned char) * 8);
+
+    CV_Assert(0 < ndisp && ndisp <= max_supported_ndisp);
+    CV_Assert(ndisp % 8 == 0);
+
+    // Reject degenerate configurations up front: operator() shrinks the image by
+    // 2^(levels-1), sizes per-level arrays with `levels` and runs `iters` sweeps
+    // at the finest level, so both must be positive.
+    CV_Assert(levels > 0);
+    CV_Assert(iters > 0);
+}
+\r
+void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat& left, const GpuMat& right, GpuMat& disp)
+{
+    // Hierarchical loopy belief propagation: build a data-cost pyramid, then run
+    // message passing from the coarsest level down to the finest, seeding each
+    // level with messages upsampled from the level above.
+    CV_DbgAssert(left.cols == right.cols && left.rows == right.rows && left.type() == right.type() && left.type() == CV_8U);
+
+    const Scalar zero = Scalar::all(0);
+
+    int rows = left.rows;
+    int cols = left.cols;
+
+    // Size of the coarsest pyramid level; it must keep a sensible minimum size.
+    int divisor = (int)pow(2.f, levels - 1.0f);
+    int lowest_cols = cols / divisor;
+    int lowest_rows = rows / divisor;
+    const int min_image_dim_size = 20;
+    CV_Assert(min(lowest_cols, lowest_rows) > min_image_dim_size);
+
+    disp.create(rows, cols, CV_8U);
+
+    // Full-resolution message buffers (up/down/left/right): ndisp disparity
+    // planes per direction, stored as one (rows * ndisp) x cols CV_32F matrix.
+    u.create(rows * ndisp, cols, CV_32F);
+    d.create(rows * ndisp, cols, CV_32F);
+    l.create(rows * ndisp, cols, CV_32F);
+    r.create(rows * ndisp, cols, CV_32F);
+
+    // The coarsest level starts from all-zero messages. Which buffer set serves
+    // the coarsest level depends on the parity of `levels` (see mem_idx below),
+    // so only that set needs clearing.
+    if (levels & 1)
+    {
+        u = zero; //can clear less area
+        d = zero;
+        l = zero;
+        r = zero;
+    }
+
+    if (levels > 1)
+    {
+        // Half-resolution buffer set, ping-ponged with u/d/l/r across levels.
+        int less_rows = (rows + 1) / 2;
+        int less_cols = (cols + 1) / 2;
+
+        u2.create(less_rows * ndisp, less_cols, CV_32F);
+        d2.create(less_rows * ndisp, less_cols, CV_32F);
+        l2.create(less_rows * ndisp, less_cols, CV_32F);
+        r2.create(less_rows * ndisp, less_cols, CV_32F);
+
+        if ((levels & 1) == 0)
+        {
+            u2 = zero;
+            d2 = zero;
+            l2 = zero;
+            r2 = zero;
+        }
+    }
+
+    impl::load_constants(ndisp, disc_cost, data_cost, lambda);
+
+    // Per-level geometry and iteration counts (index 0 = finest level).
+    vector<int> cols_all(levels);
+    vector<int> rows_all(levels);
+    vector<int> iters_all(levels);
+
+    cols_all[0] = cols;
+    rows_all[0] = rows;
+    iters_all[0] = iters;
+
+    datas[0].create(rows * ndisp, cols, CV_32F);
+    //datas[0] = Scalar(data_cost); //TODO: done in kernel, but not sure if correct
+
+    impl::comp_data_caller(left, right, datas.front());
+
+    // Build the data-cost pyramid by 2x2 summation of the finer level.
+    for (int i = 1; i < levels; i++)
+    {
+        cols_all[i] = (cols_all[i-1] + 1)/2;
+        rows_all[i] = (rows_all[i-1] + 1)/2;
+
+        // This differs from Felzenszwalb's algorithm: the iteration count is
+        // reduced for each coarser level.
+        iters_all[i] = max(2 * iters_all[i-1] / 3, 1);
+
+        datas[i].create(rows_all[i] * ndisp, cols_all[i], CV_32F);
+
+        impl::data_down_kernel_caller(cols_all[i], rows_all[i], rows_all[i-1], datas[i-1], datas[i]);
+    }
+
+    // Two buffer sets per direction, alternated between consecutive levels.
+    DevMem2D_<float> mus[] = {u, u2};
+    DevMem2D_<float> mds[] = {d, d2};
+    DevMem2D_<float> mrs[] = {r, r2};
+    DevMem2D_<float> mls[] = {l, l2};
+
+    // The starting parity is chosen so the finest level (i == 0) always lands
+    // on index 0, i.e. on the full-resolution u/d/l/r read by output_caller.
+    int mem_idx = (levels & 1) ? 0 : 1;
+
+    for (int i = levels - 1; i >= 0; i--) // for lower level we have already computed messages by setting to zero
+    {
+        if (i != levels - 1)
+            impl::level_up(mem_idx, cols_all[i], rows_all[i], rows_all[i+1], mus, mds, mls, mrs);
+
+        impl::call_all_iterations(cols_all[i], rows_all[i], iters_all[i], mus[mem_idx], mds[mem_idx], mls[mem_idx], mrs[mem_idx], datas[i]);
+
+        mem_idx = (mem_idx + 1) & 1;
+    }
+
+    impl::output_caller(u, d, l, r, datas.front(), disp);
+}
+\r
+#endif /* !defined (HAVE_CUDA) */\r
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of the copyright holders may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "opencv2/gpu/devmem2d.hpp"\r
+#include "safe_call.hpp"\r
+\r
+using namespace cv::gpu;\r
+\r
+// Integer ceiling division, used to size launch grids.
+static inline int divUp(int a, int b)
+{
+    int q = a / b;
+    return (a % b != 0) ? q + 1 : q;
+}
+\r
+#ifndef FLT_MAX\r
+#define FLT_MAX 3.402823466e+38F\r
+#endif\r
+\r
+typedef unsigned char uchar;\r
+\r
+namespace beliefpropagation_gpu
+{
+    // Algorithm parameters, uploaded once per run by load_constants().
+    __constant__ int cndisp;        // number of disparity labels
+    __constant__ float cdisc_cost;  // truncation of the smoothness (discontinuity) term
+    __constant__ float cdata_cost;  // truncation of the per-pixel data term
+    __constant__ float clambda;     // weight applied to the truncated data term
+};
+\r
+///////////////////////////////////////////////////////////////\r
+////////////////// comp data /////////////////////////////////\r
+///////////////////////////////////////////////////////////////\r
+\r
+namespace beliefpropagation_gpu
+{
+    // Data term: for each interior pixel and disparity `disp`,
+    //   data(x, y, disp) = clambda * min(|L(x, y) - R(x - disp, y)|, cdata_cost).
+    // When x - disp falls outside the right image, the raw cdata_cost is stored.
+    // NOTE(review): that fallback is not scaled by clambda, unlike the main
+    // branch — verify the asymmetry is intended.
+    // `data` holds cndisp planes; the plane stride is data_step * rows.
+    // The 1-pixel border frame is never written.
+    __global__ void comp_data_kernel(uchar* l, uchar* r, size_t step, float* data, size_t data_step, int cols, int rows)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)
+        {
+            uchar *ls = l + y * step + x;
+            uchar *rs = r + y * step + x;
+
+            float *ds = data + y * data_step + x;
+            size_t disp_step = data_step * rows;
+
+            for (int disp = 0; disp < cndisp; disp++)
+            {
+                if (x - disp >= 0)
+                {
+                    int le = ls[0];
+                    int re = rs[-disp];
+                    float val = abs(le - re);
+
+                    ds[disp * disp_step] = clambda * fmin(val, cdata_cost);
+                }
+                else
+                {
+                    ds[disp * disp_step] = cdata_cost;
+                }
+            }
+        }
+    }
+}
+\r
+namespace cv { namespace gpu { namespace impl {
+    // Uploads the algorithm parameters into __constant__ memory; must run
+    // before any of the kernels below are launched.
+    extern "C" void load_constants(int ndisp, float disc_cost, float data_cost, float lambda)
+    {
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cndisp, &ndisp, sizeof(ndisp)) );
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdisc_cost, &disc_cost, sizeof(disc_cost)) );
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdata_cost, &data_cost, sizeof(data_cost)) );
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::clambda, &lambda, sizeof(lambda)) );
+    }
+
+    // Launches comp_data_kernel with one thread per pixel (32x8 blocks).
+    extern "C" void comp_data_caller(const DevMem2D& l, const DevMem2D& r, DevMem2D_<float> mdata)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(l.cols, threads.x);
+        grid.y = divUp(l.rows, threads.y);
+
+        beliefpropagation_gpu::comp_data_kernel<<<grid, threads>>>(l.ptr, r.ptr, l.step, mdata.ptr, mdata.step/sizeof(float), l.cols, l.rows);
+
+        // Surface launch-configuration errors immediately; execution errors are
+        // caught by the synchronize below.
+        cudaSafeCall( cudaGetLastError() );
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
+\r
+///////////////////////////////////////////////////////////////\r
+////////////////// data_step_down ////////////////////////////\r
+///////////////////////////////////////////////////////////////\r
+\r
+namespace beliefpropagation_gpu
+{
+    // Downsamples the data-cost volume: each coarse cell is the sum of the 2x2
+    // block of fine-level costs, computed independently for every disparity.
+    // Plane strides are step * rows for both source and destination volumes.
+    // NOTE(review): the host computes dst dims as (src + 1) / 2, so for odd
+    // source sizes the 2*y+1 / 2*x+1 reads in the last row/column index one
+    // past the valid source area — verify against the allocation.
+    __global__ void data_down_kernel(int dst_cols, int dst_rows, int src_rows, float *src, size_t src_step, float *dst, size_t dst_step)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x < dst_cols && y < dst_rows)
+        {
+            const size_t dst_disp_step = dst_step * dst_rows;
+            const size_t src_disp_step = src_step * src_rows;
+
+            for (int d = 0; d < cndisp; ++d)
+            {
+                // Sum the 2x2 neighbourhood in the finer level.
+                float dst_reg = src[d * src_disp_step + src_step * (2*y+0) + (2*x+0)];
+                dst_reg += src[d * src_disp_step + src_step * (2*y+1) + (2*x+0)];
+                dst_reg += src[d * src_disp_step + src_step * (2*y+0) + (2*x+1)];
+                dst_reg += src[d * src_disp_step + src_step * (2*y+1) + (2*x+1)];
+
+                dst[d * dst_disp_step + y * dst_step + x] = dst_reg;
+            }
+        }
+    }
+}
+\r
+namespace cv { namespace gpu { namespace impl {
+    // Launches data_down_kernel with one thread per destination pixel.
+    extern "C" void data_down_kernel_caller(int dst_cols, int dst_rows, int src_rows, const DevMem2D_<float>& src, DevMem2D_<float> dst)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(dst_cols, threads.x);
+        grid.y = divUp(dst_rows, threads.y);
+
+        beliefpropagation_gpu::data_down_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, src.ptr, src.step/sizeof(float), dst.ptr, dst.step/sizeof(float));
+
+        // Catch launch-configuration errors distinctly from execution errors.
+        cudaSafeCall( cudaGetLastError() );
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
+\r
+///////////////////////////////////////////////////////////////\r
+////////////////// level up messages ////////////////////////\r
+///////////////////////////////////////////////////////////////\r
+\r
+\r
+namespace beliefpropagation_gpu
+{
+    // Upsamples one message volume: each fine-level pixel copies the message of
+    // its coarse-level parent (x/2, y/2), for every disparity plane.
+    __global__ void level_up_kernel(int dst_cols, int dst_rows, int src_rows, float *src, size_t src_step, float *dst, size_t dst_step)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x < dst_cols && y < dst_rows)
+        {
+            const size_t dst_disp_step = dst_step * dst_rows;
+            const size_t src_disp_step = src_step * src_rows;
+
+            float *dstr = dst + y * dst_step + x;
+            float *srcr = src + y/2 * src_step + x/2;
+
+            for (int d = 0; d < cndisp; ++d)
+                dstr[d * dst_disp_step] = srcr[d * src_disp_step];
+        }
+    }
+}
+\r
+namespace cv { namespace gpu { namespace impl {
+    // Seeds level dst_idx of the message ping-pong buffers by upsampling the
+    // four directional message volumes stored at the opposite index (the
+    // previous, coarser level).
+    extern "C" void level_up(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D_<float>* mu, DevMem2D_<float>* md, DevMem2D_<float>* ml, DevMem2D_<float>* mr)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(dst_cols, threads.x);
+        grid.y = divUp(dst_rows, threads.y);
+
+        int src_idx = (dst_idx + 1) & 1;
+
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, mu[src_idx].ptr, mu[src_idx].step/sizeof(float), mu[dst_idx].ptr, mu[dst_idx].step/sizeof(float));
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, md[src_idx].ptr, md[src_idx].step/sizeof(float), md[dst_idx].ptr, md[dst_idx].step/sizeof(float));
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, ml[src_idx].ptr, ml[src_idx].step/sizeof(float), ml[dst_idx].ptr, ml[dst_idx].step/sizeof(float));
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, mr[src_idx].ptr, mr[src_idx].step/sizeof(float), mr[dst_idx].ptr, mr[dst_idx].step/sizeof(float));
+
+        // Surface launch-configuration errors from any of the four launches.
+        cudaSafeCall( cudaGetLastError() );
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
+\r
+\r
+///////////////////////////////////////////////////////////////\r
+///////////////// Calcs all iterations ///////////////////////\r
+///////////////////////////////////////////////////////////////\r
+\r
+\r
+namespace beliefpropagation_gpu
+{
+    // In-place lower envelope with a linear (slope 1) penalty over the
+    // disparity axis: after the forward and backward passes, dst[d] holds
+    // min over d' of (dst[d'] + |d - d'|). `step` is the stride between
+    // disparity planes.
+    __device__ void calc_min_linear_penalty(float *dst, size_t step)
+    {
+        // Forward pass.
+        float prev = dst[0];
+        float cur;
+        for (int disp = 1; disp < cndisp; ++disp)
+        {
+            prev += 1.0f;
+            cur = dst[step * disp];
+            if (prev < cur)
+                cur = prev;
+            dst[step * disp] = prev = cur;
+        }
+
+        // Backward pass completes the two-sided envelope.
+        prev = dst[(cndisp - 1) * step];
+        for (int disp = cndisp - 2; disp >= 0; disp--)
+        {
+            prev += 1.0f;
+            cur = dst[step * disp];
+            if (prev < cur)
+                cur = prev;
+            dst[step * disp] = prev = cur;
+        }
+    }
+
+    // Computes one outgoing message (truncated-linear smoothness model):
+    //   1) raw cost = msg1 + msg2 + msg3 + data per disparity, tracking its minimum;
+    //   2) distance-transform it with calc_min_linear_penalty;
+    //   3) truncate at (minimum + cdisc_cost);
+    //   4) subtract the mean so message magnitudes stay bounded.
+    __device__ void message(float *msg1, float *msg2, float *msg3, float *data, float *dst, size_t msg_disp_step, size_t data_disp_step)
+    {
+        float minimum = FLT_MAX;
+
+        // Step 1: raw cost per disparity, remembering the global minimum.
+        for(int i = 0; i < cndisp; ++i)
+        {
+            float dst_reg = msg1[msg_disp_step * i] + msg2[msg_disp_step * i] + msg3[msg_disp_step * i] + data[data_disp_step * i];
+
+            if (dst_reg < minimum)
+                minimum = dst_reg;
+
+            dst[msg_disp_step * i] = dst_reg;
+
+        }
+
+        // Step 2: linear-penalty distance transform over the disparity axis.
+        calc_min_linear_penalty(dst, msg_disp_step);
+
+        // Step 3: truncate at minimum + cdisc_cost, accumulating the sum for
+        // the normalization step.
+        minimum += cdisc_cost;
+
+        float sum = 0;
+        for(int i = 0; i < cndisp; ++i)
+        {
+            float dst_reg = dst[msg_disp_step * i];
+            if (dst_reg > minimum)
+            {
+                dst[msg_disp_step * i] = dst_reg = minimum;
+            }
+            sum += dst_reg;
+        }
+        sum /= cndisp;
+
+        // Step 4: zero-mean normalization.
+        for(int i = 0; i < cndisp; ++i)
+            dst[msg_disp_step * i] -= sum;
+    }
+
+    // One red-black sweep: threads cover every other column, and the
+    // ((y + t) & 1) offset alternates the checkerboard half between
+    // iterations, which makes the in-place update safe (only pixels of the
+    // other half are read). Each of the four directional messages is rebuilt
+    // from the three complementary messages taken at the corresponding
+    // neighbour offsets plus the local data term. The 1-pixel border frame is
+    // never updated.
+    __global__ void one_iteration(int t, float* u, float *d, float *l, float *r, size_t msg_step, float *data, size_t data_step, int cols, int rows)
+    {
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+        int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
+
+        if ( (y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1))
+        {
+            float *us = u + y * msg_step + x;
+            float *ds = d + y * msg_step + x;
+            float *ls = l + y * msg_step + x;
+            float *rs = r + y * msg_step + x;
+            float *dt = data + y * data_step + x;
+            size_t msg_disp_step = msg_step * rows;
+            size_t data_disp_step = data_step * rows;
+
+            message(us + msg_step, ls + 1, rs - 1, dt, us, msg_disp_step, data_disp_step);
+            message(ds - msg_step, ls + 1, rs - 1, dt, ds, msg_disp_step, data_disp_step);
+            message(us + msg_step, ds - msg_step, rs - 1, dt, rs, msg_disp_step, data_disp_step);
+            message(us + msg_step, ds - msg_step, ls + 1, dt, ls, msg_disp_step, data_disp_step);
+        }
+    }
+}
+\r
+namespace cv { namespace gpu { namespace impl {
+    // Runs `iters` red-black sweeps over one pyramid level. Each launch covers
+    // half the pixels (hence the doubled horizontal stride in the grid size);
+    // consecutive launches alternate the checkerboard parity via `t`.
+    // Assumes u, d, l and r share the same step (the host allocates them with
+    // identical geometry).
+    extern "C" void call_all_iterations(int cols, int rows, int iters, DevMem2D_<float>& u, DevMem2D_<float>& d, DevMem2D_<float>& l, DevMem2D_<float>& r, const DevMem2D_<float>& data)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(cols, threads.x << 1);
+        grid.y = divUp(rows, threads.y);
+
+        for(int t = 0; t < iters; ++t)
+        {
+            beliefpropagation_gpu::one_iteration<<<grid, threads>>>(t, u.ptr, d.ptr, l.ptr, r.ptr, u.step/sizeof(float), data.ptr, data.step/sizeof(float), cols, rows);
+            // Catch launch-configuration errors before queuing the next sweep.
+            cudaSafeCall( cudaGetLastError() );
+        }
+
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
+\r
+\r
+///////////////////////////////////////////////////////////////\r
+////////////////// Output caller /////////////////////////////\r
+///////////////////////////////////////////////////////////////\r
+\r
+namespace beliefpropagation_gpu
+{
+    // Final disparity selection: for each interior pixel, sum the four messages
+    // arriving from the up/down/left/right neighbours plus the data cost, and
+    // write the disparity with the smallest total. All five float volumes share
+    // the same row step; the plane stride is rows * step.
+    __global__ void output(int cols, int rows, float *u, float *d, float *l, float *r, float* data, size_t step, unsigned char *disp, size_t res_step)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (y > 0 && y < rows - 1)
+        if (x > 0 && x < cols - 1)
+        {
+            // Messages addressed to (x, y) are stored at the neighbour that
+            // produced them, hence the +/-1 row/column offsets.
+            float *us = u + (y + 1) * step + x;
+            float *ds = d + (y - 1) * step + x;
+            float *ls = l + y * step + (x + 1);
+            float *rs = r + y * step + (x - 1);
+            float *dt = data + y * step + x;
+
+            size_t disp_step = rows * step;
+
+            int best = 0;
+            float best_val = FLT_MAX;
+            // NOTE: loop variable `d` shadows the message-pointer parameter `d`;
+            // harmless here because `ds` was derived before the loop, but fragile.
+            for (int d = 0; d < cndisp; ++d)
+            {
+                float val = us[d * disp_step] + ds[d * disp_step] + ls[d * disp_step] + rs[d * disp_step] + dt[d * disp_step];
+
+                if (val < best_val)
+                {
+                    best_val = val;
+                    best = d;
+                }
+            }
+
+            disp[res_step * y + x] = best & 0xFF;
+        }
+    }
+}
+\r
+namespace cv { namespace gpu { namespace impl {
+    // Launches the output kernel with one thread per disparity-map pixel.
+    // Passes u.step for every float volume — assumes all message/data volumes
+    // were allocated with identical geometry by the host.
+    extern "C" void output_caller(const DevMem2D_<float>& u, const DevMem2D_<float>& d, const DevMem2D_<float>& l, const DevMem2D_<float>& r, const DevMem2D_<float>& data, DevMem2D disp)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(disp.cols, threads.x);
+        grid.y = divUp(disp.rows, threads.y);
+
+        beliefpropagation_gpu::output<<<grid, threads>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step);
+
+        // Catch launch-configuration errors distinctly from execution errors.
+        cudaSafeCall( cudaGetLastError() );
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
\ No newline at end of file