class CV_EXPORTS StereoBeliefPropagation_GPU\r
{\r
public:\r
+ enum { MSG_TYPE_AUTO, \r
+ MSG_TYPE_FLOAT, \r
+ MSG_TYPE_SHORT_SCALE_AUTO, \r
+ MSG_TYPE_SHORT_SCALE_MANUAL };\r
+\r
enum { DEFAULT_NDISP = 64 };\r
enum { DEFAULT_ITERS = 5 };\r
enum { DEFAULT_LEVELS = 5 };\r
\r
//! the default constructor\r
- explicit StereoBeliefPropagation_GPU(int ndisp = DEFAULT_NDISP, \r
- int iters = DEFAULT_ITERS, \r
- int levels = DEFAULT_LEVELS);\r
- //! the full constructor taking the number of disparities, number of BP iterations on first level,\r
- //! number of levels, truncation of discontinuity cost, truncation of data cost and weighting of data cost.\r
- StereoBeliefPropagation_GPU(int ndisp, int iters, int levels, float disc_cost, float data_cost, float lambda);\r
+ explicit StereoBeliefPropagation_GPU(int ndisp_ = DEFAULT_NDISP, \r
+ int iters_ = DEFAULT_ITERS, \r
+ int levels_ = DEFAULT_LEVELS,\r
+ int msg_type_ = MSG_TYPE_AUTO,\r
+                                        float msg_scale_ = 1.0f);\r
+ //! the full constructor taking the number of disparities, number of BP iterations on each level,\r
+ //! number of levels, truncation of data cost, data weight, \r
+ //! truncation of discontinuity cost and discontinuity single jump\r
+ StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, \r
+ float max_data_term_, float data_weight_,\r
+ float max_disc_term_, float disc_single_jump_,\r
+ int msg_type_ = MSG_TYPE_AUTO,\r
+                                        float msg_scale_ = 1.0f);\r
\r
//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair,\r
- //! if disparity is empty output type will be CV_32S else output type will be disparity.type().\r
+ //! if disparity is empty output type will be CV_16S else output type will be disparity.type().\r
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);\r
\r
    //! Async version\r
    void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, const CudaStream& stream);\r
\r
    int ndisp;\r
\r
    int iters;\r
    int levels;\r
\r
- float disc_cost;\r
- float data_cost;\r
- float lambda;\r
+ float max_data_term; \r
+ float data_weight;\r
+ float max_disc_term; \r
+ float disc_single_jump;\r
+ \r
+ int msg_type;\r
+ float msg_scale;\r
private:\r
GpuMat u, d, l, r, u2, d2, l2, r2;\r
std::vector<GpuMat> datas; \r
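// Usage sketch (illustrative only; assumes rectified single-channel 8-bit
// GpuMat inputs `left` and `right`):
//
//     cv::gpu::StereoBeliefPropagation_GPU bp(64, 5, 5);   // MSG_TYPE_AUTO by default
//     cv::gpu::GpuMat disp;                                // empty -> created as CV_16S
//     bp(left, right, disp);
//
//     // full constructor with explicit 16-bit messages and a manual scale:
//     cv::gpu::StereoBeliefPropagation_GPU bp16(64, 5, 5, 10.0f, 0.07f, 1.7f, 1.0f,
//         cv::gpu::StereoBeliefPropagation_GPU::MSG_TYPE_SHORT_SCALE_MANUAL, 10.0f);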
\r
#if !defined (HAVE_CUDA)\r
\r
-cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int, int, int) { throw_nogpu(); }\r
-cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int, int, int, float, float, float) { throw_nogpu(); }\r
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int, int, int, int, float) { throw_nogpu(); }\r
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int, int, int, float, float, float, float, int, float) { throw_nogpu(); }\r
\r
void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }\r
void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat&, const GpuMat&, GpuMat&, const CudaStream&) { throw_nogpu(); }\r
\r
#else /* !defined (HAVE_CUDA) */\r
\r
-static const float DEFAULT_DISC_COST = 1.7f;\r
-static const float DEFAULT_DATA_COST = 10.0f;\r
-static const float DEFAULT_LAMBDA_COST = 0.07f;\r
-\r
-typedef DevMem2D_<float> DevMem2Df;\r
-typedef DevMem2D_<int> DevMem2Di;\r
+const float DEFAULT_MAX_DATA_TERM = 10.0f;\r
+const float DEFAULT_DATA_WEIGHT = 0.07f;\r
+const float DEFAULT_MAX_DISC_TERM = 1.7f;\r
+const float DEFAULT_DISC_SINGLE_JUMP = 1.0f;\r
\r
namespace cv { namespace gpu { namespace impl {\r
- extern "C" void load_constants(int ndisp, float disc_cost, float data_cost, float lambda);\r
- extern "C" void comp_data_caller(const DevMem2D& l, const DevMem2D& r, DevMem2Df mdata, const cudaStream_t& stream);\r
- extern "C" void data_down_kernel_caller(int dst_cols, int dst_rows, int src_rows, const DevMem2Df& src, DevMem2Df dst, const cudaStream_t& stream);\r
- extern "C" void level_up(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Df* mu, DevMem2Df* md, DevMem2Df* ml, DevMem2Df* mr, const cudaStream_t& stream);\r
- extern "C" void call_all_iterations(int cols, int rows, int iters, DevMem2Df& u, DevMem2Df& d, DevMem2Df& l, DevMem2Df& r, const DevMem2Df& data, const cudaStream_t& stream);\r
- extern "C" void output_caller(const DevMem2Df& u, const DevMem2Df& d, const DevMem2Df& l, const DevMem2Df& r, const DevMem2Df& data, DevMem2Di disp, const cudaStream_t& stream);\r
+ void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump);\r
+ void comp_data(int msgType, const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream);\r
+ void data_step_down(int dst_cols, int dst_rows, int src_rows, int msgType, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream);\r
+ void level_up_messages(int dst_idx, int dst_cols, int dst_rows, int src_rows, int msgType, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream);\r
+ void calc_all_iterations(int cols, int rows, int iters, int msgType, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream);\r
+ void output(int msgType, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream);\r
}}}\r
\r
-cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_)\r
- : ndisp(ndisp_), iters(iters_), levels(levels_), disc_cost(DEFAULT_DISC_COST), data_cost(DEFAULT_DATA_COST), lambda(DEFAULT_LAMBDA_COST), datas(levels_)\r
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, int msg_type_, float msg_scale_)\r
+ : ndisp(ndisp_), iters(iters_), levels(levels_), \r
+ max_data_term(DEFAULT_MAX_DATA_TERM), data_weight(DEFAULT_DATA_WEIGHT), \r
+ max_disc_term(DEFAULT_MAX_DISC_TERM), disc_single_jump(DEFAULT_DISC_SINGLE_JUMP), \r
+ msg_type(msg_type_), msg_scale(msg_scale_), datas(levels_)\r
+{\r
+ CV_Assert(0 < ndisp && 0 < iters && 0 < levels);\r
+}\r
+\r
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, float max_data_term_, float data_weight_, float max_disc_term_, float disc_single_jump_, int msg_type_, float msg_scale_)\r
+ : ndisp(ndisp_), iters(iters_), levels(levels_), \r
+ max_data_term(max_data_term_), data_weight(data_weight_), \r
+ max_disc_term(max_disc_term_), disc_single_jump(disc_single_jump_), \r
+ msg_type(msg_type_), msg_scale(msg_scale_), datas(levels_)\r
{\r
- CV_Assert(0 < ndisp);\r
- CV_Assert(ndisp % 8 == 0);\r
+ CV_Assert(0 < ndisp && 0 < iters && 0 < levels);\r
}\r
\r
-cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, float disc_cost_, float data_cost_, float lambda_)\r
- : ndisp(ndisp_), iters(iters_), levels(levels_), disc_cost(disc_cost_), data_cost(data_cost_), lambda(lambda_), datas(levels_)\r
+static bool checkMsgOverflow(int levels, float max_data_term, float data_weight, float max_disc_term, float msg_scale)\r
{\r
- CV_Assert(0 < ndisp);\r
- CV_Assert(ndisp % 8 == 0);\r
+ float maxV = ceil(max_disc_term * msg_scale);\r
+ float maxD = ceil(max_data_term * data_weight * msg_scale);\r
+\r
+ float maxMsg = maxV + (maxD * pow(4.0f, (float)levels));\r
+ maxMsg = maxV + (maxD * pow(4.0f, (float)levels)) + 3 * maxMsg;\r
+\r
+ return (maxMsg > numeric_limits<short>::max());\r
}\r
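// Worked example (using the defaults defined above: max_data_term = 10.0f,
// data_weight = 0.07f, max_disc_term = 1.7f) with levels = 5, where the bound
// simplifies to 4 * (ceil(1.7 * s) + ceil(0.7 * s) * 4^5) for scale s:
//   s = 100: 4 * (170 + 70 * 1024) = 287400 >  32767  -> overflow
//   s =  16: 4 * ( 28 + 12 * 1024) =  49264 >  32767  -> overflow
//   s =  10: 4 * ( 17 +  7 * 1024) =  28740 <= 32767  -> fits
// so MSG_TYPE_AUTO below settles on CV_16S with msg_scale = 10 for the
// default cost terms.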
\r
-static void stereo_bp_gpu_operator(int ndisp, int iters, int levels, float disc_cost, float data_cost, float lambda,\r
+static void stereo_bp_gpu_operator(int ndisp, int iters, int levels, \r
+ float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,\r
+ int msg_type, float& msg_scale,\r
GpuMat& u, GpuMat& d, GpuMat& l, GpuMat& r,\r
GpuMat& u2, GpuMat& d2, GpuMat& l2, GpuMat& r2,\r
vector<GpuMat>& datas, GpuMat& out,\r
const int min_image_dim_size = 2;\r
CV_Assert(min(lowest_cols, lowest_rows) > min_image_dim_size);\r
\r
- u.create(rows * ndisp, cols, CV_32F);\r
- d.create(rows * ndisp, cols, CV_32F);\r
- l.create(rows * ndisp, cols, CV_32F);\r
- r.create(rows * ndisp, cols, CV_32F);\r
+ switch (msg_type)\r
+ {\r
+ case StereoBeliefPropagation_GPU::MSG_TYPE_AUTO:\r
+ if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 100.0f))\r
+ {\r
+ msg_type = CV_16S;\r
+ msg_scale = 100.0f;\r
+ }\r
+ else if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 64.0f))\r
+ {\r
+ msg_type = CV_16S;\r
+ msg_scale = 64.0f;\r
+ }\r
+ else if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 32.0f))\r
+ {\r
+ msg_type = CV_16S;\r
+ msg_scale = 32.0f;\r
+ }\r
+ else if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 16.0f))\r
+ {\r
+ msg_type = CV_16S;\r
+ msg_scale = 16.0f;\r
+ }\r
+ else if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 10.0f))\r
+ {\r
+ msg_type = CV_16S;\r
+ msg_scale = 10.0f;\r
+ }\r
+ else\r
+ {\r
+ msg_type = CV_32F;\r
+ msg_scale = 1.0f;\r
+ }\r
+ break;\r
+ case StereoBeliefPropagation_GPU::MSG_TYPE_FLOAT:\r
+ msg_type = CV_32F;\r
+ msg_scale = 1.0f;\r
+ break;\r
+ case StereoBeliefPropagation_GPU::MSG_TYPE_SHORT_SCALE_AUTO:\r
+ msg_type = CV_16S;\r
+ if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 100.0f))\r
+ msg_scale = 100.0f;\r
+ else if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 64.0f))\r
+ msg_scale = 64.0f;\r
+ else if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 32.0f))\r
+ msg_scale = 32.0f;\r
+ else if (!checkMsgOverflow(levels, max_data_term, data_weight, max_disc_term, 16.0f))\r
+ msg_scale = 16.0f;\r
+ else\r
+ msg_scale = 10.0f;\r
+ break;\r
+ case StereoBeliefPropagation_GPU::MSG_TYPE_SHORT_SCALE_MANUAL:\r
+ msg_type = CV_16S;\r
+ break;\r
+ default:\r
+ cv::gpu::error("Unsupported message type", __FILE__, __LINE__);\r
+ }\r
+\r
+ u.create(rows * ndisp, cols, msg_type);\r
+ d.create(rows * ndisp, cols, msg_type);\r
+ l.create(rows * ndisp, cols, msg_type);\r
+ r.create(rows * ndisp, cols, msg_type);\r
\r
if (levels & 1)\r
{\r
- u = zero; //can clear less area\r
+ //can clear less area\r
+ u = zero;\r
d = zero;\r
l = zero;\r
r = zero;\r
int less_rows = (rows + 1) / 2;\r
int less_cols = (cols + 1) / 2;\r
\r
- u2.create(less_rows * ndisp, less_cols, CV_32F);\r
- d2.create(less_rows * ndisp, less_cols, CV_32F);\r
- l2.create(less_rows * ndisp, less_cols, CV_32F);\r
- r2.create(less_rows * ndisp, less_cols, CV_32F);\r
+ u2.create(less_rows * ndisp, less_cols, msg_type);\r
+ d2.create(less_rows * ndisp, less_cols, msg_type);\r
+ l2.create(less_rows * ndisp, less_cols, msg_type);\r
+ r2.create(less_rows * ndisp, less_cols, msg_type);\r
\r
        if ((levels & 1) == 0)\r
        {\r
            u2 = zero;\r
            d2 = zero;\r
            l2 = zero;\r
            r2 = zero;\r
        }\r
}\r
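// Note on scaling: data_weight, max_disc_term and disc_single_jump are
// pre-multiplied by msg_scale below, so short-typed messages carry scaled
// costs; max_data_term itself is passed unscaled because the data kernel
// compares against cdata_weight * cmax_data_term, and cdata_weight already
// carries the scale.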
\r
- impl::load_constants(ndisp, disc_cost, data_cost, lambda);\r
+ impl::load_constants(ndisp, max_data_term, msg_scale * data_weight, msg_scale * max_disc_term, msg_scale * disc_single_jump);\r
\r
datas.resize(levels);\r
\r
- AutoBuffer<int> cols_all_buf(levels);\r
- AutoBuffer<int> rows_all_buf(levels);\r
- AutoBuffer<int> iters_all_buf(levels);\r
+ AutoBuffer<int> buf(levels << 1);\r
\r
- int *cols_all = cols_all_buf;\r
- int *rows_all = rows_all_buf;\r
- int *iters_all = iters_all_buf;\r
+ int* cols_all = buf;\r
+ int* rows_all = cols_all + levels;\r
\r
cols_all[0] = cols;\r
rows_all[0] = rows;\r
- iters_all[0] = iters;\r
\r
- datas[0].create(rows * ndisp, cols, CV_32F);\r
- //datas[0] = Scalar(data_cost); //DOTO did in kernel, but not sure if correct\r
+ datas[0].create(rows * ndisp, cols, msg_type);\r
\r
- impl::comp_data_caller(left, right, datas.front(), stream);\r
+ impl::comp_data(msg_type, left, right, datas.front(), stream);\r
\r
for (int i = 1; i < levels; i++)\r
{\r
- cols_all[i] = (cols_all[i-1] + 1)/2;\r
- rows_all[i] = (rows_all[i-1] + 1)/2;\r
-\r
- // this is difference from Felzenszwalb algorithm\r
- // we reduce iters num for each next level\r
- iters_all[i] = max(2 * iters_all[i-1] / 3, 1);\r
+ cols_all[i] = (cols_all[i-1] + 1) / 2;\r
+ rows_all[i] = (rows_all[i-1] + 1) / 2;\r
\r
- datas[i].create(rows_all[i] * ndisp, cols_all[i], CV_32F);\r
+ datas[i].create(rows_all[i] * ndisp, cols_all[i], msg_type);\r
\r
- impl::data_down_kernel_caller(cols_all[i], rows_all[i], rows_all[i-1], datas[i-1], datas[i], stream);\r
+ impl::data_step_down(cols_all[i], rows_all[i], rows_all[i-1], msg_type, datas[i-1], datas[i], stream);\r
}\r
\r
- DevMem2D_<float> mus[] = {u, u2};\r
- DevMem2D_<float> mds[] = {d, d2};\r
- DevMem2D_<float> mrs[] = {r, r2};\r
- DevMem2D_<float> mls[] = {l, l2};\r
+ DevMem2D mus[] = {u, u2};\r
+ DevMem2D mds[] = {d, d2};\r
+ DevMem2D mrs[] = {r, r2};\r
+ DevMem2D mls[] = {l, l2};\r
\r
int mem_idx = (levels & 1) ? 0 : 1;\r
\r
- for (int i = levels - 1; i >= 0; i--) // for lower level we have already computed messages by setting to zero\r
+ for (int i = levels - 1; i >= 0; i--)\r
{\r
+ // for lower level we have already computed messages by setting to zero\r
if (i != levels - 1)\r
- impl::level_up(mem_idx, cols_all[i], rows_all[i], rows_all[i+1], mus, mds, mls, mrs, stream);\r
+ impl::level_up_messages(mem_idx, cols_all[i], rows_all[i], rows_all[i+1], msg_type, mus, mds, mls, mrs, stream);\r
\r
- impl::call_all_iterations(cols_all[i], rows_all[i], iters_all[i], mus[mem_idx], mds[mem_idx], mls[mem_idx], mrs[mem_idx], datas[i], stream);\r
+ impl::calc_all_iterations(cols_all[i], rows_all[i], iters, msg_type, mus[mem_idx], mds[mem_idx], mls[mem_idx], mrs[mem_idx], datas[i], stream);\r
\r
mem_idx = (mem_idx + 1) & 1;\r
}\r
\r
if (disp.empty())\r
- disp.create(rows, cols, CV_32S);\r
+ disp.create(rows, cols, CV_16S);\r
\r
- if (disp.type() == CV_32S)\r
+ if (disp.type() == CV_16S)\r
{\r
disp = zero;\r
- impl::output_caller(u, d, l, r, datas.front(), disp, stream);\r
+ impl::output(msg_type, u, d, l, r, datas.front(), disp, stream);\r
}\r
else\r
{\r
- out.create(rows, cols, CV_32S);\r
+ out.create(rows, cols, CV_16S);\r
out = zero;\r
\r
- impl::output_caller(u, d, l, r, datas.front(), out, stream);\r
+ impl::output(msg_type, u, d, l, r, datas.front(), out, stream);\r
\r
out.convertTo(disp, disp.type());\r
}\r
\r
void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat& left, const GpuMat& right, GpuMat& disp)\r
{\r
- ::stereo_bp_gpu_operator(ndisp, iters, levels, disc_cost, data_cost, lambda, u, d, l, r, u2, d2, l2, r2, datas, out, left, right, disp, 0);\r
+ ::stereo_bp_gpu_operator(ndisp, iters, levels, max_data_term, data_weight, max_disc_term, disc_single_jump, msg_type, msg_scale, u, d, l, r, u2, d2, l2, r2, datas, out, left, right, disp, 0);\r
}\r
\r
void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat& left, const GpuMat& right, GpuMat& disp, const CudaStream& stream)\r
{\r
- ::stereo_bp_gpu_operator(ndisp, iters, levels, disc_cost, data_cost, lambda, u, d, l, r, u2, d2, l2, r2, datas, out, left, right, disp, StreamAccessor::getStream(stream));\r
+ ::stereo_bp_gpu_operator(ndisp, iters, levels, max_data_term, data_weight, max_disc_term, disc_single_jump, msg_type, msg_scale, u, d, l, r, u2, d2, l2, r2, datas, out, left, right, disp, StreamAccessor::getStream(stream));\r
}\r
\r
bool cv::gpu::StereoBeliefPropagation_GPU::checkIfGpuCallReasonable()\r
//M*/\r
\r
#include "opencv2/gpu/devmem2d.hpp"\r
+#include "saturate_cast.hpp"\r
#include "safe_call.hpp"\r
\r
using namespace cv::gpu;\r
\r
-static inline int divUp(int a, int b) { return (a % b == 0) ? a/b : a/b + 1; }\r
-\r
#ifndef FLT_MAX\r
#define FLT_MAX 3.402823466e+38F\r
#endif\r
\r
-typedef unsigned char uchar;\r
+///////////////////////////////////////////////////////////////\r
+/////////////////////// load constants ////////////////////////\r
+///////////////////////////////////////////////////////////////\r
\r
namespace beliefpropagation_gpu\r
-{ \r
+{\r
__constant__ int cndisp;\r
- __constant__ float cdisc_cost;\r
- __constant__ float cdata_cost;\r
- __constant__ float clambda;\r
+ __constant__ float cmax_data_term;\r
+ __constant__ float cdata_weight;\r
+ __constant__ float cmax_disc_term;\r
+ __constant__ float cdisc_single_jump;\r
};\r
\r
+namespace cv { namespace gpu { namespace impl {\r
+ void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump)\r
+ {\r
+ cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cndisp, &ndisp, sizeof(int )) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cmax_data_term, &max_data_term, sizeof(float)) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdata_weight, &data_weight, sizeof(float)) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cmax_disc_term, &max_disc_term, sizeof(float)) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdisc_single_jump, &disc_single_jump, sizeof(float)) ); \r
+ }\r
+}}}\r
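// The parameters live in __constant__ memory: it is cached per-SM and
// broadcast on uniform access, which fits these per-call values (ndisp and
// the cost terms) that every thread of every kernel reads identically.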
+\r
///////////////////////////////////////////////////////////////\r
-////////////////// comp data /////////////////////////////////\r
+////////////////////////// comp data //////////////////////////\r
///////////////////////////////////////////////////////////////\r
\r
namespace beliefpropagation_gpu\r
{\r
- __global__ void comp_data_kernel(uchar* l, uchar* r, size_t step, float* data, size_t data_step, int cols, int rows) \r
+ template <typename T>\r
+ __global__ void comp_data(uchar* l, uchar* r, size_t step, T* data, size_t data_step, int cols, int rows) \r
{\r
int x = blockIdx.x * blockDim.x + threadIdx.x;\r
int y = blockIdx.y * blockDim.y + threadIdx.y;\r
\r
- if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)\r
+ if (y < rows && x < cols)\r
{\r
- uchar *ls = l + y * step + x; \r
- uchar *rs = r + y * step + x; \r
+ uchar* ls = l + y * step + x; \r
+ uchar* rs = r + y * step + x; \r
\r
- float *ds = data + y * data_step + x;\r
+ T* ds = data + y * data_step + x;\r
size_t disp_step = data_step * rows;\r
\r
for (int disp = 0; disp < cndisp; disp++) \r
int re = rs[-disp];\r
float val = abs(le - re);\r
\r
- ds[disp * disp_step] = clambda * fmin(val, cdata_cost);\r
+ ds[disp * disp_step] = saturate_cast<T>(fmin(cdata_weight * val, cdata_weight * cmax_data_term));\r
}\r
else\r
{\r
- ds[disp * disp_step] = cdata_cost;\r
+ ds[disp * disp_step] = saturate_cast<T>(cdata_weight * cmax_data_term);\r
}\r
}\r
}\r
}\r
\r
namespace cv { namespace gpu { namespace impl {\r
- extern "C" void load_constants(int ndisp, float disc_cost, float data_cost, float lambda)\r
- {\r
- cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cndisp, &ndisp, sizeof(ndisp)) );\r
- cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdisc_cost, &disc_cost, sizeof(disc_cost)) );\r
- cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdata_cost, &data_cost, sizeof(data_cost)) );\r
- cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::clambda, &lambda, sizeof(lambda)) ); \r
- }\r
-\r
- extern "C" void comp_data_caller(const DevMem2D& l, const DevMem2D& r, DevMem2D_<float> mdata, const cudaStream_t& stream)\r
+ typedef void (*CompDataFunc)(const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream);
+
+ template<typename T>
+ void comp_data_(const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream)
{\r
dim3 threads(32, 8, 1);\r
dim3 grid(1, 1, 1);\r
\r
grid.x = divUp(l.cols, threads.x);\r
grid.y = divUp(l.rows, threads.y);\r
+ \r
+ beliefpropagation_gpu::comp_data<T><<<grid, threads, 0, stream>>>(l.ptr, r.ptr, l.step, (T*)mdata.ptr, mdata.step/sizeof(T), l.cols, l.rows);
+
+ if (stream == 0)
+ cudaSafeCall( cudaThreadSynchronize() );
+ }\r
\r
- if (stream == 0)\r
- {\r
- beliefpropagation_gpu::comp_data_kernel<<<grid, threads>>>(l.ptr, r.ptr, l.step, mdata.ptr, mdata.step/sizeof(float), l.cols, l.rows);\r
- //cudaSafeCall( cudaThreadSynchronize() );\r
- }\r
- else\r
- {\r
- beliefpropagation_gpu::comp_data_kernel<<<grid, threads, 0, stream>>>(l.ptr, r.ptr, l.step, mdata.ptr, mdata.step/sizeof(float), l.cols, l.rows);\r
- }\r
+ void comp_data(int msgType, const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream)\r
+ {\r
+ static CompDataFunc tab[8] =
+ {
+ 0, // uchar
+ 0, // schar
+ 0, // ushort
+ comp_data_<short>, // short
+ 0, // int
+ comp_data_<float>, // float
+ 0, // double
+ 0 // user type
+ };
+
+ CompDataFunc func = tab[msgType];
+ if (func == 0)
+ cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
+ func(l, r, mdata, stream);\r
}\r
}}}\r
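// The tab[8] dispatch above (mirrored by the other callers below) is indexed
// by the OpenCV depth code: CV_8U=0, CV_8S=1, CV_16U=2, CV_16S=3, CV_32S=4,
// CV_32F=5, CV_64F=6, with slot 7 reserved for user types. Only CV_16S and
// CV_32F message buffers are instantiated; any other depth hits a null entry
// and raises "Unsupported message type".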
\r
///////////////////////////////////////////////////////////////\r
-////////////////// data_step_down ////////////////////////////\r
+//////////////////////// data step down ///////////////////////\r
///////////////////////////////////////////////////////////////\r
\r
namespace beliefpropagation_gpu\r
-{ \r
- __global__ void data_down_kernel(int dst_cols, int dst_rows, int src_rows, float *src, size_t src_step, float *dst, size_t dst_step)\r
+{\r
+ template <typename T>\r
+ __global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const T* src, size_t src_step, T* dst, size_t dst_step)\r
{\r
int x = blockIdx.x * blockDim.x + threadIdx.x;\r
int y = blockIdx.y * blockDim.y + threadIdx.y;\r
dst_reg += src[d * src_disp_step + src_step * (2*y+0) + (2*x+1)];\r
dst_reg += src[d * src_disp_step + src_step * (2*y+1) + (2*x+1)];\r
\r
- dst[d * dst_disp_step + y * dst_step + x] = dst_reg;\r
+ dst[d * dst_disp_step + y * dst_step + x] = saturate_cast<T>(dst_reg);\r
}\r
}\r
}\r
}\r
\r
namespace cv { namespace gpu { namespace impl {\r
- extern "C" void data_down_kernel_caller(int dst_cols, int dst_rows, int src_rows, const DevMem2D_<float>& src, DevMem2D_<float> dst, const cudaStream_t& stream)\r
+ typedef void (*DataStepDownFunc)(int dst_cols, int dst_rows, int src_rows, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream);
+
+ template<typename T>
+ void data_step_down_(int dst_cols, int dst_rows, int src_rows, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream)
{\r
dim3 threads(32, 8, 1);\r
dim3 grid(1, 1, 1);\r
grid.x = divUp(dst_cols, threads.x);\r
grid.y = divUp(dst_rows, threads.y);\r
\r
- if (stream == 0)\r
- {\r
- beliefpropagation_gpu::data_down_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, src.ptr, src.step/sizeof(float), dst.ptr, dst.step/sizeof(float));\r
- //cudaSafeCall( cudaThreadSynchronize() );\r
- }\r
- else\r
- {\r
- beliefpropagation_gpu::data_down_kernel<<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, src.ptr, src.step/sizeof(float), dst.ptr, dst.step/sizeof(float));\r
- }\r
+ beliefpropagation_gpu::data_step_down<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (const T*)src.ptr, src.step/sizeof(T), (T*)dst.ptr, dst.step/sizeof(T));
+
+ if (stream == 0)
+ cudaSafeCall( cudaThreadSynchronize() );
+ }\r
+\r
+ void data_step_down(int dst_cols, int dst_rows, int src_rows, int msgType, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream)\r
+ {\r
+ static DataStepDownFunc tab[8] =
+ {
+ 0, // uchar
+ 0, // schar
+ 0, // ushort
+ data_step_down_<short>, // short
+ 0, // int
+ data_step_down_<float>, // float
+ 0, // double
+ 0 // user type
+ };
+
+ DataStepDownFunc func = tab[msgType];
+ if (func == 0)
+ cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
+ func(dst_cols, dst_rows, src_rows, src, dst, stream);\r
}\r
}}}\r
\r
///////////////////////////////////////////////////////////////\r
-////////////////// level up messages ////////////////////////\r
+/////////////////// level up messages ////////////////////////\r
///////////////////////////////////////////////////////////////\r
\r
-\r
namespace beliefpropagation_gpu\r
-{ \r
- __global__ void level_up_kernel(int dst_cols, int dst_rows, int src_rows, float *src, size_t src_step, float *dst, size_t dst_step)\r
+{\r
+ template <typename T>\r
+ __global__ void level_up_message(int dst_cols, int dst_rows, int src_rows, const T* src, size_t src_step, T* dst, size_t dst_step)\r
{\r
int x = blockIdx.x * blockDim.x + threadIdx.x;\r
int y = blockIdx.y * blockDim.y + threadIdx.y; \r
const size_t dst_disp_step = dst_step * dst_rows;\r
const size_t src_disp_step = src_step * src_rows;\r
\r
- float *dstr = dst + y * dst_step + x;\r
- float *srcr = src + y/2 * src_step + x/2;\r
+ T* dstr = dst + y * dst_step + x;\r
+ const T* srcr = src + y/2 * src_step + x/2;\r
\r
for (int d = 0; d < cndisp; ++d) \r
dstr[d * dst_disp_step] = srcr[d * src_disp_step];\r
}\r
\r
namespace cv { namespace gpu { namespace impl {\r
- extern "C" void level_up(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D_<float>* mu, DevMem2D_<float>* md, DevMem2D_<float>* ml, DevMem2D_<float>* mr, const cudaStream_t& stream)\r
+ typedef void (*LevelUpMessagesFunc)(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream);
+
+ template<typename T>
+ void level_up_messages_(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream)
{\r
dim3 threads(32, 8, 1);\r
dim3 grid(1, 1, 1);\r
\r
int src_idx = (dst_idx + 1) & 1;\r
\r
- if (stream == 0)\r
- {\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, mu[src_idx].ptr, mu[src_idx].step/sizeof(float), mu[dst_idx].ptr, mu[dst_idx].step/sizeof(float));\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, md[src_idx].ptr, md[src_idx].step/sizeof(float), md[dst_idx].ptr, md[dst_idx].step/sizeof(float));\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, ml[src_idx].ptr, ml[src_idx].step/sizeof(float), ml[dst_idx].ptr, ml[dst_idx].step/sizeof(float));\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, mr[src_idx].ptr, mr[src_idx].step/sizeof(float), mr[dst_idx].ptr, mr[dst_idx].step/sizeof(float));\r
- //cudaSafeCall( cudaThreadSynchronize() );\r
- }\r
- else\r
- {\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, mu[src_idx].ptr, mu[src_idx].step/sizeof(float), mu[dst_idx].ptr, mu[dst_idx].step/sizeof(float));\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, md[src_idx].ptr, md[src_idx].step/sizeof(float), md[dst_idx].ptr, md[dst_idx].step/sizeof(float));\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, ml[src_idx].ptr, ml[src_idx].step/sizeof(float), ml[dst_idx].ptr, ml[dst_idx].step/sizeof(float));\r
- beliefpropagation_gpu::level_up_kernel<<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, mr[src_idx].ptr, mr[src_idx].step/sizeof(float), mr[dst_idx].ptr, mr[dst_idx].step/sizeof(float));\r
- }\r
+ beliefpropagation_gpu::level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (const T*)mus[src_idx].ptr, mus[src_idx].step/sizeof(T), (T*)mus[dst_idx].ptr, mus[dst_idx].step/sizeof(T));\r
+ beliefpropagation_gpu::level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (const T*)mds[src_idx].ptr, mds[src_idx].step/sizeof(T), (T*)mds[dst_idx].ptr, mds[dst_idx].step/sizeof(T));\r
+ beliefpropagation_gpu::level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (const T*)mls[src_idx].ptr, mls[src_idx].step/sizeof(T), (T*)mls[dst_idx].ptr, mls[dst_idx].step/sizeof(T));\r
+ beliefpropagation_gpu::level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (const T*)mrs[src_idx].ptr, mrs[src_idx].step/sizeof(T), (T*)mrs[dst_idx].ptr, mrs[dst_idx].step/sizeof(T));
+
+ if (stream == 0)
+ cudaSafeCall( cudaThreadSynchronize() );
}\r
-}}}\r
\r
+ void level_up_messages(int dst_idx, int dst_cols, int dst_rows, int src_rows, int msgType, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream)\r
+ {\r
+ static LevelUpMessagesFunc tab[8] =
+ {
+ 0, // uchar
+ 0, // schar
+ 0, // ushort
+ level_up_messages_<short>, // short
+ 0, // int
+ level_up_messages_<float>, // float
+ 0, // double
+ 0 // user type
+ };
+
+ LevelUpMessagesFunc func = tab[msgType];
+ if (func == 0)
+ cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
+ func(dst_idx, dst_cols, dst_rows, src_rows, mus, mds, mls, mrs, stream);\r
+ }\r
+}}}\r
\r
///////////////////////////////////////////////////////////////\r
-///////////////// Calcs all iterations ///////////////////////\r
+//////////////////// calc all iterations /////////////////////\r
///////////////////////////////////////////////////////////////\r
\r
-\r
namespace beliefpropagation_gpu\r
{\r
- __device__ void calc_min_linear_penalty(float *dst, size_t step)\r
+ template <typename T>\r
+ __device__ void calc_min_linear_penalty(T* dst, size_t step)\r
{\r
float prev = dst[0];\r
float cur;\r
for (int disp = 1; disp < cndisp; ++disp) \r
{\r
- prev += 1.0f;\r
+ prev += cdisc_single_jump;\r
cur = dst[step * disp];\r
if (prev < cur)\r
+ {\r
cur = prev;\r
- dst[step * disp] = prev = cur;\r
+ dst[step * disp] = saturate_cast<T>(prev);\r
+ }\r
+ prev = cur;\r
}\r
\r
prev = dst[(cndisp - 1) * step];\r
for (int disp = cndisp - 2; disp >= 0; disp--) \r
{\r
- prev += 1.0f;\r
+ prev += cdisc_single_jump;\r
cur = dst[step * disp];\r
if (prev < cur)\r
+ {\r
cur = prev;\r
- dst[step * disp] = prev = cur; \r
+ dst[step * disp] = saturate_cast<T>(prev);\r
+ }\r
+ prev = cur; \r
}\r
}\r
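// Worked trace of the two passes above (illustrative, with cdisc_single_jump
// = 1 and a 3-disparity column dst = {5, 0, 4}):
//   forward pass:  {5, 0, min(4, 0 + 1)} -> {5, 0, 1}
//   backward pass: {min(5, 0 + 1), 0, 1} -> {1, 0, 1}
// Each entry ends up as min_j(dst[j] + |i - j| * cdisc_single_jump): the
// lower envelope needed for the truncated-linear smoothness cost, computed
// in O(cndisp) instead of O(cndisp^2).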
\r
- __device__ void message(float *msg1, float *msg2, float *msg3, float *data, float *dst, size_t msg_disp_step, size_t data_disp_step)\r
+ template <typename T>\r
+ __device__ void message(const T* msg1, const T* msg2, const T* msg3, const T* data, T* dst, size_t msg_disp_step, size_t data_disp_step)\r
{\r
float minimum = FLT_MAX;\r
\r
for(int i = 0; i < cndisp; ++i)\r
{\r
- float dst_reg = msg1[msg_disp_step * i] + msg2[msg_disp_step * i] + msg3[msg_disp_step * i] + data[data_disp_step * i];\r
+ float dst_reg = msg1[msg_disp_step * i];\r
+ dst_reg += msg2[msg_disp_step * i];\r
+ dst_reg += msg3[msg_disp_step * i];\r
+ dst_reg += data[data_disp_step * i];\r
\r
if (dst_reg < minimum)\r
minimum = dst_reg;\r
\r
- dst[msg_disp_step * i] = dst_reg;\r
-\r
+ dst[msg_disp_step * i] = saturate_cast<T>(dst_reg);\r
}\r
\r
calc_min_linear_penalty(dst, msg_disp_step);\r
\r
- minimum += cdisc_cost;\r
+ minimum += cmax_disc_term;\r
\r
float sum = 0;\r
for(int i = 0; i < cndisp; ++i)\r
float dst_reg = dst[msg_disp_step * i];\r
if (dst_reg > minimum)\r
{\r
- dst[msg_disp_step * i] = dst_reg = minimum; \r
+ dst_reg = minimum;\r
+ dst[msg_disp_step * i] = saturate_cast<T>(minimum);\r
}\r
sum += dst_reg;\r
} \r
dst[msg_disp_step * i] -= sum;\r
}\r
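// Taken together, this computes the min-sum message for a truncated linear
// model: m(d) = min(h(d), min_d' h(d') + cmax_disc_term), where h(d) is the
// lower envelope (via calc_min_linear_penalty) of the sum of the three
// incoming messages and the data cost. The trailing loop then normalizes the
// message (in the full source, by subtracting its mean) so values stay small
// enough for the scaled short representation.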
\r
- __global__ void one_iteration(int t, float* u, float *d, float *l, float *r, size_t msg_step, float *data, size_t data_step, int cols, int rows)\r
+ template <typename T>\r
+ __global__ void one_iteration(int t, T* u, T* d, T* l, T* r, size_t msg_step, const T* data, size_t data_step, int cols, int rows)\r
{\r
int y = blockIdx.y * blockDim.y + threadIdx.y;\r
int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);\r
\r
if ( (y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1))\r
{\r
- float *us = u + y * msg_step + x;\r
- float *ds = d + y * msg_step + x;\r
- float *ls = l + y * msg_step + x;\r
- float *rs = r + y * msg_step + x;\r
- float *dt = data + y * data_step + x;\r
+ T* us = u + y * msg_step + x;\r
+ T* ds = d + y * msg_step + x;\r
+ T* ls = l + y * msg_step + x;\r
+ T* rs = r + y * msg_step + x;\r
+ const T* dt = data + y * data_step + x;\r
+\r
size_t msg_disp_step = msg_step * rows;\r
size_t data_disp_step = data_step * rows;\r
\r
}\r
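// one_iteration runs a red-black (checkerboard) schedule: the x index
//   ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1)
// selects only pixels whose (x + y) parity matches the iteration counter t,
// so each pass updates half of the grid in place using the other half's
// freshly written messages.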
\r
namespace cv { namespace gpu { namespace impl {\r
- extern "C" void call_all_iterations(int cols, int rows, int iters, DevMem2D_<float>& u, DevMem2D_<float>& d, DevMem2D_<float>& l, DevMem2D_<float>& r, const DevMem2D_<float>& data, const cudaStream_t& stream)\r
+ typedef void (*CalcAllIterationFunc)(int cols, int rows, int iters, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream);
+
+ template<typename T>
+ void calc_all_iterations_(int cols, int rows, int iters, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream)
{\r
dim3 threads(32, 8, 1);\r
dim3 grid(1, 1, 1);\r
grid.x = divUp(cols, threads.x << 1);\r
grid.y = divUp(rows, threads.y);\r
\r
- if (stream == 0)\r
+ for(int t = 0; t < iters; ++t)\r
{\r
- for(int t = 0; t < iters; ++t)\r
- beliefpropagation_gpu::one_iteration<<<grid, threads>>>(t, u.ptr, d.ptr, l.ptr, r.ptr, u.step/sizeof(float), data.ptr, data.step/sizeof(float), cols, rows);\r
- //cudaSafeCall( cudaThreadSynchronize() );\r
- }\r
- else\r
- {\r
- for(int t = 0; t < iters; ++t)\r
- beliefpropagation_gpu::one_iteration<<<grid, threads, 0, stream>>>(t, u.ptr, d.ptr, l.ptr, r.ptr, u.step/sizeof(float), data.ptr, data.step/sizeof(float), cols, rows);\r
- }\r
+ beliefpropagation_gpu::one_iteration<T><<<grid, threads, 0, stream>>>(t, (T*)u.ptr, (T*)d.ptr, (T*)l.ptr, (T*)r.ptr, u.step/sizeof(T), (const T*)data.ptr, data.step/sizeof(T), cols, rows);
+
+ if (stream == 0)
+ cudaSafeCall( cudaThreadSynchronize() );
+ }
}\r
-}}}\r
\r
+ void calc_all_iterations(int cols, int rows, int iters, int msgType, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream)\r
+ {\r
+ static CalcAllIterationFunc tab[8] =
+ {
+ 0, // uchar
+ 0, // schar
+ 0, // ushort
+ calc_all_iterations_<short>, // short
+ 0, // int
+ calc_all_iterations_<float>, // float
+ 0, // double
+ 0 // user type
+ };
+
+ CalcAllIterationFunc func = tab[msgType];
+ if (func == 0)
+ cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
+ func(cols, rows, iters, u, d, l, r, data, stream);\r
+ }\r
+}}}\r
\r
///////////////////////////////////////////////////////////////\r
-////////////////// Output caller /////////////////////////////\r
+/////////////////////////// output ////////////////////////////\r
///////////////////////////////////////////////////////////////\r
\r
namespace beliefpropagation_gpu\r
-{ \r
- __global__ void output(int cols, int rows, float *u, float *d, float *l, float *r, float* data, size_t step, int *disp, size_t res_step) \r
+{\r
+ template <typename T>\r
+ __global__ void output(int cols, int rows, const T* u, const T* d, const T* l, const T* r, const T* data, size_t step, short* disp, size_t res_step) \r
{ \r
int x = blockIdx.x * blockDim.x + threadIdx.x;\r
int y = blockIdx.y * blockDim.y + threadIdx.y;\r
\r
if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)\r
{\r
- float *us = u + (y + 1) * step + x;\r
- float *ds = d + (y - 1) * step + x;\r
- float *ls = l + y * step + (x + 1);\r
- float *rs = r + y * step + (x - 1);\r
- float *dt = data + y * step + x;\r
+ const T* us = u + (y + 1) * step + x;\r
+ const T* ds = d + (y - 1) * step + x;\r
+ const T* ls = l + y * step + (x + 1);\r
+ const T* rs = r + y * step + (x - 1);\r
+ const T* dt = data + y * step + x;\r
\r
size_t disp_step = rows * step;\r
\r
float best_val = FLT_MAX;\r
for (int d = 0; d < cndisp; ++d) \r
{\r
- float val = us[d * disp_step] + ds[d * disp_step] + ls[d * disp_step] + rs[d * disp_step] + dt[d * disp_step];\r
+ float val = us[d * disp_step];\r
+ val += ds[d * disp_step];\r
+ val += ls[d * disp_step];\r
+ val += rs[d * disp_step];\r
+ val += dt[d * disp_step];\r
\r
if (val < best_val) \r
{\r
}\r
}\r
\r
- disp[res_step * y + x] = best; \r
+ disp[res_step * y + x] = saturate_cast<short>(best);\r
}\r
}\r
}\r
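// The output kernel is a winner-take-all readout: per pixel it sums the four
// incoming messages and the data cost for each disparity and writes the
// argmin, saturate_cast to short for the new CV_16S disparity map.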
\r
namespace cv { namespace gpu { namespace impl {\r
- extern "C" void output_caller(const DevMem2D_<float>& u, const DevMem2D_<float>& d, const DevMem2D_<float>& l, const DevMem2D_<float>& r, const DevMem2D_<float>& data, DevMem2D_<int> disp, const cudaStream_t& stream)\r
- { \r
+ typedef void (*OutputFunc)(const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream);
+
+ template<typename T>
+ void output_(const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream)
+ {\r
dim3 threads(32, 8, 1);\r
dim3 grid(1, 1, 1);\r
\r
grid.x = divUp(disp.cols, threads.x);\r
grid.y = divUp(disp.rows, threads.y);\r
\r
- if (stream == 0)\r
- {\r
- beliefpropagation_gpu::output<<<grid, threads>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step/sizeof(int));\r
- cudaSafeCall( cudaThreadSynchronize() );\r
- }\r
- else\r
- { \r
- beliefpropagation_gpu::output<<<grid, threads, 0, stream>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step/sizeof(int));\r
- }\r
+ beliefpropagation_gpu::output<T><<<grid, threads, 0, stream>>>(disp.cols, disp.rows, (const T*)u.ptr, (const T*)d.ptr, (const T*)l.ptr, (const T*)r.ptr, (const T*)data.ptr, u.step/sizeof(T), (short*)disp.ptr, disp.step/sizeof(short));
+
+ if (stream == 0)
+ cudaSafeCall( cudaThreadSynchronize() );
+ }\r
+\r
+ void output(int msgType, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream)\r
+ { \r
+ static OutputFunc tab[8] =
+ {
+ 0, // uchar
+ 0, // schar
+ 0, // ushort
+ output_<short>, // short
+ 0, // int
+ output_<float>, // float
+ 0, // double
+ 0 // user type
+ };
+
+ OutputFunc func = tab[msgType];
+ if (func == 0)
+ cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
+ func(u, d, l, r, data, disp, stream);\r
}\r
}}}
\ No newline at end of file
{\r
namespace gpu\r
{\r
- template<typename _Tp> __device__ _Tp saturate_cast(uchar v) { return _Tp(v); }\r
- template<typename _Tp> __device__ _Tp saturate_cast(schar v) { return _Tp(v); }\r
- template<typename _Tp> __device__ _Tp saturate_cast(ushort v) { return _Tp(v); }\r
- template<typename _Tp> __device__ _Tp saturate_cast(short v) { return _Tp(v); }\r
- template<typename _Tp> __device__ _Tp saturate_cast(uint v) { return _Tp(v); }\r
- template<typename _Tp> __device__ _Tp saturate_cast(int v) { return _Tp(v); }\r
- template<typename _Tp> __device__ _Tp saturate_cast(float v) { return _Tp(v); }\r
- template<typename _Tp> __device__ _Tp saturate_cast(double v) { return _Tp(v); }\r
+    // Wrapped in an unnamed namespace to fix a link error: these functions\r
+    // were already defined in another object file, and internal linkage gives\r
+    // each translation unit its own copy.\r
+ namespace \r
+ {\r
+ template<typename _Tp> __device__ _Tp saturate_cast(uchar v) { return _Tp(v); }\r
+ template<typename _Tp> __device__ _Tp saturate_cast(schar v) { return _Tp(v); }\r
+ template<typename _Tp> __device__ _Tp saturate_cast(ushort v) { return _Tp(v); }\r
+ template<typename _Tp> __device__ _Tp saturate_cast(short v) { return _Tp(v); }\r
+ template<typename _Tp> __device__ _Tp saturate_cast(uint v) { return _Tp(v); }\r
+ template<typename _Tp> __device__ _Tp saturate_cast(int v) { return _Tp(v); }\r
+ template<typename _Tp> __device__ _Tp saturate_cast(float v) { return _Tp(v); }\r
+ template<typename _Tp> __device__ _Tp saturate_cast(double v) { return _Tp(v); }\r
\r
- template<> __device__ uchar saturate_cast<uchar>(schar v)\r
- { return (uchar)max((int)v, 0); }\r
- template<> __device__ uchar saturate_cast<uchar>(ushort v)\r
- { return (uchar)min((uint)v, (uint)UCHAR_MAX); }\r
- template<> __device__ uchar saturate_cast<uchar>(int v)\r
- { return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }\r
- template<> __device__ uchar saturate_cast<uchar>(uint v)\r
- { return (uchar)min(v, (uint)UCHAR_MAX); }\r
- template<> __device__ uchar saturate_cast<uchar>(short v)\r
- { return saturate_cast<uchar>((uint)v); }\r
+ template<> __device__ uchar saturate_cast<uchar>(schar v)\r
+ { return (uchar)max((int)v, 0); }\r
+ template<> __device__ uchar saturate_cast<uchar>(ushort v)\r
+ { return (uchar)min((uint)v, (uint)UCHAR_MAX); }\r
+ template<> __device__ uchar saturate_cast<uchar>(int v)\r
+ { return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }\r
+ template<> __device__ uchar saturate_cast<uchar>(uint v)\r
+ { return (uchar)min(v, (uint)UCHAR_MAX); }\r
+ template<> __device__ uchar saturate_cast<uchar>(short v)\r
+ { return saturate_cast<uchar>((uint)v); }\r
\r
- template<> __device__ uchar saturate_cast<uchar>(float v)\r
- { int iv = __float2int_rn(v); return saturate_cast<uchar>(iv); }\r
- template<> __device__ uchar saturate_cast<uchar>(double v)\r
- {\r
- #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
- int iv = __double2int_rn(v); return saturate_cast<uchar>(iv);\r
- #else\r
- return saturate_cast<uchar>((float)v);\r
- #endif\r
- }\r
+ template<> __device__ uchar saturate_cast<uchar>(float v)\r
+ { int iv = __float2int_rn(v); return saturate_cast<uchar>(iv); }\r
+ template<> __device__ uchar saturate_cast<uchar>(double v)\r
+ {\r
+ #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
+ int iv = __double2int_rn(v); return saturate_cast<uchar>(iv);\r
+ #else\r
+ return saturate_cast<uchar>((float)v);\r
+ #endif\r
+ }\r
\r
- template<> __device__ schar saturate_cast<schar>(uchar v)\r
- { return (schar)min((int)v, SCHAR_MAX); }\r
- template<> __device__ schar saturate_cast<schar>(ushort v)\r
- { return (schar)min((uint)v, (uint)SCHAR_MAX); }\r
- template<> __device__ schar saturate_cast<schar>(int v)\r
- {\r
- return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ?\r
- v : v > 0 ? SCHAR_MAX : SCHAR_MIN);\r
- }\r
- template<> __device__ schar saturate_cast<schar>(short v)\r
- { return saturate_cast<schar>((int)v); }\r
- template<> __device__ schar saturate_cast<schar>(uint v)\r
- { return (schar)min(v, (uint)SCHAR_MAX); }\r
+ template<> __device__ schar saturate_cast<schar>(uchar v)\r
+ { return (schar)min((int)v, SCHAR_MAX); }\r
+ template<> __device__ schar saturate_cast<schar>(ushort v)\r
+ { return (schar)min((uint)v, (uint)SCHAR_MAX); }\r
+ template<> __device__ schar saturate_cast<schar>(int v)\r
+ {\r
+ return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ?\r
+ v : v > 0 ? SCHAR_MAX : SCHAR_MIN);\r
+ }\r
+ template<> __device__ schar saturate_cast<schar>(short v)\r
+ { return saturate_cast<schar>((int)v); }\r
+ template<> __device__ schar saturate_cast<schar>(uint v)\r
+ { return (schar)min(v, (uint)SCHAR_MAX); }\r
\r
- template<> __device__ schar saturate_cast<schar>(float v)\r
- { int iv = __float2int_rn(v); return saturate_cast<schar>(iv); }\r
- template<> __device__ schar saturate_cast<schar>(double v)\r
- { \r
- #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
- int iv = __double2int_rn(v); return saturate_cast<schar>(iv);\r
- #else\r
- return saturate_cast<schar>((float)v);\r
- #endif\r
- }\r
+ template<> __device__ schar saturate_cast<schar>(float v)\r
+ { int iv = __float2int_rn(v); return saturate_cast<schar>(iv); }\r
+ template<> __device__ schar saturate_cast<schar>(double v)\r
+ { \r
+ #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
+ int iv = __double2int_rn(v); return saturate_cast<schar>(iv);\r
+ #else\r
+ return saturate_cast<schar>((float)v);\r
+ #endif\r
+ }\r
\r
- template<> __device__ ushort saturate_cast<ushort>(schar v)\r
- { return (ushort)max((int)v, 0); }\r
- template<> __device__ ushort saturate_cast<ushort>(short v)\r
- { return (ushort)max((int)v, 0); }\r
- template<> __device__ ushort saturate_cast<ushort>(int v)\r
- { return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }\r
- template<> __device__ ushort saturate_cast<ushort>(uint v)\r
- { return (ushort)min(v, (uint)USHRT_MAX); }\r
- template<> __device__ ushort saturate_cast<ushort>(float v)\r
- { int iv = __float2int_rn(v); return saturate_cast<ushort>(iv); }\r
- template<> __device__ ushort saturate_cast<ushort>(double v)\r
- { \r
- #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
- int iv = __double2int_rn(v); return saturate_cast<ushort>(iv);\r
- #else\r
- return saturate_cast<ushort>((float)v);\r
- #endif\r
- }\r
+ template<> __device__ ushort saturate_cast<ushort>(schar v)\r
+ { return (ushort)max((int)v, 0); }\r
+ template<> __device__ ushort saturate_cast<ushort>(short v)\r
+ { return (ushort)max((int)v, 0); }\r
+ template<> __device__ ushort saturate_cast<ushort>(int v)\r
+ { return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }\r
+ template<> __device__ ushort saturate_cast<ushort>(uint v)\r
+ { return (ushort)min(v, (uint)USHRT_MAX); }\r
+ template<> __device__ ushort saturate_cast<ushort>(float v)\r
+ { int iv = __float2int_rn(v); return saturate_cast<ushort>(iv); }\r
+ template<> __device__ ushort saturate_cast<ushort>(double v)\r
+ { \r
+ #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
+ int iv = __double2int_rn(v); return saturate_cast<ushort>(iv);\r
+ #else\r
+ return saturate_cast<ushort>((float)v);\r
+ #endif\r
+ }\r
\r
- template<> __device__ short saturate_cast<short>(ushort v)\r
- { return (short)min((int)v, SHRT_MAX); }\r
- template<> __device__ short saturate_cast<short>(int v)\r
- {\r
- return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ?\r
- v : v > 0 ? SHRT_MAX : SHRT_MIN);\r
- }\r
- template<> __device__ short saturate_cast<short>(uint v)\r
- { return (short)min(v, (uint)SHRT_MAX); }\r
- template<> __device__ short saturate_cast<short>(float v)\r
- { int iv = __float2int_rn(v); return saturate_cast<short>(iv); }\r
- template<> __device__ short saturate_cast<short>(double v)\r
- { \r
- #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
- int iv = __double2int_rn(v); return saturate_cast<short>(iv);\r
- #else\r
- return saturate_cast<short>((float)v);\r
- #endif\r
- }\r
+ template<> __device__ short saturate_cast<short>(ushort v)\r
+ { return (short)min((int)v, SHRT_MAX); }\r
+ template<> __device__ short saturate_cast<short>(int v)\r
+ {\r
+ return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ?\r
+ v : v > 0 ? SHRT_MAX : SHRT_MIN);\r
+ }\r
+ template<> __device__ short saturate_cast<short>(uint v)\r
+ { return (short)min(v, (uint)SHRT_MAX); }\r
+ template<> __device__ short saturate_cast<short>(float v)\r
+ { int iv = __float2int_rn(v); return saturate_cast<short>(iv); }\r
+ template<> __device__ short saturate_cast<short>(double v)\r
+ { \r
+ #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
+ int iv = __double2int_rn(v); return saturate_cast<short>(iv);\r
+ #else\r
+ return saturate_cast<short>((float)v);\r
+ #endif\r
+ }\r
\r
- template<> __device__ int saturate_cast<int>(float v) { return __float2int_rn(v); }\r
- template<> __device__ int saturate_cast<int>(double v) \r
- {\r
- #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130 \r
- return __double2int_rn(v);\r
- #else\r
- return saturate_cast<int>((float)v);\r
- #endif\r
- }\r
+ template<> __device__ int saturate_cast<int>(float v) { return __float2int_rn(v); }\r
+ template<> __device__ int saturate_cast<int>(double v) \r
+ {\r
+ #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130 \r
+ return __double2int_rn(v);\r
+ #else\r
+ return saturate_cast<int>((float)v);\r
+ #endif\r
+ }\r
\r
- template<> __device__ uint saturate_cast<uint>(float v){ return __float2uint_rn(v); }\r
- template<> __device__ uint saturate_cast<uint>(double v) \r
- { \r
- #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
- return __double2uint_rn(v);\r
- #else\r
- return saturate_cast<uint>((float)v);\r
- #endif\r
+ template<> __device__ uint saturate_cast<uint>(float v){ return __float2uint_rn(v); }\r
+ template<> __device__ uint saturate_cast<uint>(double v) \r
+ { \r
+ #if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130\r
+ return __double2uint_rn(v);\r
+ #else\r
+ return saturate_cast<uint>((float)v);\r
+ #endif\r
+ }\r
}\r
}\r
}\r