1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other GpuMaterials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or bpied warranties, including, but not limited to, the bpied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
44 #include "mcwutil.hpp"
47 using namespace cv::ocl;
49 #if !defined (HAVE_OPENCL)
51 void cv::ocl::PyrLKOpticalFlow::sparse(const oclMat &, const oclMat &, const oclMat &, oclMat &, oclMat &, oclMat &) { }
// Stub used when OpenCV is built without OpenCL support (HAVE_OPENCL undefined):
// dense optical flow is a no-op.
52 void cv::ocl::PyrLKOpticalFlow::dense(const oclMat &, const oclMat &, oclMat &, oclMat &, oclMat *) { }
54 #else /* !defined (HAVE_OPENCL) */
60 ///////////////////////////OpenCL kernel strings///////////////////////////
61 extern const char *pyrlk;
62 extern const char *pyrlk_no_image;
63 extern const char *operator_setTo;
64 extern const char *operator_convertTo;
65 extern const char *operator_copyToM;
66 extern const char *arithm_mul;
67 extern const char *pyr_down;
// Chooses the OpenCL work-group size (block) and the per-thread patch size
// (how many window pixels each work-item covers) for the LK kernels, from the
// tracking window dimensions.
// NOTE(review): several lines (braces, the else-branch, and any winSize
// scaling by cn) are missing from this view of the file — confirm against the
// full source before editing.
88 void calcPatchSize(cv::Size winSize, int cn, dim3 &block, dim3 &patch, bool isDeviceArch11)
// Wide windows get a wider work-group; "Arch11" devices (per the flag name)
// use smaller groups — presumably older hardware with fewer resources.
92 if (winSize.width > 32 && winSize.width > 2 * winSize.height)
94 block.x = isDeviceArch11 ? 16 : 32;
100 block.y = isDeviceArch11 ? 8 : 16;
// Patch = window size divided by block size, rounded up (ceiling division).
103 patch.x = (winSize.width + block.x - 1) / block.x;
104 patch.y = (winSize.height + block.y - 1) / block.y;
// Kernels are 2-D; the z dimension is always 1.
106 block.z = patch.z = 1;
/// Integer ceiling division: the smallest k such that k * grain >= total.
/// @param total  item count; assumed >= 0
/// @param grain  group size; assumed > 0 (undefined behavior if 0)
/// @return ceil(total / grain)
inline int divUp(int total, int grain)
{
    return (total + grain - 1) / grain;
}
115 ///////////////////////////////////////////////////////////////////////////
116 //////////////////////////////// ConvertTo ////////////////////////////////
117 ///////////////////////////////////////////////////////////////////////////
// Launches the "convert_to_S<depth>" OpenCL kernel: copies src into dst with
// the element-wise transform dst = src * alpha + beta (saturating to dst's type).
// NOTE(review): the declaration of the stream used at idxStr (an
// ostringstream, presumably) and the function/if braces are missing from this
// view of the file — confirm against the full source.
118 static void convert_run_cus(const oclMat &src, oclMat &dst, double alpha, double beta)
// Kernel name is suffixed with the source depth, e.g. "convert_to_S0" for CV_8U.
120 string kernelName = "convert_to_S";
122 idxStr << src.depth();
123 kernelName += idxStr.str();
// Scale/offset are passed to the kernel as single-precision floats.
124 float alpha_f = (float)alpha, beta_f = (float)beta;
125 CV_DbgAssert(src.rows == dst.rows && src.cols == dst.cols);
126 vector<pair<size_t , const void *> > args;
// One 16x16 work-group per tile; global size rounded up to a multiple of it.
127 size_t localThreads[3] = {16, 16, 1};
128 size_t globalThreads[3];
129 globalThreads[0] = (dst.cols + localThreads[0] - 1) / localThreads[0] * localThreads[0];
130 globalThreads[1] = (dst.rows + localThreads[1] - 1) / localThreads[1] * localThreads[1];
131 globalThreads[2] = 1;
// Step/offset are converted from bytes to elements for the kernel.
132 int dststep_in_pixel = dst.step / dst.elemSize(), dstoffset_in_pixel = dst.offset / dst.elemSize();
133 int srcstep_in_pixel = src.step / src.elemSize(), srcoffset_in_pixel = src.offset / src.elemSize();
// The 8UC1 kernel processes 4 pixels per work-item, so shrink the x range.
134 if(dst.type() == CV_8UC1)
136 globalThreads[0] = ((dst.cols + 4) / 4 + localThreads[0]) / localThreads[0] * localThreads[0];
138 args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data ));
139 args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data ));
140 args.push_back( make_pair( sizeof(cl_int) , (void *)&src.cols ));
141 args.push_back( make_pair( sizeof(cl_int) , (void *)&src.rows ));
142 args.push_back( make_pair( sizeof(cl_int) , (void *)&srcstep_in_pixel ));
143 args.push_back( make_pair( sizeof(cl_int) , (void *)&srcoffset_in_pixel ));
144 args.push_back( make_pair( sizeof(cl_int) , (void *)&dststep_in_pixel ));
145 args.push_back( make_pair( sizeof(cl_int) , (void *)&dstoffset_in_pixel ));
146 args.push_back( make_pair( sizeof(cl_float) , (void *)&alpha_f ));
147 args.push_back( make_pair( sizeof(cl_float) , (void *)&beta_f ));
// CLFLUSH: enqueue and flush without blocking for completion.
148 openCLExecuteKernel2(dst.clCxt , &operator_convertTo, kernelName, globalThreads,
149 localThreads, args, dst.oclchannels(), dst.depth(), CLFLUSH);
// File-local analogue of oclMat::convertTo: dst = src converted to rtype with
// optional scaling (alpha) and offset (beta).
// NOTE(review): the declaration of `temp` (used below), the negative-rtype
// handling, and the braces/early-return for the no-op case are missing from
// this view of the file — confirm against the full source.
151 void convertTo( const oclMat &src, oclMat &m, int rtype, double alpha = 1, double beta = 0 );
152 void convertTo( const oclMat &src, oclMat &dst, int rtype, double alpha, double beta )
154 //cout << "cv::ocl::oclMat::convertTo()" << endl;
// noScale: alpha == 1 and beta == 0 within double-precision epsilon.
156 bool noScale = fabs(alpha - 1) < std::numeric_limits<double>::epsilon()
157 && fabs(beta) < std::numeric_limits<double>::epsilon();
// Keep the source channel count; only the depth comes from rtype.
162 rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), src.oclchannels());
164 int sdepth = src.depth(), ddepth = CV_MAT_DEPTH(rtype);
// Same depth and no scaling: conversion degenerates to a copy.
165 if( sdepth == ddepth && noScale )
// If converting in place with a depth change, snapshot src into temp first so
// the kernel does not read what it is overwriting.
172 const oclMat *psrc = &src;
173 if( sdepth != ddepth && psrc == &dst )
174 psrc = &(temp = src);
176 dst.create( src.size(), rtype );
177 convert_run_cus(*psrc, dst, alpha, beta);
180 ///////////////////////////////////////////////////////////////////////////
181 //////////////////////////////// setTo ////////////////////////////////////
182 ///////////////////////////////////////////////////////////////////////////
183 //oclMat &operator = (const Scalar &s)
185 // //cout << "cv::ocl::oclMat::=" << endl;
// Fills every element of dst with `scalar` (no mask) by launching the given
// setTo kernel. The scalar is first saturate_cast to dst's depth and packed
// into a kernel argument; the GENTYPE compile option selects the OpenCL-side
// element type.
// NOTE(review): the declaration of the `val` union (and `i2val`), the
// switch(depth) statement with its case labels, and most braces are missing
// from this view of the file — confirm against the full source before editing.
189 static void set_to_withoutmask_run_cus(const oclMat &dst, const Scalar &scalar, string kernelName)
191 vector<pair<size_t , const void *> > args;
// 16x16 work-groups; global size rounded up to a work-group multiple.
193 size_t localThreads[3] = {16, 16, 1};
194 size_t globalThreads[3];
195 globalThreads[0] = (dst.cols + localThreads[0] - 1) / localThreads[0] * localThreads[0];
196 globalThreads[1] = (dst.rows + localThreads[1] - 1) / localThreads[1] * localThreads[1];
197 globalThreads[2] = 1;
// Step/offset converted from bytes to elements for the kernel.
198 int step_in_pixel = dst.step / dst.elemSize(), offset_in_pixel = dst.offset / dst.elemSize();
// 8UC1 kernel writes 4 pixels per work-item.
199 if(dst.type() == CV_8UC1)
201 globalThreads[0] = ((dst.cols + 4) / 4 + localThreads[0] - 1) / localThreads[0] * localThreads[0];
203 char compile_option[32];
// --- CV_8U: saturate scalar to uchar ---
217 val.uval.s[0] = saturate_cast<uchar>(scalar.val[0]);
218 val.uval.s[1] = saturate_cast<uchar>(scalar.val[1]);
219 val.uval.s[2] = saturate_cast<uchar>(scalar.val[2]);
220 val.uval.s[3] = saturate_cast<uchar>(scalar.val[3]);
221 switch(dst.oclchannels())
224 sprintf(compile_option, "-D GENTYPE=uchar");
225 args.push_back( make_pair( sizeof(cl_uchar) , (void *)&val.uval.s[0] ));
228 sprintf(compile_option, "-D GENTYPE=uchar4");
229 args.push_back( make_pair( sizeof(cl_uchar4) , (void *)&val.uval ));
232 CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
// --- CV_8S: saturate scalar to char ---
236 val.cval.s[0] = saturate_cast<char>(scalar.val[0]);
237 val.cval.s[1] = saturate_cast<char>(scalar.val[1]);
238 val.cval.s[2] = saturate_cast<char>(scalar.val[2]);
239 val.cval.s[3] = saturate_cast<char>(scalar.val[3]);
240 switch(dst.oclchannels())
243 sprintf(compile_option, "-D GENTYPE=char");
244 args.push_back( make_pair( sizeof(cl_char) , (void *)&val.cval.s[0] ));
247 sprintf(compile_option, "-D GENTYPE=char4");
248 args.push_back( make_pair( sizeof(cl_char4) , (void *)&val.cval ));
251 CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
// --- CV_16U: saturate scalar to ushort ---
255 val.usval.s[0] = saturate_cast<ushort>(scalar.val[0]);
256 val.usval.s[1] = saturate_cast<ushort>(scalar.val[1]);
257 val.usval.s[2] = saturate_cast<ushort>(scalar.val[2]);
258 val.usval.s[3] = saturate_cast<ushort>(scalar.val[3]);
259 switch(dst.oclchannels())
262 sprintf(compile_option, "-D GENTYPE=ushort");
263 args.push_back( make_pair( sizeof(cl_ushort) , (void *)&val.usval.s[0] ));
266 sprintf(compile_option, "-D GENTYPE=ushort4");
267 args.push_back( make_pair( sizeof(cl_ushort4) , (void *)&val.usval ));
270 CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
// --- CV_16S: saturate scalar to short ---
274 val.shval.s[0] = saturate_cast<short>(scalar.val[0]);
275 val.shval.s[1] = saturate_cast<short>(scalar.val[1]);
276 val.shval.s[2] = saturate_cast<short>(scalar.val[2]);
277 val.shval.s[3] = saturate_cast<short>(scalar.val[3]);
278 switch(dst.oclchannels())
281 sprintf(compile_option, "-D GENTYPE=short");
282 args.push_back( make_pair( sizeof(cl_short) , (void *)&val.shval.s[0] ));
285 sprintf(compile_option, "-D GENTYPE=short4");
286 args.push_back( make_pair( sizeof(cl_short4) , (void *)&val.shval ));
289 CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
// --- CV_32S: saturate scalar to int (2-channel has a dedicated int2 pack) ---
293 val.ival.s[0] = saturate_cast<int>(scalar.val[0]);
294 val.ival.s[1] = saturate_cast<int>(scalar.val[1]);
295 val.ival.s[2] = saturate_cast<int>(scalar.val[2]);
296 val.ival.s[3] = saturate_cast<int>(scalar.val[3]);
297 switch(dst.oclchannels())
300 sprintf(compile_option, "-D GENTYPE=int");
301 args.push_back( make_pair( sizeof(cl_int) , (void *)&val.ival.s[0] ));
304 sprintf(compile_option, "-D GENTYPE=int2");
306 i2val.s[0] = val.ival.s[0];
307 i2val.s[1] = val.ival.s[1];
308 args.push_back( make_pair( sizeof(cl_int2) , (void *)&i2val ));
311 sprintf(compile_option, "-D GENTYPE=int4");
312 args.push_back( make_pair( sizeof(cl_int4) , (void *)&val.ival ));
315 CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
// --- CV_32F: plain cast to float ---
319 val.fval.s[0] = (float)scalar.val[0];
320 val.fval.s[1] = (float)scalar.val[1];
321 val.fval.s[2] = (float)scalar.val[2];
322 val.fval.s[3] = (float)scalar.val[3];
323 switch(dst.oclchannels())
326 sprintf(compile_option, "-D GENTYPE=float");
327 args.push_back( make_pair( sizeof(cl_float) , (void *)&val.fval.s[0] ));
330 sprintf(compile_option, "-D GENTYPE=float4");
331 args.push_back( make_pair( sizeof(cl_float4) , (void *)&val.fval ));
334 CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
// --- CV_64F: doubles passed through unchanged ---
338 val.dval.s[0] = scalar.val[0];
339 val.dval.s[1] = scalar.val[1];
340 val.dval.s[2] = scalar.val[2];
341 val.dval.s[3] = scalar.val[3];
342 switch(dst.oclchannels())
345 sprintf(compile_option, "-D GENTYPE=double");
346 args.push_back( make_pair( sizeof(cl_double) , (void *)&val.dval.s[0] ));
349 sprintf(compile_option, "-D GENTYPE=double4");
350 args.push_back( make_pair( sizeof(cl_double4) , (void *)&val.dval ));
353 CV_Error(CV_StsUnsupportedFormat, "unsupported channels");
357 CV_Error(CV_StsUnsupportedFormat, "unknown depth");
// On OpenCL 1.2, a contiguous full-width matrix can be filled directly with
// clEnqueueFillBuffer (args[0] holds the packed scalar pattern) — no kernel.
359 #ifdef CL_VERSION_1_2
360 if(dst.offset == 0 && dst.cols == dst.wholecols)
362 clEnqueueFillBuffer(dst.clCxt->impl->clCmdQueue, (cl_mem)dst.data, args[0].second, args[0].first, 0, dst.step * dst.rows, 0, NULL, NULL);
// Otherwise (sub-matrix), fall back to the setTo kernel.
366 args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data ));
367 args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols ));
368 args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows ));
369 args.push_back( make_pair( sizeof(cl_int) , (void *)&step_in_pixel ));
370 args.push_back( make_pair( sizeof(cl_int) , (void *)&offset_in_pixel));
371 openCLExecuteKernel2(dst.clCxt , &operator_setTo, kernelName, globalThreads,
372 localThreads, args, -1, -1, compile_option, CLFLUSH);
// Pre-1.2 path: always use the kernel.
375 args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data ));
376 args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols ));
377 args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows ));
378 args.push_back( make_pair( sizeof(cl_int) , (void *)&step_in_pixel ));
379 args.push_back( make_pair( sizeof(cl_int) , (void *)&offset_in_pixel));
380 openCLExecuteKernel2(dst.clCxt , &operator_setTo, kernelName, globalThreads,
381 localThreads, args, -1, -1, compile_option, CLFLUSH);
// File-local analogue of oclMat::setTo (no mask): fills src with `scalar`.
// 8UC1 gets a specialized 4-pixels-per-work-item kernel.
// NOTE(review): the else branch and the `return src;` statement are missing
// from this view of the file — confirm against the full source.
385 static oclMat &setTo(oclMat &src, const Scalar &scalar)
// Depth must be one of the 7 OpenCV depths (CV_8U .. CV_64F).
387 CV_Assert( src.depth() >= 0 && src.depth() <= 6 );
388 CV_DbgAssert( !src.empty());
390 if(src.type() == CV_8UC1)
392 set_to_withoutmask_run_cus(src, scalar, "set_to_without_mask_C1_D0");
396 set_to_withoutmask_run_cus(src, scalar, "set_to_without_mask");
402 ///////////////////////////////////////////////////////////////////////////
403 ////////////////////////////////// CopyTo /////////////////////////////////
404 ///////////////////////////////////////////////////////////////////////////
405 // static void copy_to_with_mask_cus(const oclMat &src, oclMat &dst, const oclMat &mask, string kernelName)
407 // CV_DbgAssert( dst.rows == mask.rows && dst.cols == mask.cols &&
408 // src.rows == dst.rows && src.cols == dst.cols
409 // && mask.type() == CV_8UC1);
411 // vector<pair<size_t , const void *> > args;
413 // std::string string_types[4][7] = {{"uchar", "char", "ushort", "short", "int", "float", "double"},
414 // {"uchar2", "char2", "ushort2", "short2", "int2", "float2", "double2"},
415 // {"uchar3", "char3", "ushort3", "short3", "int3", "float3", "double3"},
416 // {"uchar4", "char4", "ushort4", "short4", "int4", "float4", "double4"}
418 // char compile_option[32];
419 // sprintf(compile_option, "-D GENTYPE=%s", string_types[dst.oclchannels() - 1][dst.depth()].c_str());
420 // size_t localThreads[3] = {16, 16, 1};
421 // size_t globalThreads[3];
423 // globalThreads[0] = divUp(dst.cols, localThreads[0]) * localThreads[0];
424 // globalThreads[1] = divUp(dst.rows, localThreads[1]) * localThreads[1];
425 // globalThreads[2] = 1;
427 // int dststep_in_pixel = dst.step / dst.elemSize(), dstoffset_in_pixel = dst.offset / dst.elemSize();
428 // int srcstep_in_pixel = src.step / src.elemSize(), srcoffset_in_pixel = src.offset / src.elemSize();
430 // args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data ));
431 // args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data ));
432 // args.push_back( make_pair( sizeof(cl_mem) , (void *)&mask.data ));
433 // args.push_back( make_pair( sizeof(cl_int) , (void *)&src.cols ));
434 // args.push_back( make_pair( sizeof(cl_int) , (void *)&src.rows ));
435 // args.push_back( make_pair( sizeof(cl_int) , (void *)&srcstep_in_pixel ));
436 // args.push_back( make_pair( sizeof(cl_int) , (void *)&srcoffset_in_pixel ));
437 // args.push_back( make_pair( sizeof(cl_int) , (void *)&dststep_in_pixel ));
438 // args.push_back( make_pair( sizeof(cl_int) , (void *)&dstoffset_in_pixel ));
439 // args.push_back( make_pair( sizeof(cl_int) , (void *)&mask.step ));
440 // args.push_back( make_pair( sizeof(cl_int) , (void *)&mask.offset ));
442 // openCLExecuteKernel2(dst.clCxt , &operator_copyToM, kernelName, globalThreads,
443 // localThreads, args, -1, -1, compile_option, CLFLUSH);
446 static void copyTo(const oclMat &src, oclMat &m )
448 CV_DbgAssert(!src.empty());
449 m.create(src.size(), src.type());
450 openCLCopyBuffer2D(src.clCxt, m.data, m.step, m.offset,
451 src.data, src.step, src.cols * src.elemSize(), src.rows, src.offset);
454 // static void copyTo(const oclMat &src, oclMat &mat, const oclMat &mask)
462 // mat.create(src.size(), src.type());
463 // copy_to_with_mask_cus(src, mat, mask, "copy_to_with_mask");
// Launches a unary element-wise arithmetic kernel (e.g. "arithm_muls") with
// src1 as input, dst as pre-allocated output, and an optional float scalar
// appended as the last kernel argument.
// NOTE(review): braces and the tail of the globalThreads initializer (rows /
// z components) are missing from this view of the file — confirm against the
// full source before editing.
467 static void arithmetic_run(const oclMat &src1, oclMat &dst, string kernelName, const char **kernelString, void *_scalar)
// Reject CV_64F on devices without the fp64 extension.
469 if(src1.clCxt -> impl -> double_support == 0 && src1.type() == CV_64F)
471 CV_Error(CV_GpuNotSupported, "Selected device don't support double\r\n");
475 //dst.create(src1.size(), src1.type());
476 //CV_Assert(src1.cols == src2.cols && src2.cols == dst.cols &&
477 // src1.rows == src2.rows && src2.rows == dst.rows);
// dst must already match src1's geometry and type (no allocation here).
478 CV_Assert(src1.cols == dst.cols &&
479 src1.rows == dst.rows);
481 CV_Assert(src1.type() == dst.type());
482 CV_Assert(src1.depth() != CV_8S);
484 Context *clCxt = src1.clCxt;
485 //int channels = dst.channels();
486 //int depth = dst.depth();
488 //int vector_lengths[4][7] = {{4, 0, 4, 4, 1, 1, 1},
489 // {4, 0, 4, 4, 1, 1, 1},
490 // {4, 0, 4, 4, 1, 1, 1},
491 // {4, 0, 4, 4, 1, 1, 1}
494 //size_t vector_length = vector_lengths[channels-1][depth];
495 //int offset_cols = (dst.offset / dst.elemSize1()) & (vector_length - 1);
496 //int cols = divUp(dst.cols * channels + offset_cols, vector_length);
498 size_t localThreads[3] = { 16, 16, 1 };
499 //size_t globalThreads[3] = { divUp(cols, localThreads[0]) * localThreads[0],
500 // divUp(dst.rows, localThreads[1]) * localThreads[1],
// Global size is one work-item per pixel (runner rounds up internally).
503 size_t globalThreads[3] = { src1.cols,
// Row length in bytes, used by the kernel for bounds checks.
508 int dst_step1 = dst.cols * dst.elemSize();
509 vector<pair<size_t , const void *> > args;
510 args.push_back( make_pair( sizeof(cl_mem), (void *)&src1.data ));
511 args.push_back( make_pair( sizeof(cl_int), (void *)&src1.step ));
512 args.push_back( make_pair( sizeof(cl_int), (void *)&src1.offset ));
513 //args.push_back( make_pair( sizeof(cl_mem), (void *)&src2.data ));
514 //args.push_back( make_pair( sizeof(cl_int), (void *)&src2.step ));
515 //args.push_back( make_pair( sizeof(cl_int), (void *)&src2.offset ));
516 args.push_back( make_pair( sizeof(cl_mem), (void *)&dst.data ));
517 args.push_back( make_pair( sizeof(cl_int), (void *)&dst.step ));
518 args.push_back( make_pair( sizeof(cl_int), (void *)&dst.offset ));
519 args.push_back( make_pair( sizeof(cl_int), (void *)&src1.rows ));
520 args.push_back( make_pair( sizeof(cl_int), (void *)&src1.cols ));
521 args.push_back( make_pair( sizeof(cl_int), (void *)&dst_step1 ));
// Scalar is dereferenced unconditionally — _scalar must be non-NULL here
// (the NULL check is commented out).
523 //if(_scalar != NULL)
525 float scalar1 = *((float *)_scalar);
526 args.push_back( make_pair( sizeof(float), (float *)&scalar1 ));
529 openCLExecuteKernel2(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, src1.depth(), CLFLUSH);
532 static void multiply_cus(const oclMat &src1, oclMat &dst, float scalar)
534 arithmetic_run(src1, dst, "arithm_muls", &arithm_mul, (void *)(&scalar));
537 static void pyrdown_run_cus(const oclMat &src, const oclMat &dst)
540 CV_Assert(src.type() == dst.type());
541 CV_Assert(src.depth() != CV_8S);
543 Context *clCxt = src.clCxt;
545 string kernelName = "pyrDown";
547 size_t localThreads[3] = { 256, 1, 1 };
548 size_t globalThreads[3] = { src.cols, dst.rows, 1};
550 vector<pair<size_t , const void *> > args;
551 args.push_back( make_pair( sizeof(cl_mem), (void *)&src.data ));
552 args.push_back( make_pair( sizeof(cl_int), (void *)&src.step ));
553 args.push_back( make_pair( sizeof(cl_int), (void *)&src.rows));
554 args.push_back( make_pair( sizeof(cl_int), (void *)&src.cols));
555 args.push_back( make_pair( sizeof(cl_mem), (void *)&dst.data ));
556 args.push_back( make_pair( sizeof(cl_int), (void *)&dst.step ));
557 args.push_back( make_pair( sizeof(cl_int), (void *)&dst.cols));
559 openCLExecuteKernel2(clCxt, &pyr_down, kernelName, globalThreads, localThreads, args, src.oclchannels(), src.depth(), CLFLUSH);
562 static void pyrDown_cus(const oclMat &src, oclMat &dst)
564 CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
566 dst.create((src.rows + 1) / 2, (src.cols + 1) / 2, src.type());
568 pyrdown_run_cus(src, dst);
572 //struct MultiplyScalar
574 // MultiplyScalar(double val_, double scale_) : val(val_), scale(scale_) {}
575 // double operator ()(double a) const
577 // return (scale * a * val);
580 // const double scale;
583 //void callF(const oclMat& src, oclMat& dst, MultiplyScalar op, int mask)
587 // src.download(srcTemp);
588 // dst.download(dstTemp);
593 // for(i = 0; i < srcTemp.rows; i++)
595 // for(j = 0; j < srcTemp.cols; j++)
597 // for(k = 0; k < srcTemp.channels(); k++)
599 // ((float*)dstTemp.data)[srcTemp.channels() * (i * srcTemp.rows + j) + k] = (float)op(((float*)srcTemp.data)[srcTemp.channels() * (i * srcTemp.rows + j) + k]);
607 //static inline bool isAligned(const unsigned char* ptr, size_t size)
609 // return reinterpret_cast<size_t>(ptr) % size == 0;
612 //static inline bool isAligned(size_t step, size_t size)
614 // return step % size == 0;
617 //void callT(const oclMat& src, oclMat& dst, MultiplyScalar op, int mask)
619 // if (!isAligned(src.data, 4 * sizeof(double)) || !isAligned(src.step, 4 * sizeof(double)) ||
620 // !isAligned(dst.data, 4 * sizeof(double)) || !isAligned(dst.step, 4 * sizeof(double)))
622 // callF(src, dst, op, mask);
628 // src.download(srcTemp);
629 // dst.download(dstTemp);
635 // for(i = 0; i < srcTemp.rows; i++)
637 // const double* srcRow = (const double*)srcTemp.data + i * srcTemp.rows;
638 // double* dstRow = (double*)dstTemp.data + i * dstTemp.rows;;
640 // for(j = 0; j < srcTemp.cols; j++)
642 // x_shifted = j * 4;
644 // if(x_shifted + 4 - 1 < srcTemp.cols)
646 // dstRow[x_shifted ] = op(srcRow[x_shifted ]);
647 // dstRow[x_shifted + 1] = op(srcRow[x_shifted + 1]);
648 // dstRow[x_shifted + 2] = op(srcRow[x_shifted + 2]);
649 // dstRow[x_shifted + 3] = op(srcRow[x_shifted + 3]);
653 // for (int real_x = x_shifted; real_x < srcTemp.cols; ++real_x)
655 // ((float*)dstTemp.data)[i * srcTemp.rows + real_x] = op(((float*)srcTemp.data)[i * srcTemp.rows + real_x]);
662 //void multiply(const oclMat& src1, double val, oclMat& dst, double scale = 1.0f);
663 //void multiply(const oclMat& src1, double val, oclMat& dst, double scale)
665 // MultiplyScalar op(val, scale);
666 // //if(src1.channels() == 1 && dst.channels() == 1)
668 // // callT(src1, dst, op, 0);
672 // callF(src1, dst, op, 0);
// Creates an OpenCL 2D image ("texture") matching mat's layout and copies
// mat's buffer contents into it. Used so the LK kernels can sample I/J with
// image reads. Caller releases the returned cl_mem via releaseTexture().
// NOTE(review): the declarations of `texture`, `err` and `desc`, the
// depth/channel if-else structure, most of the clCreateImage2D argument list,
// and the `return texture;` are missing from this view of the file — confirm
// against the full source before editing.
676 static cl_mem bindTexture(const oclMat &mat, int depth, int channels)
679 cl_image_format format;
// Channel data type from OpenCV depth: CV_8U -> UNSIGNED_INT8, CV_32F -> FLOAT.
683 format.image_channel_data_type = CL_UNSIGNED_INT8;
687 format.image_channel_data_type = CL_FLOAT;
// Channel order from channel count (1 -> R, 3 -> RGB, 4 -> RGBA).
691 format.image_channel_order = CL_R;
693 else if(channels == 3)
695 format.image_channel_order = CL_RGB;
697 else if(channels == 4)
699 format.image_channel_order = CL_RGBA;
// OpenCL 1.2+: use clCreateImage with an image descriptor.
701 #ifdef CL_VERSION_1_2
703 desc.image_type = CL_MEM_OBJECT_IMAGE2D;
// Width is the full row pitch in elements, not just mat.cols, so the
// subsequent buffer->image copy can transfer whole padded rows.
704 desc.image_width = mat.step / mat.elemSize();
705 desc.image_height = mat.rows;
706 desc.image_depth = 0;
707 desc.image_array_size = 1;
708 desc.image_row_pitch = 0;
709 desc.image_slice_pitch = 0;
711 desc.num_mip_levels = 0;
712 desc.num_samples = 0;
713 texture = clCreateImage(mat.clCxt->impl->clContext, CL_MEM_READ_WRITE, &format, &desc, NULL, &err);
// Pre-1.2 fallback: deprecated clCreateImage2D.
715 texture = clCreateImage2D(
716 mat.clCxt->impl->clContext,
719 mat.step / mat.elemSize(),
// Copy the whole (padded) buffer into the image.
725 size_t origin[] = { 0, 0, 0 };
726 size_t region[] = { mat.step / mat.elemSize(), mat.rows, 1 };
727 clEnqueueCopyBufferToImage(mat.clCxt->impl->clCmdQueue, (cl_mem)mat.data, texture, 0, origin, region, 0, NULL, 0);
// Releases an image object previously created by bindTexture.
// NOTE(review): the body is not visible in this view of the file — it
// presumably releases `texture` (clReleaseMemObject / openCLFree); confirm.
733 static void releaseTexture(cl_mem texture)
// Launches one pyramid level of the sparse LK kernel: refines nextPts for
// `ptcount` tracked points between images I and J, writing per-point status
// and (optionally) error. Uses image2d_t sampling where the platform supports
// it, otherwise a plain-buffer kernel variant (pyrlk_no_image).
// NOTE(review): declarations of `pid`, `ITex`/`JTex` and `calcErr`, plus
// braces, are missing from this view of the file — confirm against the full
// source before editing.
738 static void lkSparse_run(oclMat &I, oclMat &J,
739 const oclMat &prevPts, oclMat &nextPts, oclMat &status, oclMat& err, bool /*GET_MIN_EIGENVALS*/, int ptcount,
740 int level, /*dim3 block, */dim3 patch, Size winSize, int iters)
742 Context *clCxt = I.clCxt;
// Query the platform name to decide whether to use the image (texture) path;
// NVIDIA and Intel platforms are forced onto the buffer path here.
743 char platform[256] = {0};
745 clGetDeviceInfo(clCxt->impl->devices, CL_DEVICE_PLATFORM, sizeof(pid), &pid, NULL);
746 clGetPlatformInfo(pid, CL_PLATFORM_NAME, 256, platform, NULL);
747 std::string namestr = platform;
748 bool isImageSupported = true;
749 if(namestr.find("NVIDIA")!=string::npos || namestr.find("Intel")!=string::npos)
750 isImageSupported = false;
// Row pitch in elements, needed by the buffer-based kernel for addressing.
752 int elemCntPerRow = I.step / I.elemSize();
754 string kernelName = "lkSparse";
// One 8-wide work-group per point; group height depends on the kernel variant.
757 size_t localThreads[3] = { 8, isImageSupported?8:32, 1 };
758 size_t globalThreads[3] = { 8 * ptcount, isImageSupported?8:32, 1};
760 int cn = I.oclchannels();
772 vector<pair<size_t , const void *> > args;
// Image path: upload I and J into texture objects; buffer path: pass the raw
// cl_mem handles directly.
775 if (isImageSupported)
777 ITex = bindTexture(I, I.depth(), cn);
778 JTex = bindTexture(J, J.depth(), cn);
782 ITex = (cl_mem)I.data;
783 JTex = (cl_mem)J.data;
786 args.push_back( make_pair( sizeof(cl_mem), (void *)&ITex ));
787 args.push_back( make_pair( sizeof(cl_mem), (void *)&JTex ));
788 //cl_mem clmD = clCreateBuffer(clCxt, CL_MEM_READ_WRITE, ptcount * sizeof(float), NULL, NULL);
789 args.push_back( make_pair( sizeof(cl_mem), (void *)&prevPts.data ));
790 args.push_back( make_pair( sizeof(cl_int), (void *)&prevPts.step ));
791 args.push_back( make_pair( sizeof(cl_mem), (void *)&nextPts.data ));
792 args.push_back( make_pair( sizeof(cl_int), (void *)&nextPts.step ));
793 args.push_back( make_pair( sizeof(cl_mem), (void *)&status.data ));
794 args.push_back( make_pair( sizeof(cl_mem), (void *)&err.data ));
795 args.push_back( make_pair( sizeof(cl_int), (void *)&level ));
796 args.push_back( make_pair( sizeof(cl_int), (void *)&I.rows ));
797 args.push_back( make_pair( sizeof(cl_int), (void *)&I.cols ));
// The buffer-based kernel needs the explicit row pitch as an extra argument.
798 if (!isImageSupported)
800 args.push_back( make_pair( sizeof(cl_int), (void *)&elemCntPerRow ) );
802 args.push_back( make_pair( sizeof(cl_int), (void *)&patch.x ));
803 args.push_back( make_pair( sizeof(cl_int), (void *)&patch.y ));
804 args.push_back( make_pair( sizeof(cl_int), (void *)&cn ));
805 args.push_back( make_pair( sizeof(cl_int), (void *)&winSize.width ));
806 args.push_back( make_pair( sizeof(cl_int), (void *)&winSize.height ));
807 args.push_back( make_pair( sizeof(cl_int), (void *)&iters ));
808 args.push_back( make_pair( sizeof(cl_char), (void *)&calcErr ));
809 //args.push_back( make_pair( sizeof(cl_char), (void *)&GET_MIN_EIGENVALS ));
// Run the appropriate kernel variant; textures are released after the image
// path (kernel launch is flushed, not awaited — CLFLUSH).
811 if (isImageSupported)
813 openCLExecuteKernel2(clCxt, &pyrlk, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), CLFLUSH);
815 releaseTexture(ITex);
816 releaseTexture(JTex);
820 //printf("Warning: The image2d_t is not supported by the device. Using alternative method!\n");
821 openCLExecuteKernel2(clCxt, &pyrlk_no_image, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), CLFLUSH);
// Sparse pyramidal Lucas-Kanade optical flow: tracks the feature points
// prevPts from prevImg into nextImg, producing nextPts, a per-point status
// mask, and optionally a per-point error measure (err may be NULL).
// NOTE(review): several structural lines (braces, empty-input early return,
// the `if (err)` / else around error allocation, the `bool calcErr` setup) are
// missing from this view of the file — confirm against the full source.
825 void cv::ocl::PyrLKOpticalFlow::sparse(const oclMat &prevImg, const oclMat &nextImg, const oclMat &prevPts, oclMat &nextPts, oclMat &status, oclMat *err)
831 //if (err) err->release();
// Clamp tunables to sane ranges.
835 derivLambda = std::min(std::max(derivLambda, 0.0), 1.0);
837 iters = std::min(std::max(iters, 0), 100);
839 const int cn = prevImg.oclchannels();
842 calcPatchSize(winSize, cn, block, patch, isDeviceArch11_);
844 CV_Assert(derivLambda >= 0);
845 CV_Assert(maxLevel >= 0 && winSize.width > 2 && winSize.height > 2);
846 CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type());
// Kernel limit: each thread may cover at most a 5x5 patch of the window.
847 CV_Assert(patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6);
848 CV_Assert(prevPts.rows == 1 && prevPts.type() == CV_32FC2);
851 CV_Assert(nextPts.size() == prevPts.size() && nextPts.type() == CV_32FC2);
853 ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts);
// Seed nextPts: scale the initial guesses (or prevPts) down to the coarsest
// pyramid level's coordinate system.
855 oclMat temp1 = (useInitialFlow ? nextPts : prevPts).reshape(1);
856 oclMat temp2 = nextPts.reshape(1);
857 //oclMat scalar(temp1.rows, temp1.cols, temp1.type(), Scalar(1.0f / (1 << maxLevel) / 2.0f));
858 multiply_cus(temp1, temp2, 1.0f / (1 << maxLevel) / 2.0f);
859 //::multiply(temp1, 1.0f / (1 << maxLevel) / 2.0f, temp2);
// All points start out marked as successfully tracked.
861 ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status);
862 //status.setTo(Scalar::all(1));
863 setTo(status, Scalar::all(1));
// Error buffer: allocated here when the caller passed none (NOTE(review):
// this `new` appears to have no matching delete in this view — possible leak;
// confirm against the full source).
868 err = new oclMat(1, prevPts.cols, CV_32FC1);
872 ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err);
873 //ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, err);
875 // build the image pyramids.
877 prevPyr_.resize(maxLevel + 1);
878 nextPyr_.resize(maxLevel + 1);
// Level 0 is the full-resolution image converted to CV_32F.
880 if (cn == 1 || cn == 4)
882 //prevImg.convertTo(prevPyr_[0], CV_32F);
883 //nextImg.convertTo(nextPyr_[0], CV_32F);
884 convertTo(prevImg, prevPyr_[0], CV_32F);
885 convertTo(nextImg, nextPyr_[0], CV_32F);
890 // cvtColor(prevImg, buf_, COLOR_BGR2BGRA);
891 // buf_.convertTo(prevPyr_[0], CV_32F);
893 // cvtColor(nextImg, buf_, COLOR_BGR2BGRA);
894 // buf_.convertTo(nextPyr_[0], CV_32F);
897 for (int level = 1; level <= maxLevel; ++level)
899 pyrDown_cus(prevPyr_[level - 1], prevPyr_[level]);
900 pyrDown_cus(nextPyr_[level - 1], nextPyr_[level]);
903 // dI/dx ~ Ix, dI/dy ~ Iy
// Coarse-to-fine refinement: one lkSparse_run per level, finest last.
905 for (int level = maxLevel; level >= 0; level--)
907 lkSparse_run(prevPyr_[level], nextPyr_[level],
908 prevPts, nextPts, status, *err, getMinEigenVals, prevPts.cols,
909 level, /*block, */patch, winSize, iters);
// Block until all enqueued kernels have finished before returning results.
912 clFinish(prevImg.clCxt->impl->clCmdQueue);
// Launches one pyramid level of the dense LK kernel: computes per-pixel flow
// (u, v) between I and J, seeded by the coarser level's flow (prevU, prevV).
// NOTE(review): declarations of `ITex`/`JTex` and `calcErr`, plus braces, are
// missing from this view of the file — confirm against the full source.
918 static void lkDense_run(oclMat &I, oclMat &J, oclMat &u, oclMat &v,
919 oclMat &prevU, oclMat &prevV, oclMat *err, Size winSize, int iters)
921 Context *clCxt = I.clCxt;
// Intel HD Graphics is excluded from the image (texture) path by device name.
922 bool isImageSupported = clCxt->impl->devName.find("Intel(R) HD Graphics") == string::npos;
// Row pitch in elements, for the buffer-based kernel's addressing.
923 int elemCntPerRow = I.step / I.elemSize();
925 string kernelName = "lkDense";
// One work-item per pixel, in 16x16 groups.
927 size_t localThreads[3] = { 16, 16, 1 };
928 size_t globalThreads[3] = { I.cols, I.rows, 1};
930 int cn = I.oclchannels();
// Image path uploads I/J into textures; buffer path passes raw handles.
945 if (isImageSupported)
947 ITex = bindTexture(I, I.depth(), cn);
948 JTex = bindTexture(J, J.depth(), cn);
952 ITex = (cl_mem)I.data;
953 JTex = (cl_mem)J.data;
956 //int2 halfWin = {(winSize.width - 1) / 2, (winSize.height - 1) / 2};
957 //const int patchWidth = 16 + 2 * halfWin.x;
958 //const int patchHeight = 16 + 2 * halfWin.y;
959 //size_t smem_size = 3 * patchWidth * patchHeight * sizeof(int);
961 vector<pair<size_t , const void *> > args;
963 args.push_back( make_pair( sizeof(cl_mem), (void *)&ITex ));
964 args.push_back( make_pair( sizeof(cl_mem), (void *)&JTex ));
966 args.push_back( make_pair( sizeof(cl_mem), (void *)&u.data ));
967 args.push_back( make_pair( sizeof(cl_int), (void *)&u.step ));
968 args.push_back( make_pair( sizeof(cl_mem), (void *)&v.data ));
969 args.push_back( make_pair( sizeof(cl_int), (void *)&v.step ));
970 args.push_back( make_pair( sizeof(cl_mem), (void *)&prevU.data ));
971 args.push_back( make_pair( sizeof(cl_int), (void *)&prevU.step ));
972 args.push_back( make_pair( sizeof(cl_mem), (void *)&prevV.data ));
973 args.push_back( make_pair( sizeof(cl_int), (void *)&prevV.step ));
974 args.push_back( make_pair( sizeof(cl_int), (void *)&I.rows ));
975 args.push_back( make_pair( sizeof(cl_int), (void *)&I.cols ));
// err is currently unused by the kernel (arguments commented out).
976 //args.push_back( make_pair( sizeof(cl_mem), (void *)&(*err).data ));
977 //args.push_back( make_pair( sizeof(cl_int), (void *)&(*err).step ));
978 if (!isImageSupported)
980 args.push_back( make_pair( sizeof(cl_int), (void *)&elemCntPerRow ) );
982 args.push_back( make_pair( sizeof(cl_int), (void *)&winSize.width ));
983 args.push_back( make_pair( sizeof(cl_int), (void *)&winSize.height ));
984 args.push_back( make_pair( sizeof(cl_int), (void *)&iters ));
985 args.push_back( make_pair( sizeof(cl_char), (void *)&calcErr ));
// Run the appropriate kernel variant; textures released after the image path.
987 if (isImageSupported)
989 openCLExecuteKernel2(clCxt, &pyrlk, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), CLFLUSH);
991 releaseTexture(ITex);
992 releaseTexture(JTex);
996 //printf("Warning: The image2d_t is not supported by the device. Using alternative method!\n");
997 openCLExecuteKernel2(clCxt, &pyrlk_no_image, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), CLFLUSH);
// Dense pyramidal Lucas-Kanade optical flow: computes a per-pixel flow field
// (u, v) from prevImg (CV_8UC1) to nextImg. err, if non-NULL, receives a
// per-pixel error map (allocated here; kernel-side fill currently disabled in
// lkDense_run).
// NOTE(review): braces, the `if (err)` guard around err->create, the `int idx`
// initialization, and the idx swap at the end of the level loop are missing
// from this view of the file — confirm against the full source.
1001 void cv::ocl::PyrLKOpticalFlow::dense(const oclMat &prevImg, const oclMat &nextImg, oclMat &u, oclMat &v, oclMat *err)
1003 CV_Assert(prevImg.type() == CV_8UC1);
1004 CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type());
1005 CV_Assert(maxLevel >= 0);
1006 CV_Assert(winSize.width > 2 && winSize.height > 2);
1009 err->create(prevImg.size(), CV_32FC1);
// Build the image pyramids. NOTE(review): level 0 of prevPyr_ keeps the 8-bit
// image while nextPyr_[0] is converted to CV_32F — asymmetry looks deliberate
// but worth confirming against the kernel's expectations.
1011 prevPyr_.resize(maxLevel + 1);
1012 nextPyr_.resize(maxLevel + 1);
1014 prevPyr_[0] = prevImg;
1015 //nextImg.convertTo(nextPyr_[0], CV_32F);
1016 convertTo(nextImg, nextPyr_[0], CV_32F);
1018 for (int level = 1; level <= maxLevel; ++level)
1020 pyrDown_cus(prevPyr_[level - 1], prevPyr_[level]);
1021 pyrDown_cus(nextPyr_[level - 1], nextPyr_[level]);
// Double-buffered flow fields: current level reads prev[idx2], writes [idx].
1024 ensureSizeIsEnough(prevImg.size(), CV_32FC1, uPyr_[0]);
1025 ensureSizeIsEnough(prevImg.size(), CV_32FC1, vPyr_[0]);
1026 ensureSizeIsEnough(prevImg.size(), CV_32FC1, uPyr_[1]);
1027 ensureSizeIsEnough(prevImg.size(), CV_32FC1, vPyr_[1]);
// Initial (coarsest-level) flow guess is zero.
1028 //uPyr_[1].setTo(Scalar::all(0));
1029 //vPyr_[1].setTo(Scalar::all(0));
1030 setTo(uPyr_[1], Scalar::all(0));
1031 setTo(vPyr_[1], Scalar::all(0));
1033 Size winSize2i(winSize.width, winSize.height);
// Coarse-to-fine refinement; err is only produced at the finest level.
1037 for (int level = maxLevel; level >= 0; level--)
1039 int idx2 = (idx + 1) & 1;
1041 lkDense_run(prevPyr_[level], nextPyr_[level], uPyr_[idx], vPyr_[idx], uPyr_[idx2], vPyr_[idx2],
1042 level == 0 ? err : 0, winSize2i, iters);
// Copy the final flow field into the caller's outputs.
1048 //uPyr_[idx].copyTo(u);
1049 //vPyr_[idx].copyTo(v);
1050 copyTo(uPyr_[idx], u);
1051 copyTo(vPyr_[idx], v);
// Block until all enqueued kernels have finished before returning results.
1053 clFinish(prevImg.clCxt->impl->clCmdQueue);
1056 #endif /* !defined (HAVE_CUDA) */