/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels.hpp"

namespace cv
{

/****************************************************************************************\
*                                       split & merge                                    *
\****************************************************************************************/
// De-interleaves one row of a cn-channel array into cn separate planes.
// The first cn%4 channels (or 4, when cn is a multiple of 4) go through a
// dedicated branch; any remaining channels are processed four at a time.
template<typename T> static void
split_( const T* src, T** dst, int len, int cn )
{
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;
    if( k == 1 )
    {
        T* dst0 = dst[0];
        for( i = j = 0; i < len; i++, j += cn )
            dst0[i] = src[j];
    }
    else if( k == 2 )
    {
        T *dst0 = dst[0], *dst1 = dst[1];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j];
            dst1[i] = src[j+1];
        }
    }
    else if( k == 3 )
    {
        T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j];
            dst1[i] = src[j+1];
            dst2[i] = src[j+2];
        }
    }
    else
    {
        T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2], *dst3 = dst[3];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j]; dst1[i] = src[j+1];
            dst2[i] = src[j+2]; dst3[i] = src[j+3];
        }
    }

    for( ; k < cn; k += 4 )
    {
        T *dst0 = dst[k], *dst1 = dst[k+1], *dst2 = dst[k+2], *dst3 = dst[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            dst0[i] = src[j]; dst1[i] = src[j+1];
            dst2[i] = src[j+2]; dst3[i] = src[j+3];
        }
    }
}
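
// Worked sketch (illustrative only, excluded from the build): with cn == 6 the
// dispatch above sets k = 2, so the two-channel branch de-interleaves channels
// 0-1 and the trailing loop handles channels 2-5 in a single four-wide pass.
#if 0
static void example_split_()
{
    uchar src[] = { 0, 1, 2, 3, 4, 5,  10, 11, 12, 13, 14, 15 }; // 2 pixels, 6 channels
    uchar d0[2], d1[2], d2[2], d3[2], d4[2], d5[2];
    uchar* dst[] = { d0, d1, d2, d3, d4, d5 };
    split_(src, dst, 2, 6); // d0 = {0,10}, d1 = {1,11}, ..., d5 = {5,15}
}
#endif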

// Interleaves cn single-channel planes back into one cn-channel row;
// the exact mirror of split_ above.
template<typename T> static void
merge_( const T** src, T* dst, int len, int cn )
{
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;
    if( k == 1 )
    {
        const T* src0 = src[0];
        for( i = j = 0; i < len; i++, j += cn )
            dst[j] = src0[i];
    }
    else if( k == 2 )
    {
        const T *src0 = src[0], *src1 = src[1];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
        }
    }
    else if( k == 3 )
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
            dst[j+2] = src2[i];
        }
    }
    else
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i]; dst[j+1] = src1[i];
            dst[j+2] = src2[i]; dst[j+3] = src3[i];
        }
    }

    for( ; k < cn; k += 4 )
    {
        const T *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            dst[j] = src0[i]; dst[j+1] = src1[i];
            dst[j+2] = src2[i]; dst[j+3] = src3[i];
        }
    }
}
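
// Mirror sketch (illustrative only, excluded from the build): merge_
// re-interleaves what split_ produced, so running one after the other is an
// identity transform.
#if 0
static void example_merge_()
{
    uchar p0[] = { 0, 10 }, p1[] = { 1, 11 }, p2[] = { 2, 12 };
    const uchar* src[] = { p0, p1, p2 };
    uchar dst[6];
    merge_(src, dst, 2, 3); // dst = { 0, 1, 2, 10, 11, 12 }
}
#endif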

static void split8u(const uchar* src, uchar** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void split16u(const ushort* src, ushort** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void split32s(const int* src, int** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void split64s(const int64* src, int64** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void merge8u(const uchar** src, uchar* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

static void merge16u(const ushort** src, ushort* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

static void merge32s(const int** src, int* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

static void merge64s(const int64** src, int64* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn);
typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn);

// Per-depth dispatch tables: signed and unsigned depths of the same element
// size share a kernel, since these routines only rearrange bytes.
static SplitFunc getSplitFunc(int depth)
{
    static SplitFunc splitTab[] =
    {
        (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split16u), (SplitFunc)GET_OPTIMIZED(split16u),
        (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split64s), 0
    };

    return splitTab[depth];
}

static MergeFunc getMergeFunc(int depth)
{
    static MergeFunc mergeTab[] =
    {
        (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u),
        (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0
    };

    return mergeTab[depth];
}

}
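
// Sketch (illustrative only, excluded from the build): the tables above are
// indexed by CV_8U..CV_64F, so same-width depths resolve to the same kernel.
#if 0
static void example_getSplitFunc()
{
    CV_Assert( getSplitFunc(CV_16S) == getSplitFunc(CV_16U) );
    CV_Assert( getSplitFunc(CV_32F) == getSplitFunc(CV_32S) );
}
#endif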

void cv::split(const Mat& src, Mat* mv)
{
    int k, depth = src.depth(), cn = src.channels();
    if( cn == 1 )
    {
        src.copyTo(mv[0]);
        return;
    }

    SplitFunc func = getSplitFunc(depth);
    CV_Assert( func != 0 );

    int esz = (int)src.elemSize(), esz1 = (int)src.elemSize1();
    int blocksize0 = (BLOCK_SIZE + esz-1)/esz;
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);

    arrays[0] = &src;
    for( k = 0; k < cn; k++ )
    {
        mv[k].create(src.dims, src.size, depth);
        arrays[k+1] = &mv[k];
    }

    NAryMatIterator it(arrays, ptrs, cn+1);
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < total; j += blocksize )
        {
            int bsz = std::min(total - j, blocksize);
            func( ptrs[0], &ptrs[1], bsz, cn );

            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( k = 0; k < cn; k++ )
                    ptrs[k+1] += bsz*esz1;
            }
        }
    }
}

#ifdef HAVE_OPENCL

namespace cv {

static bool ocl_split( InputArray _m, OutputArrayOfArrays _mv )
{
    int type = _m.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    String dstargs, dstdecl, processelem;
    for (int i = 0; i < cn; ++i)
    {
        dstargs += format("DECLARE_DST_PARAM(%d)", i);
        dstdecl += format("DECLARE_DATA(%d)", i);
        processelem += format("PROCESS_ELEM(%d)", i);
    }

    ocl::Kernel k("split", ocl::core::split_merge_oclsrc,
                  format("-D T=%s -D OP_SPLIT -D cn=%d -D DECLARE_DST_PARAMS=%s "
                         "-D DECLARE_DATA_N=%s -D PROCESS_ELEMS_N=%s",
                         ocl::memopTypeToStr(depth), cn, dstargs.c_str(),
                         dstdecl.c_str(), processelem.c_str()));
    if (k.empty())
        return false;

    Size size = _m.size();
    _mv.create(cn, 1, depth);
    for (int i = 0; i < cn; ++i)
        _mv.create(size, depth, i);

    std::vector<UMat> dst;
    _mv.getUMatVector(dst);

    int argidx = k.set(0, ocl::KernelArg::ReadOnly(_m.getUMat()));
    for (int i = 0; i < cn; ++i)
        argidx = k.set(argidx, ocl::KernelArg::WriteOnlyNoSize(dst[i]));

    size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::split(InputArray _m, OutputArrayOfArrays _mv)
{
    CV_OCL_RUN(_m.dims() <= 2 && _mv.isUMatVector(),
               ocl_split(_m, _mv))

    Mat m = _m.getMat();
    if( m.empty() )
    {
        _mv.release();
        return;
    }

    CV_Assert( !_mv.fixedType() || _mv.empty() || _mv.type() == m.depth() );

    Size size = m.size();
    int depth = m.depth(), cn = m.channels();
    _mv.create(cn, 1, depth);
    for (int i = 0; i < cn; ++i)
        _mv.create(size, depth, i);

    std::vector<Mat> dst;
    _mv.getMatVector(dst);

    split(m, &dst[0]);
}
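
// Usage sketch (illustrative only, excluded from the build): the common way
// to call the InputArray overload defined above.
#if 0
#include <opencv2/core.hpp>
#include <vector>

static void example_split()
{
    cv::Mat bgr(4, 4, CV_8UC3, cv::Scalar(10, 20, 30));
    std::vector<cv::Mat> planes;
    cv::split(bgr, planes); // planes[0] is all 10, planes[1] all 20, planes[2] all 30
}
#endif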

void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
{
    CV_Assert( mv && n > 0 );

    int depth = mv[0].depth();
    bool allch1 = true;
    int k, cn = 0;
    size_t i;

    for( i = 0; i < n; i++ )
    {
        CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
        allch1 = allch1 && mv[i].channels() == 1;
        cn += mv[i].channels();
    }

    CV_Assert( 0 < cn && cn <= CV_CN_MAX );
    _dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
    Mat dst = _dst.getMat();

    if( n == 1 )
    {
        mv[0].copyTo(dst);
        return;
    }

    // When any input has more than one channel, fall back to mixChannels
    // with an identity pairing over all cn channels.
    if( !allch1 )
    {
        AutoBuffer<int> pairs(cn*2);
        int j, ni=0;

        for( i = 0, j = 0; i < n; i++, j += ni )
        {
            ni = mv[i].channels();
            for( k = 0; k < ni; k++ )
            {
                pairs[(j+k)*2] = j + k;
                pairs[(j+k)*2+1] = j + k;
            }
        }
        mixChannels( mv, n, &dst, 1, &pairs[0], cn );
        return;
    }

    size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
    int blocksize0 = (int)((BLOCK_SIZE + esz-1)/esz);
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);

    arrays[0] = &dst;
    for( k = 0; k < cn; k++ )
        arrays[k+1] = &mv[k];

    NAryMatIterator it(arrays, ptrs, cn+1);
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
    MergeFunc func = getMergeFunc(depth);

    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < total; j += blocksize )
        {
            int bsz = std::min(total - j, blocksize);
            func( (const uchar**)&ptrs[1], ptrs[0], bsz, cn );

            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( int t = 0; t < cn; t++ )
                    ptrs[t+1] += bsz*esz1;
            }
        }
    }
}
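
// Usage sketch (illustrative only, excluded from the build): with all
// single-channel inputs the fast path above runs; mixing a 2-channel input in
// takes the mixChannels-based fallback instead.
#if 0
static void example_merge()
{
    cv::Mat xy(3, 3, CV_32FC2, cv::Scalar(1, 2)), z(3, 3, CV_32FC1, cv::Scalar(3));
    cv::Mat src[] = { xy, z }, xyz;
    cv::merge(src, 2, xyz); // xyz is CV_32FC3, each pixel (1, 2, 3)
}
#endif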

#ifdef HAVE_OPENCL

namespace cv {

static bool ocl_merge( InputArrayOfArrays _mv, OutputArray _dst )
{
    std::vector<UMat> src, ksrc;
    _mv.getUMatVector(src);
    CV_Assert(!src.empty());

    int type = src[0].type(), depth = CV_MAT_DEPTH(type);
    Size size = src[0].size();

    for (size_t i = 0, srcsize = src.size(); i < srcsize; ++i)
    {
        int itype = src[i].type(), icn = CV_MAT_CN(itype), idepth = CV_MAT_DEPTH(itype),
            esz1 = CV_ELEM_SIZE1(idepth);
        if (src[i].dims > 2)
            return false;

        CV_Assert(size == src[i].size() && depth == idepth);

        for (int cn = 0; cn < icn; ++cn)
        {
            UMat tsrc = src[i];
            tsrc.offset += cn * esz1;
            ksrc.push_back(tsrc);
        }
    }
    int dcn = (int)ksrc.size();

    String srcargs, srcdecl, processelem, cndecl;
    for (int i = 0; i < dcn; ++i)
    {
        srcargs += format("DECLARE_SRC_PARAM(%d)", i);
        srcdecl += format("DECLARE_DATA(%d)", i);
        processelem += format("PROCESS_ELEM(%d)", i);
        cndecl += format(" -D scn%d=%d", i, ksrc[i].channels());
    }

    ocl::Kernel k("merge", ocl::core::split_merge_oclsrc,
                  format("-D OP_MERGE -D cn=%d -D T=%s -D DECLARE_SRC_PARAMS_N=%s"
                         " -D DECLARE_DATA_N=%s -D PROCESS_ELEMS_N=%s%s",
                         dcn, ocl::memopTypeToStr(depth), srcargs.c_str(),
                         srcdecl.c_str(), processelem.c_str(), cndecl.c_str()));
    if (k.empty())
        return false;

    _dst.create(size, CV_MAKE_TYPE(depth, dcn));
    UMat dst = _dst.getUMat();

    int argidx = 0;
    for (int i = 0; i < dcn; ++i)
        argidx = k.set(argidx, ocl::KernelArg::ReadOnlyNoSize(ksrc[i]));
    k.set(argidx, ocl::KernelArg::WriteOnly(dst));

    size_t globalsize[2] = { (size_t)dst.cols, (size_t)dst.rows };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::merge(InputArrayOfArrays _mv, OutputArray _dst)
{
    CV_OCL_RUN(_mv.isUMatVector() && _dst.isUMat(),
               ocl_merge(_mv, _dst))

    std::vector<Mat> mv;
    _mv.getMatVector(mv);
    merge(!mv.empty() ? &mv[0] : 0, mv.size(), _dst);
}

/****************************************************************************************\
*                       Generalized split/merge: mixing channels                         *
\****************************************************************************************/

namespace cv
{

// Copies npairs (source channel, destination channel) streams. sdelta/ddelta
// hold the per-pixel strides, i.e. the channel counts of the owning arrays;
// a NULL source pointer means "fill this destination channel with zeros".
template<typename T> static void
mixChannels_( const T** src, const int* sdelta,
              T** dst, const int* ddelta,
              int len, int npairs )
{
    int i, k;
    for( k = 0; k < npairs; k++ )
    {
        const T* s = src[k];
        T* d = dst[k];
        int ds = sdelta[k], dd = ddelta[k];
        if( s )
        {
            for( i = 0; i <= len - 2; i += 2, s += ds*2, d += dd*2 )
            {
                T t0 = s[0], t1 = s[ds];
                d[0] = t0; d[dd] = t1;
            }
            if( i < len )
                d[0] = s[0];
        }
        else
        {
            for( i = 0; i <= len - 2; i += 2, d += dd*2 )
                d[0] = d[dd] = 0;
            if( i < len )
                d[0] = 0;
        }
    }
}
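
// Worked sketch (illustrative only, excluded from the build): extracting
// channel 1 of a packed 3-channel row into a tightly packed plane means
// sdelta = 3 (step over whole pixels) and ddelta = 1.
#if 0
static void example_mixChannels_()
{
    uchar bgr[] = { 1, 2, 3,  4, 5, 6 };          // 2 pixels, 3 channels
    uchar gray[2];
    const uchar* src[] = { bgr + 1 };             // start at channel 1
    uchar* dst[] = { gray };
    int sdelta[] = { 3 }, ddelta[] = { 1 };
    mixChannels_(src, sdelta, dst, ddelta, 2, 1); // gray = { 2, 5 }
}
#endif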

static void mixChannels8u( const uchar** src, const int* sdelta,
                           uchar** dst, const int* ddelta,
                           int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels16u( const ushort** src, const int* sdelta,
                            ushort** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels32s( const int** src, const int* sdelta,
                            int** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels64s( const int64** src, const int* sdelta,
                            int64** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta,
                                 uchar** dst, const int* ddelta, int len, int npairs );

static MixChannelsFunc getMixchFunc(int depth)
{
    static MixChannelsFunc mixchTab[] =
    {
        (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u,
        (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s,
        (MixChannelsFunc)mixChannels64s, 0
    };

    return mixchTab[depth];
}

}

// fromTo holds npairs (global source channel, global destination channel)
// pairs; a negative source index zero-fills the destination channel.
void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, const int* fromTo, size_t npairs )
{
    if( npairs == 0 )
        return;
    CV_Assert( src && nsrcs > 0 && dst && ndsts > 0 && fromTo && npairs > 0 );

    size_t i, j, k, esz1 = dst[0].elemSize1();
    int depth = dst[0].depth();

    AutoBuffer<uchar> buf((nsrcs + ndsts + 1)*(sizeof(Mat*) + sizeof(uchar*)) + npairs*(sizeof(uchar*)*2 + sizeof(int)*6));
    const Mat** arrays = (const Mat**)(uchar*)buf;
    uchar** ptrs = (uchar**)(arrays + nsrcs + ndsts);
    const uchar** srcs = (const uchar**)(ptrs + nsrcs + ndsts + 1);
    uchar** dsts = (uchar**)(srcs + npairs);
    int* tab = (int*)(dsts + npairs);
    int *sdelta = (int*)(tab + npairs*4), *ddelta = sdelta + npairs;

    for( i = 0; i < nsrcs; i++ )
        arrays[i] = &src[i];
    for( i = 0; i < ndsts; i++ )
        arrays[i + nsrcs] = &dst[i];
    ptrs[nsrcs + ndsts] = 0;

    for( i = 0; i < npairs; i++ )
    {
        int i0 = fromTo[i*2], i1 = fromTo[i*2+1];
        if( i0 >= 0 )
        {
            for( j = 0; j < nsrcs; i0 -= src[j].channels(), j++ )
                if( i0 < src[j].channels() )
                    break;
            CV_Assert(j < nsrcs && src[j].depth() == depth);
            tab[i*4] = (int)j; tab[i*4+1] = (int)(i0*esz1);
            sdelta[i] = src[j].channels();
        }
        else
        {
            // Negative source index: point at the always-NULL ptrs slot so
            // mixChannels_ zero-fills the destination channel.
            tab[i*4] = (int)(nsrcs + ndsts); tab[i*4+1] = 0;
            sdelta[i] = 0;
        }

        for( j = 0; j < ndsts; i1 -= dst[j].channels(), j++ )
            if( i1 < dst[j].channels() )
                break;
        CV_Assert(i1 >= 0 && j < ndsts && dst[j].depth() == depth);
        tab[i*4+2] = (int)(j + nsrcs); tab[i*4+3] = (int)(i1*esz1);
        ddelta[i] = dst[j].channels();
    }

    NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts));
    int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1));
    MixChannelsFunc func = getMixchFunc(depth);

    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( k = 0; k < npairs; k++ )
        {
            srcs[k] = ptrs[tab[k*4]] + tab[k*4+1];
            dsts[k] = ptrs[tab[k*4+2]] + tab[k*4+3];
        }

        for( int t = 0; t < total; t += blocksize )
        {
            int bsz = std::min(total - t, blocksize);
            func( srcs, sdelta, dsts, ddelta, bsz, (int)npairs );

            if( t + blocksize < total )
                for( k = 0; k < npairs; k++ )
                {
                    srcs[k] += blocksize*sdelta[k]*esz1;
                    dsts[k] += blocksize*ddelta[k]*esz1;
                }
        }
    }
}
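
// Usage sketch (illustrative only, excluded from the build): the classic
// documentation example -- scatter a BGRA image into an RGB image (channels
// reversed) plus a separate alpha plane, in a single pass.
#if 0
static void example_mixChannels()
{
    cv::Mat bgra(100, 100, CV_8UC4, cv::Scalar(255, 0, 0, 255));
    cv::Mat rgb(bgra.rows, bgra.cols, CV_8UC3), alpha(bgra.rows, bgra.cols, CV_8UC1);
    cv::Mat out[] = { rgb, alpha };
    int from_to[] = { 0,2,  1,1,  2,0,  3,3 };
    cv::mixChannels(&bgra, 1, out, 2, from_to, 4);
}
#endif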

#ifdef HAVE_OPENCL

namespace cv {

// Maps a global channel index onto (matrix index, channel index within that
// matrix) for a vector of UMats; sets both outputs to -1 when out of range.
static void getUMatIndex(const std::vector<UMat> & um, int cn, int & idx, int & cnidx)
{
    int totalChannels = 0;
    for (size_t i = 0, size = um.size(); i < size; ++i)
    {
        int ccn = um[i].channels();
        totalChannels += ccn;

        if (totalChannels == cn)
        {
            idx = (int)(i + 1);
            cnidx = 0;
            return;
        }
        else if (totalChannels > cn)
        {
            idx = (int)i;
            cnidx = i == 0 ? cn : (cn - totalChannels + ccn);
            return;
        }
    }

    idx = cnidx = -1;
}

static bool ocl_mixChannels(InputArrayOfArrays _src, InputOutputArrayOfArrays _dst,
                            const int* fromTo, size_t npairs)
{
    std::vector<UMat> src, dst;
    _src.getUMatVector(src);
    _dst.getUMatVector(dst);

    size_t nsrc = src.size(), ndst = dst.size();
    CV_Assert(nsrc > 0 && ndst > 0);

    Size size = src[0].size();
    int depth = src[0].depth(), esz = CV_ELEM_SIZE(depth);

    for (size_t i = 1, ssize = src.size(); i < ssize; ++i)
        CV_Assert(src[i].size() == size && src[i].depth() == depth);
    for (size_t i = 0, dsize = dst.size(); i < dsize; ++i)
        CV_Assert(dst[i].size() == size && dst[i].depth() == depth);

    String declsrc, decldst, declproc, declcn;
    std::vector<UMat> srcargs(npairs), dstargs(npairs);

    for (size_t i = 0; i < npairs; ++i)
    {
        int scn = fromTo[i<<1], dcn = fromTo[(i<<1) + 1];
        int src_idx, src_cnidx, dst_idx, dst_cnidx;

        getUMatIndex(src, scn, src_idx, src_cnidx);
        getUMatIndex(dst, dcn, dst_idx, dst_cnidx);

        CV_Assert(dst_idx >= 0 && src_idx >= 0);

        srcargs[i] = src[src_idx];
        srcargs[i].offset += src_cnidx * esz;

        dstargs[i] = dst[dst_idx];
        dstargs[i].offset += dst_cnidx * esz;

        declsrc += format("DECLARE_INPUT_MAT(%d)", (int)i);
        decldst += format("DECLARE_OUTPUT_MAT(%d)", (int)i);
        declproc += format("PROCESS_ELEM(%d)", (int)i);
        declcn += format(" -D scn%d=%d -D dcn%d=%d",
                         (int)i, src[src_idx].channels(), (int)i, dst[dst_idx].channels());
    }

    ocl::Kernel k("mixChannels", ocl::core::mixchannels_oclsrc,
                  format("-D T=%s -D DECLARE_INPUT_MATS=%s -D DECLARE_OUTPUT_MATS=%s"
                         " -D PROCESS_ELEMS=%s%s", ocl::memopTypeToStr(depth),
                         declsrc.c_str(), decldst.c_str(), declproc.c_str(), declcn.c_str()));
    if (k.empty())
        return false;

    int argindex = 0;
    for (size_t i = 0; i < npairs; ++i)
        argindex = k.set(argindex, ocl::KernelArg::ReadOnlyNoSize(srcargs[i]));
    for (size_t i = 0; i < npairs; ++i)
        argindex = k.set(argindex, ocl::KernelArg::WriteOnlyNoSize(dstargs[i]));
    k.set(k.set(argindex, size.height), size.width);

    size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
                     const int* fromTo, size_t npairs)
{
    if (npairs == 0 || fromTo == NULL)
        return;

    CV_OCL_RUN(dst.isUMatVector(),
               ocl_mixChannels(src, dst, fromTo, npairs))

    bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
                      src.kind() != _InputArray::STD_VECTOR_VECTOR &&
                      src.kind() != _InputArray::STD_VECTOR_UMAT;
    bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
                      dst.kind() != _InputArray::STD_VECTOR_VECTOR &&
                      dst.kind() != _InputArray::STD_VECTOR_UMAT;
    int i;
    int nsrc = src_is_mat ? 1 : (int)src.total();
    int ndst = dst_is_mat ? 1 : (int)dst.total();

    CV_Assert(nsrc > 0 && ndst > 0);
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    Mat* buf = _buf;
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(src_is_mat ? -1 : i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, fromTo, npairs);
}

void cv::mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
                     const std::vector<int>& fromTo)
{
    if (fromTo.empty())
        return;

    CV_OCL_RUN(dst.isUMatVector(),
               ocl_mixChannels(src, dst, &fromTo[0], fromTo.size()>>1))

    bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
                      src.kind() != _InputArray::STD_VECTOR_VECTOR &&
                      src.kind() != _InputArray::STD_VECTOR_UMAT;
    bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
                      dst.kind() != _InputArray::STD_VECTOR_VECTOR &&
                      dst.kind() != _InputArray::STD_VECTOR_UMAT;
    int i;
    int nsrc = src_is_mat ? 1 : (int)src.total();
    int ndst = dst_is_mat ? 1 : (int)dst.total();

    CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    Mat* buf = _buf;
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(src_is_mat ? -1 : i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, &fromTo[0], fromTo.size()/2);
}

void cv::extractChannel(InputArray _src, OutputArray _dst, int coi)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    CV_Assert( 0 <= coi && coi < cn );
    int ch[] = { coi, 0 };

    if (ocl::useOpenCL() && _src.dims() <= 2 && _dst.isUMat())
    {
        UMat src = _src.getUMat();
        _dst.create(src.dims, &src.size[0], depth);
        UMat dst = _dst.getUMat();
        mixChannels(std::vector<UMat>(1, src), std::vector<UMat>(1, dst), ch, 1);
        return;
    }

    Mat src = _src.getMat();
    _dst.create(src.dims, &src.size[0], depth);
    Mat dst = _dst.getMat();
    mixChannels(&src, 1, &dst, 1, ch, 1);
}

void cv::insertChannel(InputArray _src, InputOutputArray _dst, int coi)
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype);
    int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype), dcn = CV_MAT_CN(dtype);
    CV_Assert( _src.sameSize(_dst) && sdepth == ddepth );
    CV_Assert( 0 <= coi && coi < dcn && scn == 1 );

    int ch[] = { 0, coi };
    if (ocl::useOpenCL() && _src.dims() <= 2 && _dst.isUMat())
    {
        UMat src = _src.getUMat(), dst = _dst.getUMat();
        mixChannels(std::vector<UMat>(1, src), std::vector<UMat>(1, dst), ch, 1);
        return;
    }

    Mat src = _src.getMat(), dst = _dst.getMat();
    mixChannels(&src, 1, &dst, 1, ch, 1);
}
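
// Usage sketch (illustrative only, excluded from the build): extractChannel
// and insertChannel are single-pair conveniences over mixChannels.
#if 0
static void example_extract_insert()
{
    cv::Mat bgr(4, 4, CV_8UC3, cv::Scalar(10, 20, 30)), green;
    cv::extractChannel(bgr, green, 1);  // green is CV_8UC1, all 20
    green.setTo(40);
    cv::insertChannel(green, bgr, 1);   // bgr pixels become (10, 40, 30)
}
#endif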

/****************************************************************************************\
*                                convertScale[Abs]                                       *
\****************************************************************************************/

namespace cv
{

template<typename T, typename DT, typename WT> static void
cvtScaleAbs_( const T* src, size_t sstep,
              DT* dst, size_t dstep, Size size,
              WT scale, WT shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(std::abs(src[x]*scale + shift));
            t1 = saturate_cast<DT>(std::abs(src[x+1]*scale + shift));
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(std::abs(src[x+2]*scale + shift));
            t1 = saturate_cast<DT>(std::abs(src[x+3]*scale + shift));
            dst[x+2] = t0; dst[x+3] = t1;
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(std::abs(src[x]*scale + shift));
    }
}

template<typename T, typename DT, typename WT> static void
cvtScale_( const T* src, size_t sstep,
           DT* dst, size_t dstep, Size size,
           WT scale, WT shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(src[x]*scale + shift);
            t1 = saturate_cast<DT>(src[x+1]*scale + shift);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(src[x+2]*scale + shift);
            t1 = saturate_cast<DT>(src[x+3]*scale + shift);
            dst[x+2] = t0; dst[x+3] = t1;
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(src[x]*scale + shift);
    }
}
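
// Worked sketch (illustrative only, excluded from the build): the
// saturate_cast above is what clamps out-of-range results, e.g. for
// float -> uchar with scale = 2, 200*2 = 400 saturates to 255 and -5*2 = -10
// saturates to 0.
#if 0
static void example_cvtScale_()
{
    float src[] = { -5.f, 100.f, 200.f };
    uchar dst[3];
    cvtScale_<float, uchar, float>(src, sizeof(src), dst, sizeof(dst),
                                   Size(3, 1), 2.f, 0.f); // dst = { 0, 200, 255 }
}
#endif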

//vz optimized template specialization
template<> void
cvtScale_<short, short, float>( const short* src, size_t sstep,
                                short* dst, size_t dstep, Size size,
                                float scale, float shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        #if CV_SSE2
        if(USE_SSE2)
        {
            __m128 scale128 = _mm_set1_ps (scale);
            __m128 shift128 = _mm_set1_ps (shift);
            for(; x <= size.width - 8; x += 8 )
            {
                __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x));
                __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4));
                __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16));
                __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16));
                rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128);
                rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128);
                r0 = _mm_cvtps_epi32(rf0);
                r1 = _mm_cvtps_epi32(rf1);
                r0 = _mm_packs_epi32(r0, r1);
                _mm_storeu_si128((__m128i*)(dst + x), r0);
            }
        }
        #endif

        for(; x < size.width; x++ )
            dst[x] = saturate_cast<short>(src[x]*scale + shift);
    }
}

template<> void
cvtScale_<short, int, float>( const short* src, size_t sstep,
                              int* dst, size_t dstep, Size size,
                              float scale, float shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;

        #if CV_SSE2
        if(USE_SSE2)//~5X
        {
            __m128 scale128 = _mm_set1_ps (scale);
            __m128 shift128 = _mm_set1_ps (shift);
            for(; x <= size.width - 8; x += 8 )
            {
                __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x));
                __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4));
                __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16));
                __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16));
                rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128);
                rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128);
                r0 = _mm_cvtps_epi32(rf0);
                r1 = _mm_cvtps_epi32(rf1);

                _mm_storeu_si128((__m128i*)(dst + x), r0);
                _mm_storeu_si128((__m128i*)(dst + x + 4), r1);
            }
        }
        #endif

        // AVX variant kept disabled: it benchmarked at only ~2x and lost to
        // the SSE2 path above; revisit once AVX2 (Haswell) is widespread.
        /*
        #if CV_AVX
        if(USE_AVX)//2X - bad variant
        {
            ////TODO:AVX implementation (optimization?) required
            __m256 scale256 = _mm256_set1_ps (scale);
            __m256 shift256 = _mm256_set1_ps (shift);
            for(; x <= size.width - 8; x += 8 )
            {
                __m256i buf = _mm256_set_epi32((int)(*(src+x+7)),(int)(*(src+x+6)),(int)(*(src+x+5)),(int)(*(src+x+4)),(int)(*(src+x+3)),(int)(*(src+x+2)),(int)(*(src+x+1)),(int)(*(src+x)));
                __m256 r0 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (buf), scale256), shift256);
                __m256i res = _mm256_cvtps_epi32(r0);
                _mm256_storeu_si256 ((__m256i*)(dst+x), res);
            }
        }
        #endif
        */

        for(; x < size.width; x++ )
            dst[x] = saturate_cast<int>(src[x]*scale + shift);
    }
}

template<typename T, typename DT> static void
cvt_( const T* src, size_t sstep,
      DT* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(src[x]);
            t1 = saturate_cast<DT>(src[x+1]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(src[x+2]);
            t1 = saturate_cast<DT>(src[x+3]);
            dst[x+2] = t0; dst[x+3] = t1;
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(src[x]);
    }
}

//vz optimized template specialization, test Core_ConvertScale/ElemWiseTest
template<> void
cvt_<float, short>( const float* src, size_t sstep,
                    short* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        #if CV_SSE2
        if(USE_SSE2)
        {
            for( ; x <= size.width - 8; x += 8 )
            {
                __m128 src128 = _mm_loadu_ps (src + x);
                __m128i src_int128 = _mm_cvtps_epi32 (src128);

                src128 = _mm_loadu_ps (src + x + 4);
                __m128i src1_int128 = _mm_cvtps_epi32 (src128);

                src1_int128 = _mm_packs_epi32(src_int128, src1_int128);
                _mm_storeu_si128((__m128i*)(dst + x),src1_int128);
            }
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<short>(src[x]);
    }
}

template<typename T> static void
cpy_( const T* src, size_t sstep, T* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
        memcpy(dst, src, size.width*sizeof(src[0]));
}

// Each macro stamps out a depth-specific conversion shim with the common
// BinaryFunc signature; the unused (const uchar*, size_t) pair and the
// trailing double* keep all generated functions interchangeable in the
// dispatch tables below.
#define DEF_CVT_SCALE_ABS_FUNC(suffix, tfunc, stype, dtype, wtype) \
static void cvtScaleAbs##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                                 dtype* dst, size_t dstep, Size size, double* scale) \
{ \
    tfunc(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
}

#define DEF_CVT_SCALE_FUNC(suffix, stype, dtype, wtype) \
static void cvtScale##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                              dtype* dst, size_t dstep, Size size, double* scale) \
{ \
    cvtScale_(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
}

#if defined(HAVE_IPP)
#define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    if (ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height)) >= 0) \
        return; \
    setIppErrorStatus(); \
    cvt_(src, sstep, dst, dstep, size); \
}

#define DEF_CVT_FUNC_F2(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    if (ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height), ippRndFinancial, 0) >= 0) \
        return; \
    setIppErrorStatus(); \
    cvt_(src, sstep, dst, dstep, size); \
}
#else
#define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    cvt_(src, sstep, dst, dstep, size); \
}
#define DEF_CVT_FUNC_F2 DEF_CVT_FUNC_F
#endif

#define DEF_CVT_FUNC(suffix, stype, dtype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    cvt_(src, sstep, dst, dstep, size); \
}

#define DEF_CPY_FUNC(suffix, stype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         stype* dst, size_t dstep, Size size, double*) \
{ \
    cpy_(src, sstep, dst, dstep, size); \
}
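
// Expansion sketch (illustrative): DEF_CVT_FUNC(8u64f, uchar, double) below
// generates, in effect,
//
//     static void cvt8u64f( const uchar* src, size_t sstep, const uchar*, size_t,
//                           double* dst, size_t dstep, Size size, double*)
//     {
//         cvt_(src, sstep, dst, dstep, size);
//     }
//
// so every instantiation is a thin, uniformly-typed wrapper over the shared
// templates above.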

DEF_CVT_SCALE_ABS_FUNC(8u, cvtScaleAbs_, uchar, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(8s8u, cvtScaleAbs_, schar, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(16u8u, cvtScaleAbs_, ushort, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(16s8u, cvtScaleAbs_, short, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(32s8u, cvtScaleAbs_, int, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(32f8u, cvtScaleAbs_, float, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(64f8u, cvtScaleAbs_, double, uchar, float)

DEF_CVT_SCALE_FUNC(8u, uchar, uchar, float)
DEF_CVT_SCALE_FUNC(8s8u, schar, uchar, float)
DEF_CVT_SCALE_FUNC(16u8u, ushort, uchar, float)
DEF_CVT_SCALE_FUNC(16s8u, short, uchar, float)
DEF_CVT_SCALE_FUNC(32s8u, int, uchar, float)
DEF_CVT_SCALE_FUNC(32f8u, float, uchar, float)
DEF_CVT_SCALE_FUNC(64f8u, double, uchar, float)

DEF_CVT_SCALE_FUNC(8u8s, uchar, schar, float)
DEF_CVT_SCALE_FUNC(8s, schar, schar, float)
DEF_CVT_SCALE_FUNC(16u8s, ushort, schar, float)
DEF_CVT_SCALE_FUNC(16s8s, short, schar, float)
DEF_CVT_SCALE_FUNC(32s8s, int, schar, float)
DEF_CVT_SCALE_FUNC(32f8s, float, schar, float)
DEF_CVT_SCALE_FUNC(64f8s, double, schar, float)

DEF_CVT_SCALE_FUNC(8u16u, uchar, ushort, float)
DEF_CVT_SCALE_FUNC(8s16u, schar, ushort, float)
DEF_CVT_SCALE_FUNC(16u, ushort, ushort, float)
DEF_CVT_SCALE_FUNC(16s16u, short, ushort, float)
DEF_CVT_SCALE_FUNC(32s16u, int, ushort, float)
DEF_CVT_SCALE_FUNC(32f16u, float, ushort, float)
DEF_CVT_SCALE_FUNC(64f16u, double, ushort, float)

DEF_CVT_SCALE_FUNC(8u16s, uchar, short, float)
DEF_CVT_SCALE_FUNC(8s16s, schar, short, float)
DEF_CVT_SCALE_FUNC(16u16s, ushort, short, float)
DEF_CVT_SCALE_FUNC(16s, short, short, float)
DEF_CVT_SCALE_FUNC(32s16s, int, short, float)
DEF_CVT_SCALE_FUNC(32f16s, float, short, float)
DEF_CVT_SCALE_FUNC(64f16s, double, short, float)

DEF_CVT_SCALE_FUNC(8u32s, uchar, int, float)
DEF_CVT_SCALE_FUNC(8s32s, schar, int, float)
DEF_CVT_SCALE_FUNC(16u32s, ushort, int, float)
DEF_CVT_SCALE_FUNC(16s32s, short, int, float)
DEF_CVT_SCALE_FUNC(32s, int, int, double)
DEF_CVT_SCALE_FUNC(32f32s, float, int, float)
DEF_CVT_SCALE_FUNC(64f32s, double, int, double)

DEF_CVT_SCALE_FUNC(8u32f, uchar, float, float)
DEF_CVT_SCALE_FUNC(8s32f, schar, float, float)
DEF_CVT_SCALE_FUNC(16u32f, ushort, float, float)
DEF_CVT_SCALE_FUNC(16s32f, short, float, float)
DEF_CVT_SCALE_FUNC(32s32f, int, float, double)
DEF_CVT_SCALE_FUNC(32f, float, float, float)
DEF_CVT_SCALE_FUNC(64f32f, double, float, double)

DEF_CVT_SCALE_FUNC(8u64f, uchar, double, double)
DEF_CVT_SCALE_FUNC(8s64f, schar, double, double)
DEF_CVT_SCALE_FUNC(16u64f, ushort, double, double)
DEF_CVT_SCALE_FUNC(16s64f, short, double, double)
DEF_CVT_SCALE_FUNC(32s64f, int, double, double)
DEF_CVT_SCALE_FUNC(32f64f, float, double, double)
DEF_CVT_SCALE_FUNC(64f, double, double, double)

DEF_CPY_FUNC(8u, uchar)
DEF_CVT_FUNC_F(8s8u, schar, uchar, 8s8u_C1Rs)
DEF_CVT_FUNC_F(16u8u, ushort, uchar, 16u8u_C1R)
DEF_CVT_FUNC_F(16s8u, short, uchar, 16s8u_C1R)
DEF_CVT_FUNC_F(32s8u, int, uchar, 32s8u_C1R)
DEF_CVT_FUNC_F2(32f8u, float, uchar, 32f8u_C1RSfs)
DEF_CVT_FUNC(64f8u, double, uchar)

DEF_CVT_FUNC_F2(8u8s, uchar, schar, 8u8s_C1RSfs)
DEF_CVT_FUNC_F2(16u8s, ushort, schar, 16u8s_C1RSfs)
DEF_CVT_FUNC_F2(16s8s, short, schar, 16s8s_C1RSfs)
DEF_CVT_FUNC_F(32s8s, int, schar, 32s8s_C1R)
DEF_CVT_FUNC_F2(32f8s, float, schar, 32f8s_C1RSfs)
DEF_CVT_FUNC(64f8s, double, schar)

DEF_CVT_FUNC_F(8u16u, uchar, ushort, 8u16u_C1R)
DEF_CVT_FUNC_F(8s16u, schar, ushort, 8s16u_C1Rs)
DEF_CPY_FUNC(16u, ushort)
DEF_CVT_FUNC_F(16s16u, short, ushort, 16s16u_C1Rs)
DEF_CVT_FUNC_F2(32s16u, int, ushort, 32s16u_C1RSfs)
DEF_CVT_FUNC_F2(32f16u, float, ushort, 32f16u_C1RSfs)
DEF_CVT_FUNC(64f16u, double, ushort)

DEF_CVT_FUNC_F(8u16s, uchar, short, 8u16s_C1R)
DEF_CVT_FUNC_F(8s16s, schar, short, 8s16s_C1R)
DEF_CVT_FUNC_F2(16u16s, ushort, short, 16u16s_C1RSfs)
DEF_CVT_FUNC_F2(32s16s, int, short, 32s16s_C1RSfs)
DEF_CVT_FUNC_F2(32f16s, float, short, 32f16s_C1RSfs)
DEF_CVT_FUNC(64f16s, double, short)

DEF_CVT_FUNC_F(8u32s, uchar, int, 8u32s_C1R)
DEF_CVT_FUNC_F(8s32s, schar, int, 8s32s_C1R)
DEF_CVT_FUNC_F(16u32s, ushort, int, 16u32s_C1R)
DEF_CVT_FUNC_F(16s32s, short, int, 16s32s_C1R)
DEF_CPY_FUNC(32s, int)
DEF_CVT_FUNC_F2(32f32s, float, int, 32f32s_C1RSfs)
DEF_CVT_FUNC(64f32s, double, int)

DEF_CVT_FUNC_F(8u32f, uchar, float, 8u32f_C1R)
DEF_CVT_FUNC_F(8s32f, schar, float, 8s32f_C1R)
DEF_CVT_FUNC_F(16u32f, ushort, float, 16u32f_C1R)
DEF_CVT_FUNC_F(16s32f, short, float, 16s32f_C1R)
DEF_CVT_FUNC_F(32s32f, int, float, 32s32f_C1R)
DEF_CVT_FUNC(64f32f, double, float)

DEF_CVT_FUNC(8u64f, uchar, double)
DEF_CVT_FUNC(8s64f, schar, double)
DEF_CVT_FUNC(16u64f, ushort, double)
DEF_CVT_FUNC(16s64f, short, double)
DEF_CVT_FUNC(32s64f, int, double)
DEF_CVT_FUNC(32f64f, float, double)
DEF_CPY_FUNC(64s, int64)

static BinaryFunc getCvtScaleAbsFunc(int depth)
{
    static BinaryFunc cvtScaleAbsTab[] =
    {
        (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u,
        (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u,
        (BinaryFunc)cvtScaleAbs64f8u, 0
    };

    return cvtScaleAbsTab[depth];
}

// note: same-size conversions reuse copy kernels (e.g. 16s->16s uses cvt16u,
// 32f->32f uses cvt32s), since only the element size matters for a plain copy.
BinaryFunc getConvertFunc(int sdepth, int ddepth)
{
    static BinaryFunc cvtTab[][8] =
    {
        {
            (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u),
            (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u),
            (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s),
            (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s),
            (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u,
            (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u),
            (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s),
            (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s),
            (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s),
            (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s),
            (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f),
            (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s,
            (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f),
            (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f),
            (BinaryFunc)(cvt64s), 0
        },
        {
            0, 0, 0, 0, 0, 0, 0, 0
        }
    };

    return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}

static BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
{
    static BinaryFunc cvtScaleTab[][8] =
    {
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8u),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u),
            (BinaryFunc)cvtScale64f8u, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s),
            (BinaryFunc)cvtScale64f8s, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u),
            (BinaryFunc)cvtScale64f16u, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s),
            (BinaryFunc)cvtScale64f16s, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32s),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s),
            (BinaryFunc)cvtScale64f32s, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f),
            (BinaryFunc)cvtScale64f32f, 0
        },
        {
            (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f,
            (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f,
            (BinaryFunc)cvtScale64f, 0
        },
        {
            0, 0, 0, 0, 0, 0, 0, 0
        }
    };

    return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}

}

#ifdef HAVE_OPENCL

namespace cv {

static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
        kercn = ocl::predictOptimalVectorWidth(_src, _dst);
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;

    if (!doubleSupport && depth == CV_64F)
        return false;

    char cvt[2][50];
    int wdepth = std::max(depth, CV_32F);
    ocl::Kernel k("KF", ocl::core::arithm_oclsrc,
                  format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D srcT1=%s"
                         " -D workT=%s -D wdepth=%d -D convertToWT1=%s -D convertToDT=%s -D workT1=%s%s",
                         ocl::typeToStr(CV_8UC(kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)), wdepth,
                         ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
                         ocl::convertTypeStr(wdepth, CV_8U, kercn, cvt[1]),
                         ocl::typeToStr(wdepth),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
    if (k.empty())
        return false;

    UMat src = _src.getUMat();
    _dst.create(src.size(), CV_8UC(cn));
    UMat dst = _dst.getUMat();

    ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
            dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);

    if (wdepth == CV_32F)
        k.args(srcarg, dstarg, (float)alpha, (float)beta);
    else if (wdepth == CV_64F)
        k.args(srcarg, dstarg, alpha, beta);

    size_t globalsize[2] = { (size_t)(src.cols * cn / kercn), (size_t)src.rows };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
    CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
               ocl_convertScaleAbs(_src, _dst, alpha, beta))

    Mat src = _src.getMat();
    int cn = src.channels();
    double scale[] = {alpha, beta};
    _dst.create( src.dims, src.size, CV_8UC(cn) );
    Mat dst = _dst.getMat();
    BinaryFunc func = getCvtScaleAbsFunc(src.depth());
    CV_Assert( func != 0 );

    if( src.dims <= 2 )
    {
        Size sz = getContinuousSize(src, dst, cn);
        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
    }
    else
    {
        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)it.size*cn, 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
    }
}
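
// Usage sketch (illustrative only, excluded from the build): a typical call,
// mapping a signed quantity into a displayable 8-bit image via
// dst = saturate_cast<uchar>(|src*alpha + beta|).
#if 0
static void example_convertScaleAbs()
{
    cv::Mat grad16s(4, 4, CV_16SC1, cv::Scalar(-300)), vis;
    cv::convertScaleAbs(grad16s, vis, 0.5, 0.0); // |(-300)*0.5| = 150, CV_8U output
}
#endif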

void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
{
    bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;

    if( _type < 0 )
        _type = _dst.fixedType() ? _dst.type() : type();
    else
        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());

    int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
    if( sdepth == ddepth && noScale )
    {
        copyTo(_dst);
        return;
    }

    Mat src = *this;

    BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
    double scale[] = {alpha, beta};
    int cn = channels();
    CV_Assert( func != 0 );

    if( dims <= 2 )
    {
        _dst.create( size(), _type );
        Mat dst = _dst.getMat();
        Size sz = getContinuousSize(src, dst, cn);
        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
    }
    else
    {
        _dst.create( dims, size, _type );
        Mat dst = _dst.getMat();
        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)(it.size*cn), 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func(ptrs[0], 1, 0, 0, ptrs[1], 1, sz, scale);
    }
}
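
// Usage sketch (illustrative only, excluded from the build): converting 8-bit
// data to float in [0,1]; the transform is dst = src*alpha + beta, saturated
// to the destination depth.
#if 0
static void example_convertTo()
{
    cv::Mat img8u(4, 4, CV_8UC1, cv::Scalar(128)), img32f;
    img8u.convertTo(img32f, CV_32F, 1.0/255.0); // all pixels ~0.502
}
#endif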

/****************************************************************************************\
*                                    LUT Transform                                       *
\****************************************************************************************/

namespace cv
{

// Table lookup for 8-bit sources: with a single-channel LUT the same table is
// applied to every channel; with a cn-channel LUT each channel gets its own
// interleaved sub-table.
template<typename T> static void
LUT8u_( const uchar* src, const T* lut, T* dst, int len, int cn, int lutcn )
{
    if( lutcn == 1 )
    {
        for( int i = 0; i < len*cn; i++ )
            dst[i] = lut[src[i]];
    }
    else
    {
        for( int i = 0; i < len*cn; i += cn )
            for( int k = 0; k < cn; k++ )
                dst[i+k] = lut[src[i+k]*cn+k];
    }
}

static void LUT8u_8u( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_8s( const uchar* src, const schar* lut, schar* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_16u( const uchar* src, const ushort* lut, ushort* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_16s( const uchar* src, const short* lut, short* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_32s( const uchar* src, const int* lut, int* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_32f( const uchar* src, const float* lut, float* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_64f( const uchar* src, const double* lut, double* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

typedef void (*LUTFunc)( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn );

static LUTFunc lutTab[] =
{
    (LUTFunc)LUT8u_8u, (LUTFunc)LUT8u_8s, (LUTFunc)LUT8u_16u, (LUTFunc)LUT8u_16s,
    (LUTFunc)LUT8u_32s, (LUTFunc)LUT8u_32f, (LUTFunc)LUT8u_64f, 0
};

#ifdef HAVE_OPENCL

static bool ocl_LUT(InputArray _src, InputArray _lut, OutputArray _dst)
{
    int lcn = _lut.channels(), dcn = _src.channels(), ddepth = _lut.depth();

    UMat src = _src.getUMat(), lut = _lut.getUMat();
    _dst.create(src.size(), CV_MAKETYPE(ddepth, dcn));
    UMat dst = _dst.getUMat();
    bool bAligned = (1 == dcn) && (0 == (src.offset % 4)) && (0 == (src.cols % 4));

    ocl::Kernel k("LUT", ocl::core::lut_oclsrc,
                  format("-D dcn=%d -D lcn=%d -D srcT=%s -D dstT=%s%s", dcn, lcn,
                         ocl::typeToStr(src.depth()), ocl::memopTypeToStr(ddepth),
                         bAligned ? " -D USE_ALIGNED" : ""));
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::ReadOnlyNoSize(lut),
           ocl::KernelArg::WriteOnly(dst));

    size_t globalSize[2] = { (size_t)dst.cols, (size_t)((dst.rows + 3) / 4) };
    if (bAligned)
        globalSize[0] = (dst.cols + 3) / 4;
    return k.run(2, globalSize, NULL, false);
}

#endif

#if defined(HAVE_IPP)
namespace ipp {

#if 0 // there are no performance benefits (PR #2653)
class IppLUTParallelBody_LUTC1 : public ParallelLoopBody
{
public:
    bool* ok;
    const Mat& src_;
    const Mat& lut_;
    Mat& dst_;

    typedef IppStatus (*IppFn)(const Ipp8u* pSrc, int srcStep, void* pDst, int dstStep,
                          IppiSize roiSize, const void* pTable, int nBitSize);
    IppFn fn;

    int width;

    IppLUTParallelBody_LUTC1(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
        : ok(_ok), src_(src), lut_(lut), dst_(dst)
    {
        width = dst.cols * dst.channels();

        size_t elemSize1 = CV_ELEM_SIZE1(dst.depth());

        fn =
                elemSize1 == 1 ? (IppFn)ippiLUTPalette_8u_C1R :
                elemSize1 == 4 ? (IppFn)ippiLUTPalette_8u32u_C1R :
                NULL;

        *ok = (fn != NULL);
    }

    void operator()( const cv::Range& range ) const
    {
        if (!*ok)
            return;

        const int row0 = range.start;
        const int row1 = range.end;

        Mat src = src_.rowRange(row0, row1);
        Mat dst = dst_.rowRange(row0, row1);

        IppiSize sz = { width, dst.rows };

        CV_DbgAssert(fn != NULL);
        if (fn(src.data, (int)src.step[0], dst.data, (int)dst.step[0], sz, lut_.data, 8) < 0)
        {
            setIppErrorStatus();
            *ok = false;
        }
    }
private:
    IppLUTParallelBody_LUTC1(const IppLUTParallelBody_LUTC1&);
    IppLUTParallelBody_LUTC1& operator=(const IppLUTParallelBody_LUTC1&);
};
#endif

class IppLUTParallelBody_LUTCN : public ParallelLoopBody
{
public:
    bool* ok;
    const Mat& src_;
    const Mat& lut_;
    Mat& dst_;

    int lutcn;

    uchar* lutBuffer;
    uchar* lutTable[4];

    IppLUTParallelBody_LUTCN(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
        : ok(_ok), src_(src), lut_(lut), dst_(dst), lutBuffer(NULL)
    {
        lutcn = lut.channels();
        IppiSize sz256 = {256, 1};

        size_t elemSize1 = dst.elemSize1();
        CV_DbgAssert(elemSize1 == 1);
        lutBuffer = (uchar*)ippMalloc(256 * (int)elemSize1 * 4);
        lutTable[0] = lutBuffer + 0;
        lutTable[1] = lutBuffer + 1 * 256 * elemSize1;
        lutTable[2] = lutBuffer + 2 * 256 * elemSize1;
        lutTable[3] = lutBuffer + 3 * 256 * elemSize1;

        CV_DbgAssert(lutcn == 3 || lutcn == 4);
        if (lutcn == 3)
        {
            IppStatus status = ippiCopy_8u_C3P3R(lut.data, (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
            if (status < 0)
            {
                setIppErrorStatus();
                return;
            }
        }
        else if (lutcn == 4)
        {
            IppStatus status = ippiCopy_8u_C4P4R(lut.data, (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
            if (status < 0)
            {
                setIppErrorStatus();
                return;
            }
        }

        *ok = true;
    }

    ~IppLUTParallelBody_LUTCN()
    {
        if (lutBuffer != NULL)
            ippFree(lutBuffer);
        lutBuffer = NULL;
        lutTable[0] = NULL;
    }

    void operator()( const cv::Range& range ) const
    {
        if (!*ok)
            return;

        const int row0 = range.start;
        const int row1 = range.end;

        Mat src = src_.rowRange(row0, row1);
        Mat dst = dst_.rowRange(row0, row1);

        if (lutcn == 3)
        {
            if (ippiLUTPalette_8u_C3R(
                    src.data, (int)src.step[0], dst.data, (int)dst.step[0],
                    ippiSize(dst.size()), lutTable, 8) >= 0)
                return;
        }
        else if (lutcn == 4)
        {
            if (ippiLUTPalette_8u_C4R(
                    src.data, (int)src.step[0], dst.data, (int)dst.step[0],
                    ippiSize(dst.size()), lutTable, 8) >= 0)
                return;
        }
        setIppErrorStatus();
        *ok = false;
    }
private:
    IppLUTParallelBody_LUTCN(const IppLUTParallelBody_LUTCN&);
    IppLUTParallelBody_LUTCN& operator=(const IppLUTParallelBody_LUTCN&);
};

} // namespace ipp
#endif // HAVE_IPP

class LUTParallelBody : public ParallelLoopBody
{
public:
    bool* ok;
    const Mat& src_;
    const Mat& lut_;
    Mat& dst_;

    LUTFunc func;

    LUTParallelBody(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
        : ok(_ok), src_(src), lut_(lut), dst_(dst)
    {
        func = lutTab[lut.depth()];
        *ok = (func != NULL);
    }

    void operator()( const cv::Range& range ) const
    {
        CV_DbgAssert(*ok);

        const int row0 = range.start;
        const int row1 = range.end;

        Mat src = src_.rowRange(row0, row1);
        Mat dst = dst_.rowRange(row0, row1);

        int cn = src.channels();
        int lutcn = lut_.channels();

        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        int len = (int)it.size;

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func(ptrs[0], lut_.data, ptrs[1], len, cn, lutcn);
    }
private:
    LUTParallelBody(const LUTParallelBody&);
    LUTParallelBody& operator=(const LUTParallelBody&);
};

}

void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst )
{
    int cn = _src.channels(), depth = _src.depth();
    int lutcn = _lut.channels();

    CV_Assert( (lutcn == cn || lutcn == 1) &&
        _lut.total() == 256 && _lut.isContinuous() &&
        (depth == CV_8U || depth == CV_8S) );

    CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
               ocl_LUT(_src, _lut, _dst))

    Mat src = _src.getMat(), lut = _lut.getMat();
    _dst.create(src.dims, src.size, CV_MAKETYPE(_lut.depth(), cn));
    Mat dst = _dst.getMat();

    if (_src.dims() <= 2)
    {
        bool ok = false;
        Ptr<ParallelLoopBody> body;
#if defined(HAVE_IPP)
        size_t elemSize1 = CV_ELEM_SIZE1(dst.depth());
#if 0 // there are no performance benefits (PR #2653)
        if (lutcn == 1)
        {
            ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTC1(src, lut, dst, &ok);
            body.reset(p);
        }
        else
#endif
        if ((lutcn == 3 || lutcn == 4) && elemSize1 == 1)
        {
            ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTCN(src, lut, dst, &ok);
            body.reset(p);
        }
#endif
        if (body == NULL || ok == false)
        {
            ok = false;
            ParallelLoopBody* p = new LUTParallelBody(src, lut, dst, &ok);
            body.reset(p);
        }
        if (body != NULL && ok)
        {
            Range all(0, dst.rows);
            if (dst.total()>>18)
                parallel_for_(all, *body, (double)std::max((size_t)1, dst.total()>>16));
            else
                (*body)(all);
            if (ok)
                return;
        }
    }

    LUTFunc func = lutTab[lut.depth()];
    CV_Assert( func != 0 );

    const Mat* arrays[] = {&src, &dst, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    int len = (int)it.size;

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        func(ptrs[0], lut.data, ptrs[1], len, cn, lutcn);
}
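
// Usage sketch (illustrative only, excluded from the build): a 256-entry
// table that inverts an 8-bit image; with a 1-channel LUT the same table is
// applied to every channel.
#if 0
static void example_LUT()
{
    cv::Mat lut(1, 256, CV_8UC1);
    for( int i = 0; i < 256; i++ )
        lut.at<uchar>(i) = (uchar)(255 - i);
    cv::Mat img(4, 4, CV_8UC3, cv::Scalar(10, 20, 30)), inverted;
    cv::LUT(img, lut, inverted); // pixels become (245, 235, 225)
}
#endif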

#ifdef HAVE_OPENCL

namespace cv {

static bool ocl_normalize( InputArray _src, OutputArray _dst, InputArray _mask, int rtype,
                           double scale, double shift )
{
    UMat src = _src.getUMat(), dst = _dst.getUMat();

    if( _mask.empty() )
        src.convertTo( dst, rtype, scale, shift );
    else
    {
        UMat temp;
        src.convertTo( temp, rtype, scale, shift );
        temp.copyTo( dst, _mask );
    }

    return true;
}

}

#endif

void cv::normalize( InputArray _src, OutputArray _dst, double a, double b,
                    int norm_type, int rtype, InputArray _mask )
{
    double scale = 1, shift = 0;
    if( norm_type == CV_MINMAX )
    {
        double smin = 0, smax = 0;
        double dmin = MIN( a, b ), dmax = MAX( a, b );
        minMaxLoc( _src, &smin, &smax, 0, 0, _mask );
        scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
        shift = dmin - smin*scale;
    }
    else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
    {
        scale = norm( _src, norm_type, _mask );
        scale = scale > DBL_EPSILON ? a/scale : 0.;
        shift = 0;
    }
    else
        CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );

    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    if( rtype < 0 )
        rtype = _dst.fixedType() ? _dst.depth() : depth;
    _dst.createSameSize(_src, CV_MAKETYPE(rtype, cn));

    CV_OCL_RUN(_dst.isUMat(),
               ocl_normalize(_src, _dst, _mask, rtype, scale, shift))

    Mat src = _src.getMat(), dst = _dst.getMat();
    if( _mask.empty() )
        src.convertTo( dst, rtype, scale, shift );
    else
    {
        Mat temp;
        src.convertTo( temp, rtype, scale, shift );
        temp.copyTo( dst, _mask );
    }
}
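
// Usage sketch (illustrative only, excluded from the build): NORM_MINMAX
// stretches the observed range [smin, smax] onto [a, b] with the scale/shift
// computed above; here [50, 150] maps onto [0, 255].
#if 0
static void example_normalize()
{
    cv::Mat src = (cv::Mat_<uchar>(1, 3) << 50, 100, 150), dst;
    cv::normalize(src, dst, 0, 255, cv::NORM_MINMAX); // dst = { 0, 128, 255 }
}
#endif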

CV_IMPL void
cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 )
{
    void* dptrs[] = { dstarr0, dstarr1, dstarr2, dstarr3 };
    cv::Mat src = cv::cvarrToMat(srcarr);
    int i, j, nz = 0;
    for( i = 0; i < 4; i++ )
        nz += dptrs[i] != 0;
    CV_Assert( nz > 0 );
    std::vector<cv::Mat> dvec(nz);
    std::vector<int> pairs(nz*2);

    for( i = j = 0; i < 4; i++ )
    {
        if( dptrs[i] != 0 )
        {
            dvec[j] = cv::cvarrToMat(dptrs[i]);
            CV_Assert( dvec[j].size() == src.size() );
            CV_Assert( dvec[j].depth() == src.depth() );
            CV_Assert( dvec[j].channels() == 1 );
            CV_Assert( i < src.channels() );
            pairs[j*2] = i;
            pairs[j*2+1] = j;
            j++;
        }
    }
    if( nz == src.channels() )
        cv::split( src, dvec );
    else
    {
        cv::mixChannels( &src, 1, &dvec[0], nz, &pairs[0], nz );
    }
}

CV_IMPL void
cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2,
         const void* srcarr3, void* dstarr )
{
    const void* sptrs[] = { srcarr0, srcarr1, srcarr2, srcarr3 };
    cv::Mat dst = cv::cvarrToMat(dstarr);
    int i, j, nz = 0;
    for( i = 0; i < 4; i++ )
        nz += sptrs[i] != 0;
    CV_Assert( nz > 0 );
    std::vector<cv::Mat> svec(nz);
    std::vector<int> pairs(nz*2);

    for( i = j = 0; i < 4; i++ )
    {
        if( sptrs[i] != 0 )
        {
            svec[j] = cv::cvarrToMat(sptrs[i]);
            CV_Assert( svec[j].size == dst.size &&
                       svec[j].depth() == dst.depth() &&
                       svec[j].channels() == 1 && i < dst.channels() );
            pairs[j*2] = j;
            pairs[j*2+1] = i;
            j++;
        }
    }

    if( nz == dst.channels() )
        cv::merge( svec, dst );
    else
    {
        cv::mixChannels( &svec[0], nz, &dst, 1, &pairs[0], nz );
    }
}

CV_IMPL void
cvMixChannels( const CvArr** src, int src_count,
               CvArr** dst, int dst_count,
               const int* from_to, int pair_count )
{
    cv::AutoBuffer<cv::Mat> buf(src_count + dst_count);

    int i;
    for( i = 0; i < src_count; i++ )
        buf[i] = cv::cvarrToMat(src[i]);
    for( i = 0; i < dst_count; i++ )
        buf[i+src_count] = cv::cvarrToMat(dst[i]);
    cv::mixChannels(&buf[0], src_count, &buf[src_count], dst_count, from_to, pair_count);
}

CV_IMPL void
cvConvertScaleAbs( const void* srcarr, void* dstarr,
                   double scale, double shift )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.size == dst.size && dst.type() == CV_8UC(src.channels()));
    cv::convertScaleAbs( src, dst, scale, shift );
}

CV_IMPL void
cvConvertScale( const void* srcarr, void* dstarr,
                double scale, double shift )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( src.size == dst.size && src.channels() == dst.channels() );
    src.convertTo(dst, dst.type(), scale, shift);
}

CV_IMPL void cvLUT( const void* srcarr, void* dstarr, const void* lutarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), lut = cv::cvarrToMat(lutarr);

    CV_Assert( dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels()) );
    cv::LUT( src, lut, dst );
}

CV_IMPL void cvNormalize( const CvArr* srcarr, CvArr* dstarr,
                          double a, double b, int norm_type, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    CV_Assert( dst.size() == src.size() && src.channels() == dst.channels() );
    cv::normalize( src, dst, a, b, norm_type, dst.type(), mask );
}

/* End of file. */