1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // For Open Source Computer Vision Library
12 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
13 // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of the copyright holders may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
42 /* ////////////////////////////////////////////////////////////////////
44 // Mat basic operations: Copy, Set
48 #include "precomp.hpp"
49 #include "opencl_kernels_core.hpp"
// Generic masked copy kernel: for every pixel whose mask byte is non-zero,
// copies one element of type T from src to dst.  Steps are in BYTES, so the
// row pointers are uchar* and recast per row.
// NOTE(review): this listing is elided (embedded line numbers jump); the
// unrolled-loop body, the per-pixel copy and the closing braces are not visible.
54 template<typename T> static void
55 copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
// Row loop: advance all three base pointers by their byte strides each row.
57 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
59 const T* src = (const T*)_src;
// Optional manual 4x unrolling of the per-pixel loop (build-time switch).
62 #if CV_ENABLE_UNROLLED
63 for( ; x <= size.width - 4; x += 4 )
// Scalar tail: remaining (width % 4) pixels.
75 for( ; x < size.width; x++ )
// 8-bit / 1-channel specialization of copyMask_.  Tries IPP's masked copy for
// the whole ROI first; otherwise falls through to a SIMD row loop that blends
// 16 pixels at a time, then a scalar tail.
// NOTE(review): elided listing — the IPP/SSE preprocessor guards, the template
// header line and several statements between the visible lines are missing here.
82 copyMask_<uchar>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
// IPP fast path: non-negative status means the whole ROI was handled.
85 if (ippiCopy_8u_C1MR(_src, (int)sstep, _dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)
90 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
92 const uchar* src = (const uchar*)_src;
93 uchar* dst = (uchar*)_dst;
98 __m128i zero = _mm_setzero_si128 ();
// SIMD main loop: 16 mask bytes / 16 pixels per iteration.
100 for( ; x <= size.width - 16; x += 16 )
102 const __m128i rSrc = _mm_lddqu_si128((const __m128i*)(src+x));
103 __m128i _mask = _mm_lddqu_si128((const __m128i*)(mask+x));
104 __m128i rDst = _mm_lddqu_si128((__m128i*)(dst+x));
// _negMask is 0xFF where mask==0; blendv then KEEPS dst there and takes src
// where the mask is non-zero — i.e. the masked-copy semantics.
105 __m128i _negMask = _mm_cmpeq_epi8(_mask, zero);
106 rDst = _mm_blendv_epi8(rSrc, rDst, _negMask);
107 _mm_storeu_si128((__m128i*)(dst + x), rDst);
// Scalar tail for the last (width % 16) pixels.
111 for( ; x < size.width; x++ )
// 16-bit / 1-channel specialization of copyMask_: IPP first, then a SIMD loop
// handling 8 pixels (16 bytes) per iteration.  One mask byte covers one 16-bit
// pixel, so the 8 mask bytes are duplicated into 16 lanes before the blend.
// NOTE(review): elided listing — guards, header line and tail body not visible.
118 copyMask_<ushort>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
121 if (ippiCopy_16u_C1MR((const Ipp16u *)_src, (int)sstep, (Ipp16u *)_dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)
126 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
128 const ushort* src = (const ushort*)_src;
129 ushort* dst = (ushort*)_dst;
134 __m128i zero = _mm_setzero_si128 ();
135 for( ; x <= size.width - 8; x += 8 )
137 const __m128i rSrc =_mm_lddqu_si128((const __m128i*)(src+x));
// Load 8 mask bytes and widen each to both bytes of its 16-bit lane so the
// byte-wise blendv selects whole pixels.
138 __m128i _mask = _mm_loadl_epi64((const __m128i*)(mask+x));
139 _mask = _mm_unpacklo_epi8(_mask, _mask);
140 __m128i rDst = _mm_lddqu_si128((const __m128i*)(dst+x));
// Same keep-dst-where-mask-zero blend as the 8u specialization.
141 __m128i _negMask = _mm_cmpeq_epi8(_mask, zero);
142 rDst = _mm_blendv_epi8(rSrc, rDst, _negMask);
143 _mm_storeu_si128((__m128i*)(dst + x), rDst);
147 for( ; x < size.width; x++ )
// Fallback masked copy for ANY element size: the size in bytes is smuggled in
// through the opaque _esz pointer (BinaryFunc signature has no esz parameter).
// Copies esz bytes per pixel wherever the mask byte is non-zero.
// NOTE(review): elided listing — mask test and copy statement not visible.
154 copyMaskGeneric(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size, void* _esz)
156 size_t k, esz = *(size_t*)_esz;
157 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
159 const uchar* src = _src;
// Per-pixel loop: src/dst advance by one element (esz bytes) each pixel.
162 for( ; x < size.width; x++, src += esz, dst += esz )
// Byte-wise copy of a single element.
166 for( k = 0; k < esz; k++ )
// Generates a thin BinaryFunc-compatible wrapper (extra void* arg ignored)
// around the copyMask_<type> template, named copyMask<suffix>.
173 #define DEF_COPY_MASK(suffix, type) \
174 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
175                              uchar* dst, size_t dstep, Size size, void*) \
177     copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
// Two alternative definitions of DEF_COPY_MASK_F: the first tries the named
// IPP masked-copy primitive and falls back to the template on failure; the
// second is a plain template call.  They are presumably selected by an
// elided HAVE_IPP #if/#else — TODO confirm against the full file.
181 #define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
182 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
183                              uchar* dst, size_t dstep, Size size, void*) \
// IPP first; >= 0 means success and the wrapper returns early (elided).
185     if (ippiCopy_##ippfavor((const ipptype *)src, (int)sstep, (ipptype *)dst, (int)dstep, ippiSize(size), (const Ipp8u *)mask, (int)mstep) >= 0) \
// Record the IPP failure, then use the portable template implementation.
187     setIppErrorStatus(); \
188     copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
// Non-IPP variant: same wrapper, template only.
191 #define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
192 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
193                              uchar* dst, size_t dstep, Size size, void*) \
195     copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
// Instantiate one masked-copy wrapper per element size used by copyMaskTab.
// The _F variants name the matching IPP primitive; plain ones have none.
200 DEF_COPY_MASK(8u, uchar)
201 DEF_COPY_MASK(16u, ushort)
202 DEF_COPY_MASK_F(8uC3, Vec3b, 8u_C3MR, Ipp8u)
203 DEF_COPY_MASK_F(32s, int, 32s_C1MR, Ipp32s)
204 DEF_COPY_MASK_F(16uC3, Vec3s, 16u_C3MR, Ipp16u)
205 DEF_COPY_MASK(32sC2, Vec2i)
206 DEF_COPY_MASK_F(32sC3, Vec3i, 32s_C3MR, Ipp32s)
207 DEF_COPY_MASK_F(32sC4, Vec4i, 32s_C4MR, Ipp32s)
208 DEF_COPY_MASK(32sC6, Vec6i)
209 DEF_COPY_MASK(32sC8, Vec8i)
// Dispatch table of masked-copy kernels indexed by element size in bytes
// (initializer elided in this listing).
211 BinaryFunc copyMaskTab[] =
// Returns the specialized kernel for esz when one exists (table holds entries
// up to 32 bytes), otherwise the byte-wise generic fallback.
232 BinaryFunc getCopyMaskFunc(size_t esz)
234     return esz <= 32 && copyMaskTab[esz] ? copyMaskTab[esz] : copyMaskGeneric;
// Unmasked deep copy of this Mat into _dst.  Paths, in order:
//  1) _dst has a fixed different type  -> delegate to convertTo;
//  2) _dst is a UMat                   -> upload through the UMat allocator;
//  3) 2-D case                         -> row-wise memcpy (IPP attempted first);
//  4) n-D case                         -> plane-wise memcpy via NAryMatIterator.
// NOTE(review): elided listing — the branch conditions selecting paths 2/3/4
// and several early returns are not visible here.
238 void Mat::copyTo( OutputArray _dst ) const
240     int dtype = _dst.type();
241     if( _dst.fixedType() && dtype != type() )
// Only the depth may differ: channel count must match for convertTo.
243         CV_Assert( channels() == CV_MAT_CN(dtype) );
244         convertTo( _dst, dtype );
// --- UMat destination: compute per-dimension sizes/offsets and upload. ---
256     _dst.create( dims, size.p, type() );
257     UMat dst = _dst.getUMat();
259     size_t i, sz[CV_MAX_DIM], dstofs[CV_MAX_DIM], esz = elemSize();
260     for( i = 0; i < (size_t)dims; i++ )
263     dst.ndoffset(dstofs);
// Innermost offset is converted from elements to bytes for the allocator.
264     dstofs[dims-1] *= esz;
265     dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p);
// --- 2-D destination: row memcpy over the continuous-collapsed ROI. ---
271     _dst.create( rows, cols, type() );
272     Mat dst = _dst.getMat();
// Self-copy is a no-op (create may have reused the same buffer).
273     if( data == dst.data )
276     if( rows > 0 && cols > 0 )
278         const uchar* sptr = data;
279         uchar* dptr = dst.data;
// getContinuousSize may fold the whole ROI into a single "row".
281         Size sz = getContinuousSize(*this, dst);
282         size_t len = sz.width*elemSize();
// IPP bulk copy first (treats the row as len bytes of 8u data).
285         if (ippiCopy_8u_C1R(sptr, (int)step, dptr, (int)dst.step, ippiSize((int)len, sz.height)) >= 0)
290         for( ; sz.height--; sptr += step, dptr += dst.step )
291             memcpy( dptr, sptr, len );
// --- n-D destination: iterate matched planes and memcpy each. ---
296     _dst.create( dims, size, type() );
297     Mat dst = _dst.getMat();
298     if( data == dst.data )
303     const Mat* arrays[] = { this, &dst };
305     NAryMatIterator it(arrays, ptrs, 2);
306     size_t sz = it.size*elemSize();
308     for( size_t i = 0; i < it.nplanes; i++, ++it )
309         memcpy(ptrs[1], ptrs[0], sz);
// Masked copy: pixels where mask != 0 are copied, others keep dst's value.
// A multi-channel mask selects per-channel, which is implemented by treating
// each channel as a separate "pixel" of elemSize1() bytes.
// NOTE(review): elided listing — the empty-mask delegation and the zero-fill
// of a freshly (re)allocated dst are not visible between these lines.
313 void Mat::copyTo( OutputArray _dst, InputArray _mask ) const
315     Mat mask = _mask.getMat();
322     int cn = channels(), mcn = mask.channels();
// Mask must be 8-bit, either 1 channel or one channel per image channel.
323     CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
324     bool colorMask = mcn > 1;
// Per-channel masks copy elemSize1() bytes per mask byte; otherwise whole pixels.
326     size_t esz = colorMask ? elemSize1() : elemSize();
327     BinaryFunc copymask = getCopyMaskFunc(esz);
// Remember the old buffer: if create() reallocates, dst must not stay
// uninitialized where the mask is zero.
329     uchar* data0 = _dst.getMat().data;
330     _dst.create( dims, size, type() );
331     Mat dst = _dst.getMat();
333     if( dst.data != data0 ) // do not leave dst uninitialized
// --- 2-D path: single call over the continuous-collapsed ROI. ---
338         CV_Assert( size() == mask.size() );
339         Size sz = getContinuousSize(*this, dst, mask, mcn);
340         copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
// --- n-D path: iterate planes of src, dst and mask together. ---
344     const Mat* arrays[] = { this, &dst, &mask, 0 };
346     NAryMatIterator it(arrays, ptrs);
// Plane treated as one row; width scaled by mcn to cover per-channel masks.
347     Size sz((int)(it.size*mcn), 1);
349     for( size_t i = 0; i < it.nplanes; i++, ++it )
350         copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
// Fills the whole matrix with scalar s.  All-zero scalars take a memset fast
// path (an IPP variant exists but is compiled out via "&& 0"); otherwise the
// scalar is unrolled into a small raw-byte pattern that is tiled across the
// first plane and then replicated to the remaining planes by memcpy.
// NOTE(review): elided listing — returns, plane pointer resets and the IPP
// #endif are not visible between these lines.
353 Mat& Mat::operator = (const Scalar& s)
355     const Mat* arrays[] = { this };
357     NAryMatIterator it(arrays, &dptr, 1);
358     size_t elsize = it.size*elemSize();
// Reinterpret the 4 doubles of the scalar as 4 int64 words for a cheap
// all-bits-zero test.
359     const int64* is = (const int64*)&s.val[0];
361     if( is[0] == 0 && is[1] == 0 && is[2] == 0 && is[3] == 0 )
// IPP zero-fill path — deliberately disabled by the trailing "&& 0".
363 #if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY && 0
364         if (dims <= 2 || isContinuous())
366             IppiSize roisize = { cols, rows };
369                 roisize.width = (int)total();
372                 if (ippsZero_8u(data, static_cast<int>(roisize.width * elemSize())) >= 0)
376             roisize.width *= (int)elemSize();
378             if (ippiSet_8u_C1R(0, data, (int)step, roisize) >= 0)
// Portable zero path: memset each plane.
384         for( size_t i = 0; i < it.nplanes; i++, ++it )
385             memset( dptr, 0, elsize );
// Non-zero scalar: unroll it 12 elements deep into a raw buffer...
392     scalarToRawData(s, scalar, type(), 12);
393     size_t blockSize = 12*elemSize1();
// ...tile that pattern across the first plane...
395     for( size_t j = 0; j < elsize; j += blockSize )
397         size_t sz = MIN(blockSize, elsize - j);
398         memcpy( dptr + j, scalar, sz );
// ...and copy the completed first plane into every other plane.
402     for( size_t i = 1; i < it.nplanes; i++ )
405         memcpy( dptr, data, elsize );
// Masked set: writes `value` into pixels where mask != 0 (or everywhere when
// the mask is empty).  An IPP ippiSet_*MR fast path covers 16u/16s/32s/32f
// with 1/3/4 channels (8u commented out); the generic path reuses the masked
// copy machinery with a block of unrolled scalar values as the "source".
// NOTE(review): elided listing — IPP guards, status handling and several
// returns are not visible between these lines.
412 Mat& Mat::setTo(InputArray _value, InputArray _mask)
417     Mat value = _value.getMat(), mask = _mask.getMat();
// Value must be a scalar compatible with this type; mask 8U and same size.
419     CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT ));
420     CV_Assert( mask.empty() || (mask.type() == CV_8U && size == mask.size) );
423     int cn = channels(), depth0 = depth();
// IPP path preconditions: real mask, 2-D or fully continuous layout,
// supported depth (8U disabled per the comment), and 1/3/4 channels.
425     if (!mask.empty() && (dims <= 2 || (isContinuous() && mask.isContinuous())) &&
426             (/*depth0 == CV_8U ||*/ depth0 == CV_16U || depth0 == CV_16S || depth0 == CV_32S || depth0 == CV_32F) &&
427             (cn == 1 || cn == 3 || cn == 4))
// Convert the scalar into one raw pixel for IPP.
431         convertAndUnrollScalar( value, type(), _buf, 1 );
433         IppStatus status = (IppStatus)-1;
434         IppiSize roisize = { cols, rows };
435         int mstep = (int)mask.step[0], dstep = (int)step[0];
// Continuous data can be treated as a single row of total() pixels.
437         if (isContinuous() && mask.isContinuous())
439             roisize.width = (int)total();
// Single-channel dispatch by depth (8u intentionally commented out).
445             /*if (depth0 == CV_8U)
446                 status = ippiSet_8u_C1MR(*(Ipp8u *)buf, (Ipp8u *)data, dstep, roisize, mask.data, mstep);
447             else*/ if (depth0 == CV_16U)
448                 status = ippiSet_16u_C1MR(*(Ipp16u *)buf, (Ipp16u *)data, dstep, roisize, mask.data, mstep);
449             else if (depth0 == CV_16S)
450                 status = ippiSet_16s_C1MR(*(Ipp16s *)buf, (Ipp16s *)data, dstep, roisize, mask.data, mstep);
451             else if (depth0 == CV_32S)
452                 status = ippiSet_32s_C1MR(*(Ipp32s *)buf, (Ipp32s *)data, dstep, roisize, mask.data, mstep);
453             else if (depth0 == CV_32F)
454                 status = ippiSet_32f_C1MR(*(Ipp32f *)buf, (Ipp32f *)data, dstep, roisize, mask.data, mstep);
// Multi-channel dispatch: macros expand to the C3/C4 ippiSet calls.
456         else if (cn == 3 || cn == 4)
458 #define IPP_SET(ippfavor, ippcn) \
461         typedef Ipp##ippfavor ipptype; \
462         ipptype ippvalue[4] = { ((ipptype *)buf)[0], ((ipptype *)buf)[1], ((ipptype *)buf)[2], ((ipptype *)buf)[3] }; \
463         status = ippiSet_##ippfavor##_C##ippcn##MR(ippvalue, (ipptype *)data, dstep, roisize, mask.data, mstep); \
466 #define IPP_SET_CN(ippcn) \
471             /*if (depth0 == CV_8U) \
472                 IPP_SET(8u, ippcn); \
473             else*/ if (depth0 == CV_16U) \
474                 IPP_SET(16u, ippcn); \
475             else if (depth0 == CV_16S) \
476                 IPP_SET(16s, ippcn); \
477             else if (depth0 == CV_32S) \
478                 IPP_SET(32s, ippcn); \
479             else if (depth0 == CV_32F) \
480                 IPP_SET(32f, ippcn); \
// --- Generic path: unroll the scalar into a block and copy it in chunks. ---
497     size_t esz = elemSize();
498     BinaryFunc copymask = getCopyMaskFunc(esz);
// mask slot may be null: the iterator then walks src/dst planes only.
500     const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 };
501     uchar* ptrs[2]={0,0};
502     NAryMatIterator it(arrays, ptrs);
// Chunk size bounded by BLOCK_SIZE bytes, expressed in whole pixels.
503     int totalsz = (int)it.size, blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
504     AutoBuffer<uchar> _scbuf(blockSize0*esz + 32);
// Align the scalar block for the vectorized copy kernels.
505     uchar* scbuf = alignPtr((uchar*)_scbuf, (int)sizeof(double));
506     convertAndUnrollScalar( value, type(), scbuf, blockSize0 );
508     for( size_t i = 0; i < it.nplanes; i++, ++it )
510         for( int j = 0; j < totalsz; j += blockSize0 )
512             Size sz(std::min(blockSize0, totalsz - j), 1);
513             size_t blockSize = sz.width*esz;
// With a mask: masked copy from the scalar block; without: plain memcpy.
516                 copymask(scbuf, 0, ptrs[1], 0, ptrs[0], 0, sz, &esz);
520                 memcpy(ptrs[0], scbuf, blockSize);
521                 ptrs[0] += blockSize;
// Horizontal (left-right) flip working purely on bytes: a precomputed table
// maps each destination byte in the left half to its mirrored source byte,
// and each row swaps the pair so in-place operation is safe.
// NOTE(review): elided listing — tab pointer setup, j computation and closing
// braces are not visible between these lines.
529 flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
// Only the left half (rounded up) needs processing; each step swaps a pair.
531     int i, j, limit = (int)(((size.width + 1)/2)*esz);
532     AutoBuffer<int> _tab(size.width*esz);
// tab[i*esz+k] = byte offset of the mirrored pixel's k-th byte.
535     for( i = 0; i < size.width; i++ )
536         for( size_t k = 0; k < esz; k++ )
537             tab[i*esz + k] = (int)((size.width - i - 1)*esz + k);
539     for( ; size.height--; src += sstep, dst += dstep )
541         for( i = 0; i < limit; i++ )
// Read both bytes first so src == dst (in-place flip) works.
544             uchar t0 = src[i], t1 = src[j];
545             dst[i] = t1; dst[j] = t0;
// Vertical (top-bottom) flip: walks two row pairs toward the middle, swapping
// their contents.  When all four pointers are int-aligned it swaps 16 bytes
// (4 ints) per iteration, then 4 bytes, then a byte tail; reading both values
// before writing makes the in-place case (src == dst) correct.
// NOTE(review): elided listing — the byte-tail body and closing braces are
// not visible between these lines.
551 flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size, size_t esz )
// src1/dst1 start at the last row and move upward.
553     const uchar* src1 = src0 + (size.height - 1)*sstep;
554     uchar* dst1 = dst0 + (size.height - 1)*dstep;
// Work in bytes from here on.
555     size.width *= (int)esz;
557     for( int y = 0; y < (size.height + 1)/2; y++, src0 += sstep, src1 -= sstep,
558                                                   dst0 += dstep, dst1 -= dstep )
// Alignment test for all four row pointers at once.
561         if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
// 16-bytes-per-iteration unrolled swap.
563             for( ; i <= size.width - 16; i += 16 )
565                 int t0 = ((int*)(src0 + i))[0];
566                 int t1 = ((int*)(src1 + i))[0];
568                 ((int*)(dst0 + i))[0] = t1;
569                 ((int*)(dst1 + i))[0] = t0;
571                 t0 = ((int*)(src0 + i))[1];
572                 t1 = ((int*)(src1 + i))[1];
574                 ((int*)(dst0 + i))[1] = t1;
575                 ((int*)(dst1 + i))[1] = t0;
577                 t0 = ((int*)(src0 + i))[2];
578                 t1 = ((int*)(src1 + i))[2];
580                 ((int*)(dst0 + i))[2] = t1;
581                 ((int*)(dst1 + i))[2] = t0;
583                 t0 = ((int*)(src0 + i))[3];
584                 t1 = ((int*)(src1 + i))[3];
586                 ((int*)(dst0 + i))[3] = t1;
587                 ((int*)(dst1 + i))[3] = t0;
// 4-byte-per-iteration cleanup.
590             for( ; i <= size.width - 4; i += 4 )
592                 int t0 = ((int*)(src0 + i))[0];
593                 int t1 = ((int*)(src1 + i))[0];
595                 ((int*)(dst0 + i))[0] = t1;
596                 ((int*)(dst1 + i))[0] = t0;
// Byte tail (body elided in this listing).
600         for( ; i < size.width; i++ )
// Bit flags describing which axes the OpenCL flip kernels mirror.
613 enum { FLIP_COLS = 1 << 0, FLIP_ROWS = 1 << 1, FLIP_BOTH = FLIP_ROWS | FLIP_COLS };
// OpenCL implementation of cv::flip.  Maps the flipCode convention
// (0 = rows, >0 = cols, <0 = both) onto one of three kernels, builds it with
// type/vector-width defines, and launches over a half-sized row/col range
// (each work item swaps a mirrored pair).
// NOTE(review): elided listing — kernel-empty checks and early returns are
// not visible between these lines.
615 static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode )
617     CV_Assert(flipCode >= -1 && flipCode <= 1);
619     const ocl::Device & dev = ocl::Device::getDefault();
620     int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
621             flipType, kercn = std::min(ocl::predictOptimalVectorWidth(_src, _dst), 4);
// Bail out on double data if the device lacks fp64 support.
623     bool doubleSupport = dev.doubleFPConfig() > 0;
624     if (!doubleSupport && depth == CV_64F)
630     const char * kernelName;
632         kernelName = "arithm_flip_rows", flipType = FLIP_ROWS;
633     else if (flipCode > 0)
634         kernelName = "arithm_flip_cols", flipType = FLIP_COLS;
636         kernelName = "arithm_flip_rows_cols", flipType = FLIP_BOTH;
// Intel GPUs process 4 pixel rows per work item.
638     int pxPerWIy = (dev.isIntel() && (dev.type() & ocl::Device::TYPE_GPU)) ? 4 : 1;
// 3-channel data can't widen the vector type except for pure row flips.
639     kercn = (cn!=3 || flipType == FLIP_ROWS) ? std::max(kercn, cn) : cn;
641     ocl::Kernel k(kernelName, ocl::core::flip_oclsrc,
642         format( "-D T=%s -D T1=%s -D cn=%d -D PIX_PER_WI_Y=%d -D kercn=%d",
643                 kercn != cn ? ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)) : ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
644                 kercn != cn ? ocl::typeToStr(depth) : ocl::memopTypeToStr(depth), cn, pxPerWIy, kercn));
648     Size size = _src.size();
649     _dst.create(size, type);
650     UMat src = _src.getUMat(), dst = _dst.getUMat();
// Halve the dimension(s) being flipped: each item handles a mirrored pair.
652     int cols = size.width * cn / kercn, rows = size.height;
653     cols = flipType == FLIP_COLS ? (cols + 1) >> 1 : cols;
654     rows = flipType & FLIP_ROWS ? (rows + 1) >> 1 : rows;
656     k.args(ocl::KernelArg::ReadOnlyNoSize(src),
657            ocl::KernelArg::WriteOnly(dst, cn, kercn), rows, cols);
659     size_t maxWorkGroupSize = dev.maxWorkGroupSize();
660     CV_Assert(maxWorkGroupSize % 4 == 0);
662     size_t globalsize[2] = { cols, (rows + pxPerWIy - 1) / pxPerWIy },
663             localsize[2] = { maxWorkGroupSize / 4, 4 };
// Explicit local size only for column flips on non-Intel devices.
664     return k.run(2, globalsize, (flipType == FLIP_COLS) && !dev.isIntel() ? localsize : NULL, false);
// Public cv::flip: flip_mode 0 = around x-axis (vertical), >0 = around y-axis
// (horizontal), <0 = both.  Degenerate sizes reduce to a plain copy; then OCL
// is tried, then IPP's ippiMirror (in-place or not), and finally the portable
// flipVert/flipHoriz helpers (both-axes = vertical then in-place horizontal).
// NOTE(review): elided listing — several branch headers, flip_mode
// normalization for 1-row inputs, and IPP guards are not visible here.
669 void flip( InputArray _src, OutputArray _dst, int flip_mode )
671     CV_Assert( _src.dims() <= 2 );
672     Size size = _src.size();
// 1-row input: flipping rows is meaningless (normalization code elided).
678     if (size.height == 1)
// No-op flips collapse to a straight copy.
682     if ((size.width == 1 && flip_mode > 0) ||
683         (size.height == 1 && flip_mode == 0) ||
684         (size.height == 1 && size.width == 1 && flip_mode < 0))
686         return _src.copyTo(_dst);
689     CV_OCL_RUN( _dst.isUMat(), ocl_flip(_src, _dst, flip_mode))
691     Mat src = _src.getMat();
692     int type = src.type();
693     _dst.create( size, type );
694     Mat dst = _dst.getMat();
695     size_t esz = CV_ELEM_SIZE(type);
698     typedef IppStatus (CV_STDCALL * ippiMirror)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize roiSize, IppiAxis flip);
699     typedef IppStatus (CV_STDCALL * ippiMirrorI)(const void * pSrcDst, int srcDstStep, IppiSize roiSize, IppiAxis flip);
700     ippiMirror ippFunc = 0;
701     ippiMirrorI ippFuncI = 0;
// In-place flip: select the *_IR (in-place) ippiMirror variant by type.
703     if (src.data == dst.data)
705         CV_SUPPRESS_DEPRECATED_START
707             type == CV_8UC1 ? (ippiMirrorI)ippiMirror_8u_C1IR :
708             type == CV_8UC3 ? (ippiMirrorI)ippiMirror_8u_C3IR :
709             type == CV_8UC4 ? (ippiMirrorI)ippiMirror_8u_C4IR :
710             type == CV_16UC1 ? (ippiMirrorI)ippiMirror_16u_C1IR :
711             type == CV_16UC3 ? (ippiMirrorI)ippiMirror_16u_C3IR :
712             type == CV_16UC4 ? (ippiMirrorI)ippiMirror_16u_C4IR :
713             type == CV_16SC1 ? (ippiMirrorI)ippiMirror_16s_C1IR :
714             type == CV_16SC3 ? (ippiMirrorI)ippiMirror_16s_C3IR :
715             type == CV_16SC4 ? (ippiMirrorI)ippiMirror_16s_C4IR :
716             type == CV_32SC1 ? (ippiMirrorI)ippiMirror_32s_C1IR :
717             type == CV_32SC3 ? (ippiMirrorI)ippiMirror_32s_C3IR :
718             type == CV_32SC4 ? (ippiMirrorI)ippiMirror_32s_C4IR :
719             type == CV_32FC1 ? (ippiMirrorI)ippiMirror_32f_C1IR :
720             type == CV_32FC3 ? (ippiMirrorI)ippiMirror_32f_C3IR :
721             type == CV_32FC4 ? (ippiMirrorI)ippiMirror_32f_C4IR : 0;
722         CV_SUPPRESS_DEPRECATED_END
// Out-of-place flip: select the src->dst ippiMirror variant by type.
727             type == CV_8UC1 ? (ippiMirror)ippiMirror_8u_C1R :
728             type == CV_8UC3 ? (ippiMirror)ippiMirror_8u_C3R :
729             type == CV_8UC4 ? (ippiMirror)ippiMirror_8u_C4R :
730             type == CV_16UC1 ? (ippiMirror)ippiMirror_16u_C1R :
731             type == CV_16UC3 ? (ippiMirror)ippiMirror_16u_C3R :
732             type == CV_16UC4 ? (ippiMirror)ippiMirror_16u_C4R :
733             type == CV_16SC1 ? (ippiMirror)ippiMirror_16s_C1R :
734             type == CV_16SC3 ? (ippiMirror)ippiMirror_16s_C3R :
735             type == CV_16SC4 ? (ippiMirror)ippiMirror_16s_C4R :
736             type == CV_32SC1 ? (ippiMirror)ippiMirror_32s_C1R :
737             type == CV_32SC3 ? (ippiMirror)ippiMirror_32s_C3R :
738             type == CV_32SC4 ? (ippiMirror)ippiMirror_32s_C4R :
739             type == CV_32FC1 ? (ippiMirror)ippiMirror_32f_C1R :
740             type == CV_32FC3 ? (ippiMirror)ippiMirror_32f_C3R :
741             type == CV_32FC4 ? (ippiMirror)ippiMirror_32f_C4R : 0;
// Map OpenCV's flip_mode convention onto IPP's axis enum.
743     IppiAxis axis = flip_mode == 0 ? ippAxsHorizontal :
744         flip_mode > 0 ? ippAxsVertical : ippAxsBoth;
745     IppiSize roisize = { dst.cols, dst.rows };
749         if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, ippiSize(src.cols, src.rows), axis) >= 0)
753     else if (ippFuncI != 0)
755         if (ippFuncI(dst.ptr(), (int)dst.step, roisize, axis) >= 0)
// Portable fallback: vertical flip for mode<=0, else horizontal;
// "both" finishes with an in-place horizontal pass over dst.
762         flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
764         flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
767         flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
// OpenCL path is compiled out on Apple platforms.
770 #if defined HAVE_OPENCL && !defined __APPLE__
// OpenCL implementation of cv::repeat: a single "repeat" kernel is built with
// the tile counts (nx, ny) baked in as defines, so each work item writes all
// of its replicas.  ny == nx == 1 degenerates to a plain copy (body elided).
772 static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
774     if (ny == 1 && nx == 1)
780     int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
781             rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1,
782             kercn = std::min(ocl::predictOptimalVectorWidth(_src, _dst), 4);
784     ocl::Kernel k("repeat", ocl::core::repeat_oclsrc,
785                   format("-D T=%s -D nx=%d -D ny=%d -D rowsPerWI=%d -D cn=%d",
786                          ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
787                          nx, ny, rowsPerWI, kercn));
791     UMat src = _src.getUMat(), dst = _dst.getUMat();
792     k.args(ocl::KernelArg::ReadOnly(src, cn, kercn), ocl::KernelArg::WriteOnlyNoSize(dst));
// Global range covers the SOURCE only; the kernel fans out to the tiles.
794     size_t globalsize[] = { src.cols * cn / kercn, (src.rows + rowsPerWI - 1) / rowsPerWI };
795     return k.run(2, globalsize, NULL, false);
// Tiles _src ny times vertically and nx times horizontally into _dst.
// CPU strategy: build the first band of ssize.height rows by memcpy-ing the
// source row across the width, then replicate whole rows downward by copying
// from ssize.height rows above (which is always already filled).
800 void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
802     CV_Assert( _src.dims() <= 2 );
803     CV_Assert( ny > 0 && nx > 0 );
805     Size ssize = _src.size();
806     _dst.create(ssize.height*ny, ssize.width*nx, _src.type());
808 #if !defined __APPLE__
809     CV_OCL_RUN(_dst.isUMat(),
810                ocl_repeat(_src, ny, nx, _dst))
813     Mat src = _src.getMat(), dst = _dst.getMat();
814     Size dsize = dst.size();
815     int esz = (int)src.elemSize();
// Work in bytes from here on.
817     ssize.width *= esz; dsize.width *= esz;
// First band: tile each source row nx times across its destination row.
819     for( y = 0; y < ssize.height; y++ )
821         for( x = 0; x < dsize.width; x += ssize.width )
822             memcpy( dst.ptr(y) + x, src.ptr(y), ssize.width );
// Remaining bands: copy complete rows from one band up.
825     for( ; y < dsize.height; y++ )
826         memcpy( dst.ptr(y), dst.ptr(y - ssize.height), dsize.width );
// Convenience overload returning the tiled matrix by value; nx==ny==1
// short-circuits (return statements elided in this listing).
829 Mat repeat(const Mat& src, int ny, int nx)
831     if( nx == 1 && ny == 1 )
834     repeat(src, ny, nx, dst);
843 Various border types, image boundaries are denoted with '|'
845 * BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
846 * BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
847 * BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
848 * BORDER_WRAP: cdefgh|abcdefgh|abcdefg
849 * BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
// Maps an out-of-range coordinate p into [0, len) according to borderType
// (see the diagram above).  In-range coordinates pass through unchanged.
// NOTE(review): elided listing — the reflect loop header, the wrap branches
// and the final return are not visible between these lines.
851 int cv::borderInterpolate( int p, int len, int borderType )
// Unsigned compare handles negative p and p >= len in one test.
853     if( (unsigned)p < (unsigned)len )
855     else if( borderType == BORDER_REPLICATE )
856         p = p < 0 ? 0 : len - 1;
857     else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
// REFLECT_101 skips the edge pixel itself (delta == 1).
859         int delta = borderType == BORDER_REFLECT_101;
// Repeated mirroring for coordinates more than one period out of range.
867                 p = len - 1 - (p - len) - delta;
869         while( (unsigned)p >= (unsigned)len );
871     else if( borderType == BORDER_WRAP )
875             p -= ((p-len+1)/len)*len;
// BORDER_CONSTANT has no source pixel (sentinel handling elided).
879     else if( borderType == BORDER_CONSTANT )
882         CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
// Byte-level border builder for all non-constant border types.  `cn` here is
// the element size in BYTES (callers pass elemSize()).  Left/right borders
// are produced via a precomputed byte-index table; top/bottom borders are
// whole-row memcpys from the already-bordered interior rows.  When strides
// and pointers are int-aligned, side borders are written int-at-a-time.
// NOTE(review): elided listing — intMode setup, the x-coordinate clamp and
// several braces are not visible between these lines.
889 void copyMakeBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
890                         uchar* dst, size_t dststep, cv::Size dstroi,
891                         int top, int left, int cn, int borderType )
893     const int isz = (int)sizeof(int);
894     int i, j, k, elemSize = 1;
895     bool intMode = false;
// All strides/pointers/element-size divisible by sizeof(int) => int mode.
897     if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 )
904     cv::AutoBuffer<int> _tab((dstroi.width - srcroi.width)*cn);
906     int right = dstroi.width - srcroi.width - left;
907     int bottom = dstroi.height - srcroi.height - top;
// tab[0..left*cn): source byte indices for the left border columns.
909     for( i = 0; i < left; i++ )
911         j = cv::borderInterpolate(i - left, srcroi.width, borderType)*cn;
912         for( k = 0; k < cn; k++ )
913             tab[i*cn + k] = j + k;
// tab[left*cn..): source byte indices for the right border columns.
916     for( i = 0; i < right; i++ )
918         j = cv::borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn;
919         for( k = 0; k < cn; k++ )
920             tab[(i+left)*cn + k] = j + k;
// dstInner points at the first interior pixel of the first interior row.
928     uchar* dstInner = dst + dststep*top + left*elemSize;
930     for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
// Skip the copy when operating in place on an extended ROI.
932         if( dstInner != src )
933             memcpy(dstInner, src, srcroi.width*elemSize);
// Int-mode side borders (elemSize units are ints here).
937             const int* isrc = (int*)src;
938             int* idstInner = (int*)dstInner;
939             for( j = 0; j < left; j++ )
940                 idstInner[j - left] = isrc[tab[j]];
941             for( j = 0; j < right; j++ )
942                 idstInner[j + srcroi.width] = isrc[tab[j + left]];
// Byte-mode side borders.
946             for( j = 0; j < left; j++ )
947                 dstInner[j - left] = src[tab[j]];
948             for( j = 0; j < right; j++ )
949                 dstInner[j + srcroi.width] = src[tab[j + left]];
// Full destination row width in bytes for the row copies below.
953     dstroi.width *= elemSize;
// Top border: copy from the interpolated interior row.
956     for( i = 0; i < top; i++ )
958         j = cv::borderInterpolate(i - top, srcroi.height, borderType);
959         memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width);
// Bottom border likewise.
962     for( i = 0; i < bottom; i++ )
964         j = cv::borderInterpolate(i + srcroi.height, srcroi.height, borderType);
965         memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width);
// Byte-level BORDER_CONSTANT builder: pre-renders one full destination row of
// the constant value, then uses memcpy for the left/right strips of interior
// rows and whole-row memcpys for the top and bottom borders.  `cn` is bytes
// per element; `value` points at one rendered element.
// NOTE(review): elided listing — dst advance to the interior and braces are
// not visible between these lines.
970 void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
971                              uchar* dst, size_t dststep, cv::Size dstroi,
972                              int top, int left, int cn, const uchar* value )
975     cv::AutoBuffer<uchar> _constBuf(dstroi.width*cn);
976     uchar* constBuf = _constBuf;
977     int right = dstroi.width - srcroi.width - left;
978     int bottom = dstroi.height - srcroi.height - top;
// Fill the scratch row with repeated copies of the constant element.
980     for( i = 0; i < dstroi.width; i++ )
982         for( j = 0; j < cn; j++ )
983             constBuf[i*cn + j] = value[j];
991     uchar* dstInner = dst + dststep*top + left;
993     for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
// In-place extended-ROI case: interior already holds the data.
995         if( dstInner != src )
996             memcpy( dstInner, src, srcroi.width );
997         memcpy( dstInner - left, constBuf, left );
998         memcpy( dstInner + srcroi.width, constBuf, right );
// Top and bottom borders are whole constant rows.
1003     for( i = 0; i < top; i++ )
1004         memcpy(dst + (i - top)*dststep, constBuf, dstroi.width);
1006     for( i = 0; i < bottom; i++ )
1007         memcpy(dst + (i + srcroi.height)*dststep, constBuf, dstroi.width);
// OpenCL implementation of copyMakeBorder.  Rejects unsupported border types;
// for non-isolated submatrices it first grows the ROI into the parent data so
// real pixels are used instead of synthesized ones (mirroring the CPU path).
// A zero-sized border reduces to a plain copy.
// NOTE(review): elided listing — kernel-empty checks, the unsupported-type
// early return, and the leftover top/bottom/left/right adjustment after
// adjustROI are not visible between these lines.
1016 static bool ocl_copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
1017                                 int left, int right, int borderType, const Scalar& value )
1019     int type = _src.type(), cn = CV_MAT_CN(type), depth = CV_MAT_DEPTH(type),
1020             rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
1021     bool isolated = (borderType & BORDER_ISOLATED) != 0;
1022     borderType &= ~cv::BORDER_ISOLATED;
1024     if ( !(borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE || borderType == BORDER_REFLECT ||
1025            borderType == BORDER_WRAP || borderType == BORDER_REFLECT_101) ||
// borderMap is indexed directly by the borderType enum value.
1029     const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101" };
// 3-channel scalars are padded to 4 for the kernel argument.
1030     int scalarcn = cn == 3 ? 4 : cn;
1031     int sctype = CV_MAKETYPE(depth, scalarcn);
1032     String buildOptions = format("-D T=%s -D %s -D T1=%s -D cn=%d -D ST=%s -D rowsPerWI=%d",
1033                                  ocl::memopTypeToStr(type), borderMap[borderType],
1034                                  ocl::memopTypeToStr(depth), cn,
1035                                  ocl::memopTypeToStr(sctype), rowsPerWI);
1037     ocl::Kernel k("copyMakeBorder", ocl::core::copymakeborder_oclsrc, buildOptions);
1041     UMat src = _src.getUMat();
// Non-isolated submatrix: pull as much border as possible from the parent.
1042     if( src.isSubmatrix() && !isolated )
1046         src.locateROI(wholeSize, ofs);
1047         int dtop = std::min(ofs.y, top);
1048         int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
1049         int dleft = std::min(ofs.x, left);
1050         int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
1051         src.adjustROI(dtop, dbottom, dleft, dright);
1058     _dst.create(src.rows + top + bottom, src.cols + left + right, type);
1059     UMat dst = _dst.getUMat();
// No border left to make: just copy (unless src and dst already alias).
1061     if (top == 0 && left == 0 && bottom == 0 && right == 0)
1063         if(src.u != dst.u || src.step != dst.step)
// The constant value is passed as a 1x1 Mat of the padded scalar type.
1068     k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
1069            top, left, ocl::KernelArg::Constant(Mat(1, 1, sctype, value)));
1071     size_t globalsize[2] = { dst.cols, (dst.rows + rowsPerWI - 1) / rowsPerWI };
1072     return k.run(2, globalsize, NULL, false);
// Public cv::copyMakeBorder.  Flow: validate sizes -> try OpenCL -> for
// non-isolated submatrices grow the ROI into the parent (real neighboring
// pixels beat synthesized ones) -> zero border degenerates to a copy ->
// IPP fast path (entire section compiled out via "#if ... && 0") -> portable
// copyMakeBorder_8u / copyMakeConstBorder_8u byte-level helpers.
// NOTE(review): elided listing — the leftover top/bottom/left/right
// adjustment after adjustROI, several branch headers and returns are not
// visible between these lines.
1079 void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
1080                          int left, int right, int borderType, const Scalar& value )
1082     CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 );
1084     CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
1085                ocl_copyMakeBorder(_src, _dst, top, bottom, left, right, borderType, value))
1087     Mat src = _src.getMat();
1088     int type = src.type();
// Non-isolated submatrix: extend the ROI into the parent where possible,
// so part of the requested border is real data.
1090     if( src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0 )
1094         src.locateROI(wholeSize, ofs);
1095         int dtop = std::min(ofs.y, top);
1096         int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
1097         int dleft = std::min(ofs.x, left);
1098         int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
1099         src.adjustROI(dtop, dbottom, dleft, dright);
1106     _dst.create( src.rows + top + bottom, src.cols + left + right, type );
1107     Mat dst = _dst.getMat();
// Border fully satisfied by ROI extension: plain copy unless aliasing.
1109     if(top == 0 && left == 0 && bottom == 0 && right == 0)
1111         if(src.data != dst.data || src.step != dst.step)
1116     borderType &= ~BORDER_ISOLATED;
// ---- IPP fast path: deliberately disabled by the "&& 0". ----
1118 #if defined HAVE_IPP && 0
1119     typedef IppStatus (CV_STDCALL * ippiCopyMakeBorder)(const void * pSrc, int srcStep, IppiSize srcRoiSize, void * pDst,
1120                                                         int dstStep, IppiSize dstRoiSize, int topBorderHeight, int leftBorderWidth);
1121     typedef IppStatus (CV_STDCALL * ippiCopyMakeBorderI)(const void * pSrc, int srcDstStep, IppiSize srcRoiSize, IppiSize dstRoiSize,
1122                                                          int topBorderHeight, int leftborderwidth);
1123     typedef IppStatus (CV_STDCALL * ippiCopyConstBorder)(const void * pSrc, int srcStep, IppiSize srcRoiSize, void * pDst, int dstStep,
1124                                                          IppiSize dstRoiSize, int topBorderHeight, int leftBorderWidth, void * value);
1126     IppiSize srcRoiSize = { src.cols, src.rows }, dstRoiSize = { dst.cols, dst.rows };
1127     ippiCopyMakeBorder ippFunc = 0;
1128     ippiCopyMakeBorderI ippFuncI = 0;
1129     ippiCopyConstBorder ippFuncConst = 0;
// In-place detection selects the *_IR variants below.
1130     bool inplace = dst.datastart == src.datastart;
1132     if (borderType == BORDER_CONSTANT)
// Several 1-channel variants are commented out due to known IPP 8.1 bugs.
1135         // type == CV_8UC1 ? (ippiCopyConstBorder)ippiCopyConstBorder_8u_C1R : bug in IPP 8.1
1136             type == CV_16UC1 ? (ippiCopyConstBorder)ippiCopyConstBorder_16u_C1R :
1137         //    type == CV_16SC1 ? (ippiCopyConstBorder)ippiCopyConstBorder_16s_C1R : bug in IPP 8.1
1138         //    type == CV_32SC1 ? (ippiCopyConstBorder)ippiCopyConstBorder_32s_C1R : bug in IPP 8.1
1139         //    type == CV_32FC1 ? (ippiCopyConstBorder)ippiCopyConstBorder_32f_C1R : bug in IPP 8.1
1140             type == CV_8UC3 ? (ippiCopyConstBorder)ippiCopyConstBorder_8u_C3R :
1141             type == CV_16UC3 ? (ippiCopyConstBorder)ippiCopyConstBorder_16u_C3R :
1142             type == CV_16SC3 ? (ippiCopyConstBorder)ippiCopyConstBorder_16s_C3R :
1143             type == CV_32SC3 ? (ippiCopyConstBorder)ippiCopyConstBorder_32s_C3R :
1144             type == CV_32FC3 ? (ippiCopyConstBorder)ippiCopyConstBorder_32f_C3R :
1145             type == CV_8UC4 ? (ippiCopyConstBorder)ippiCopyConstBorder_8u_C4R :
1146             type == CV_16UC4 ? (ippiCopyConstBorder)ippiCopyConstBorder_16u_C4R :
1147             type == CV_16SC4 ? (ippiCopyConstBorder)ippiCopyConstBorder_16s_C4R :
1148             type == CV_32SC4 ? (ippiCopyConstBorder)ippiCopyConstBorder_32s_C4R :
1149             type == CV_32FC4 ? (ippiCopyConstBorder)ippiCopyConstBorder_32f_C4R : 0;
1151     else if (borderType == BORDER_WRAP)
1155         CV_SUPPRESS_DEPRECATED_START
// Wrap: IPP only has 32s variants; 32f reuses them bit-for-bit.
1157             type == CV_32SC1 ? (ippiCopyMakeBorderI)ippiCopyWrapBorder_32s_C1IR :
1158             type == CV_32FC1 ? (ippiCopyMakeBorderI)ippiCopyWrapBorder_32s_C1IR : 0;
1159         CV_SUPPRESS_DEPRECATED_END
1164             type == CV_32SC1 ? (ippiCopyMakeBorder)ippiCopyWrapBorder_32s_C1R :
1165             type == CV_32FC1 ? (ippiCopyMakeBorder)ippiCopyWrapBorder_32s_C1R : 0;
1168     else if (borderType == BORDER_REPLICATE)
// Replicate, in-place variants.
1172         CV_SUPPRESS_DEPRECATED_START
1174             type == CV_8UC1 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_8u_C1IR :
1175             type == CV_16UC1 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_16u_C1IR :
1176             type == CV_16SC1 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_16s_C1IR :
1177             type == CV_32SC1 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_32s_C1IR :
1178             type == CV_32FC1 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_32f_C1IR :
1179             type == CV_8UC3 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_8u_C3IR :
1180             type == CV_16UC3 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_16u_C3IR :
1181             type == CV_16SC3 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_16s_C3IR :
1182             type == CV_32SC3 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_32s_C3IR :
1183             type == CV_32FC3 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_32f_C3IR :
1184             type == CV_8UC4 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_8u_C4IR :
1185             type == CV_16UC4 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_16u_C4IR :
1186             type == CV_16SC4 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_16s_C4IR :
1187             type == CV_32SC4 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_32s_C4IR :
1188             type == CV_32FC4 ? (ippiCopyMakeBorderI)ippiCopyReplicateBorder_32f_C4IR : 0;
1189         CV_SUPPRESS_DEPRECATED_END
// Replicate, out-of-place variants.
1194             type == CV_8UC1 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_8u_C1R :
1195             type == CV_16UC1 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_16u_C1R :
1196             type == CV_16SC1 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_16s_C1R :
1197             type == CV_32SC1 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_32s_C1R :
1198             type == CV_32FC1 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_32f_C1R :
1199             type == CV_8UC3 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_8u_C3R :
1200             type == CV_16UC3 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_16u_C3R :
1201             type == CV_16SC3 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_16s_C3R :
1202             type == CV_32SC3 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_32s_C3R :
1203             type == CV_32FC3 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_32f_C3R :
1204             type == CV_8UC4 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_8u_C4R :
1205             type == CV_16UC4 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_16u_C4R :
1206             type == CV_16SC4 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_16s_C4R :
1207             type == CV_32SC4 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_32s_C4R :
1208             type == CV_32FC4 ? (ippiCopyMakeBorder)ippiCopyReplicateBorder_32f_C4R : 0;
1212     if (ippFunc || ippFuncI || ippFuncConst)
1215         scalarToRawData(value, scbuf, type);
// Try whichever variant was selected; fall through on failure.
1217         if ( (ippFunc && ippFunc(src.data, (int)src.step, srcRoiSize, dst.data, (int)dst.step, dstRoiSize, top, left) >= 0) ||
1218             (ippFuncI && ippFuncI(src.data, (int)src.step, srcRoiSize, dstRoiSize, top, left) >= 0) ||
1219             (ippFuncConst && ippFuncConst(src.data, (int)src.step, srcRoiSize, dst.data, (int)dst.step,
1220                                           dstRoiSize, top, left, scbuf) >= 0))
1223         setIppErrorStatus();
// ---- Portable fallback: byte-level helpers (cn = elemSize in bytes). ----
1227     if( borderType != BORDER_CONSTANT )
1228         copyMakeBorder_8u( src.ptr(), src.step, src.size(),
1229                            dst.ptr(), dst.step, dst.size(),
1230                            top, left, (int)src.elemSize(), borderType );
1233         int cn = src.channels(), cn1 = cn;
1234         AutoBuffer<double> buf(cn);
// Channel count > 4: only a uniform scalar is representable.
1237             CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] );
1240         scalarToRawData(value, buf, CV_MAKETYPE(src.depth(), cn1), cn);
1241         copyMakeConstBorder_8u( src.ptr(), src.step, src.size(),
1242                                 dst.ptr(), dst.step, dst.size(),
1243                                 top, left, (int)src.elemSize(), (uchar*)(double*)buf );
// Legacy C-API copy: srcarr -> dstarr, optionally through a mask.
// Two paths: (1) sparse-to-sparse deep copy (mask not supported),
// (2) dense copy via cv::Mat, honoring IplImage COI when set.
// NOTE: some source lines are elided in this view; comments hedge accordingly.
1249 cvCopy( const void* srcarr, void* dstarr, const void* maskarr )
1251 if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr))
// Sparse path: masks are meaningless for sparse matrices.
1253 CV_Assert( maskarr == 0 );
1254 CvSparseMat* src1 = (CvSparseMat*)srcarr;
1255 CvSparseMat* dst1 = (CvSparseMat*)dstarr;
1256 CvSparseMatIterator iterator;
// Copy the header (dims/sizes/offsets), then rebuild the node storage.
1259 dst1->dims = src1->dims;
1260 memcpy( dst1->size, src1->size, src1->dims*sizeof(src1->size[0]));
1261 dst1->valoffset = src1->valoffset;
1262 dst1->idxoffset = src1->idxoffset;
1263 cvClearSet( dst1->heap );
// Grow the destination hash table if the source's node count would exceed
// the load-factor threshold (assignment target line is elided here).
1265 if( src1->heap->active_count >= dst1->hashsize*CV_SPARSE_HASH_RATIO )
1267 cvFree( &dst1->hashtable );
1268 dst1->hashsize = src1->hashsize;
1270 (void**)cvAlloc( dst1->hashsize*sizeof(dst1->hashtable[0]));
1273 memset( dst1->hashtable, 0, dst1->hashsize*sizeof(dst1->hashtable[0]));
// Deep-copy every node, re-bucketing by hashval into the destination table
// (hashsize is assumed to be a power of two, hence the & mask).
1275 for( node = cvInitSparseMatIterator( src1, &iterator );
1276 node != 0; node = cvGetNextSparseNode( &iterator ))
1278 CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( dst1->heap );
1279 int tabidx = node->hashval & (dst1->hashsize - 1);
1280 memcpy( node_copy, node, dst1->heap->elem_size );
// Prepend to the bucket's singly-linked chain.
1281 node_copy->next = (CvSparseNode*)dst1->hashtable[tabidx];
1282 dst1->hashtable[tabidx] = node_copy;
// Dense path: wrap both arrays as cv::Mat (no data copy).
1286 cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
1287 CV_Assert( src.depth() == dst.depth() && src.size == dst.size );
// Respect the IplImage channel-of-interest (COI), if either side has one.
1289 int coi1 = 0, coi2 = 0;
1290 if( CV_IS_IMAGE(srcarr) )
1291 coi1 = cvGetImageCOI((const IplImage*)srcarr);
1292 if( CV_IS_IMAGE(dstarr) )
1293 coi2 = cvGetImageCOI((const IplImage*)dstarr);
// A side without a COI must be single-channel for the COI copy to make sense.
1297 CV_Assert( (coi1 != 0 || src.channels() == 1) &&
1298 (coi2 != 0 || dst.channels() == 1) );
// Copy exactly one channel: COI is 1-based, mixChannels indices are 0-based.
1300 int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
1301 cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
// No COI: plain (optionally masked) full copy.
1305 CV_Assert( src.channels() == dst.channels() );
1310 src.copyTo(dst, cv::cvarrToMat(maskarr));
// Legacy C-API fill: set every (masked) element of arr to the given scalar.
// Delegates to cv::Mat::setTo; lines handling the no-mask case appear to be
// elided from this view.
1314 cvSet( void* arr, CvScalar value, const void* maskarr )
1316 cv::Mat m = cv::cvarrToMat(arr);
1320 m.setTo(cv::Scalar(value), cv::cvarrToMat(maskarr));
// Legacy C-API zeroing. For sparse matrices this clears the node heap and
// hash table (making the matrix empty) instead of writing zeros; dense arrays
// are wrapped as cv::Mat (the actual zero-fill call is elided from this view).
1324 cvSetZero( CvArr* arr )
1326 if( CV_IS_SPARSE_MAT(arr) )
1328 CvSparseMat* mat1 = (CvSparseMat*)arr;
// Release all nodes back to the heap, then empty every hash bucket.
1329 cvClearSet( mat1->heap );
1330 if( mat1->hashtable )
1331 memset( mat1->hashtable, 0, mat1->hashsize*sizeof(mat1->hashtable[0]));
1334 cv::Mat m = cv::cvarrToMat(arr);
// Legacy C-API flip around horizontal/vertical/both axes (flip_mode semantics
// as in cv::flip). The branch choosing dst when dstarr is null is elided from
// this view; when dstarr is given, src and dst must match in type and size.
1339 cvFlip( const CvArr* srcarr, CvArr* dstarr, int flip_mode )
1341 cv::Mat src = cv::cvarrToMat(srcarr);
1347 dst = cv::cvarrToMat(dstarr);
1349 CV_Assert( src.type() == dst.type() && src.size() == dst.size() );
1350 cv::flip( src, dst, flip_mode );
// Legacy C-API tiling: fill dst by repeating src. dst's dimensions must be
// exact integer multiples of src's; the tile counts are derived from the
// size ratio and passed to cv::repeat.
1354 cvRepeat( const CvArr* srcarr, CvArr* dstarr )
1356 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
1357 CV_Assert( src.type() == dst.type() &&
1358 dst.rows % src.rows == 0 && dst.cols % src.cols == 0 );
1359 cv::repeat(src, dst.rows/src.rows, dst.cols/src.cols, dst);