1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // For Open Source Computer Vision Library
12 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
13 // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
14 // Copyright (C) 2014, Itseez Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 /* ////////////////////////////////////////////////////////////////////
45 // Mat basic operations: Copy, Set
49 #include "precomp.hpp"
50 #include "opencl_kernels_core.hpp"
// Generic masked copy for a fixed-size element type T: copies src -> dst for
// every pixel whose mask byte is non-zero, one row at a time (sstep/mstep/dstep
// are byte strides).
// NOTE(review): interior lines (braces, the per-pixel copy statement, the
// declaration of x) are elided in this listing; comments cover only what is visible.
56 template<typename T> static void
57 copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
59 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
61 const T* src = (const T*)_src;
// 4x manual unrolling of the per-pixel loop when enabled at build time.
64 #if CV_ENABLE_UNROLLED
65 for( ; x <= size.width - 4; x += 4 )
// Scalar tail loop for the remaining (width % 4) pixels.
77 for( ; x < size.width; x++ )
// uchar specialization of the masked copy.  First tries the IPP fast path
// (ippiCopy_8u_C1MR); otherwise runs a 16-lane universal-intrinsics loop.
// The mask is inverted in vector form (v_nmask is all-ones where mask == 0),
// so v_select keeps the existing dst lanes where the mask is zero and takes
// src lanes where it is non-zero.
84 copyMask_<uchar>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
86 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1MR, _src, (int)sstep, _dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)
88 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
90 const uchar* src = (const uchar*)_src;
91 uchar* dst = (uchar*)_dst;
95 v_uint8x16 v_zero = v_setzero_u8();
// Process 16 pixels per iteration.
97 for( ; x <= size.width - 16; x += 16 )
99 v_uint8x16 v_src = v_load(src + x),
100 v_dst = v_load(dst + x),
101 v_nmask = v_load(mask + x) == v_zero;
103 v_dst = v_select(v_nmask, v_dst, v_src);
104 v_store(dst + x, v_dst);
// Scalar tail for the remaining pixels (body elided in this view).
108 for( ; x < size.width; x++ )
// ushort specialization of the masked copy.  IPP fast path first; otherwise
// a SIMD loop handling 16 ushort pixels per iteration (two 8-lane vectors).
// The 8-bit inverted mask is widened to 16-bit lanes via v_zip (duplicating
// each mask byte into both bytes of a 16-bit lane) before v_select.
115 copyMask_<ushort>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
117 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_16u_C1MR, (const Ipp16u *)_src, (int)sstep, (Ipp16u *)_dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)
119 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
121 const ushort* src = (const ushort*)_src;
122 ushort* dst = (ushort*)_dst;
126 v_uint8x16 v_zero = v_setzero_u8();
128 for( ; x <= size.width - 16; x += 16 )
130 v_uint16x8 v_src1 = v_load(src + x), v_src2 = v_load(src + x + 8),
131 v_dst1 = v_load(dst + x), v_dst2 = v_load(dst + x + 8);
// Build the inverted byte mask, then widen it to two 16-bit-lane masks.
133 v_uint8x16 v_nmask1, v_nmask2;
134 v_uint8x16 v_nmask = v_load(mask + x) == v_zero;
135 v_zip(v_nmask, v_nmask, v_nmask1, v_nmask2);
137 v_dst1 = v_select(v_reinterpret_as_u16(v_nmask1), v_dst1, v_src1);
138 v_dst2 = v_select(v_reinterpret_as_u16(v_nmask2), v_dst2, v_src2);
139 v_store(dst + x, v_dst1);
140 v_store(dst + x + 8, v_dst2);
// Scalar tail loop (body elided in this view).
144 for( ; x < size.width; x++ )
// Fallback masked copy for arbitrary element sizes.  The element size in
// bytes is smuggled in through the void* user-data parameter (_esz); each
// selected pixel is copied byte-by-byte in the inner k-loop.
// NOTE(review): the mask test guarding the k-loop is elided in this listing.
151 copyMaskGeneric(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size, void* _esz)
153 size_t k, esz = *(size_t*)_esz;
154 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
156 const uchar* src = _src;
159 for( ; x < size.width; x++, src += esz, dst += esz )
163 for( k = 0; k < esz; k++ )
// Generates a BinaryFunc-compatible wrapper around copyMask_<type>; the
// trailing void* user-data parameter is unused by these fixed-size variants.
170 #define DEF_COPY_MASK(suffix, type) \
171 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
172 uchar* dst, size_t dstep, Size size, void*) \
174 copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
// Same wrapper generator, but with an IPP fast path (ippiCopy_<flavor>) tried
// before falling back to the generic template.
// NOTE(review): DEF_COPY_MASK_F is defined twice below — presumably the two
// definitions sit in #if HAVE_IPP / #else branches whose preprocessor lines
// are elided from this listing; verify against the full file, otherwise this
// is a macro redefinition.
178 #define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
179 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
180 uchar* dst, size_t dstep, Size size, void*) \
182 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_##ippfavor, (const ipptype *)src, (int)sstep, (ipptype *)dst, (int)dstep, ippiSize(size), (const Ipp8u *)mask, (int)mstep) >= 0)\
183 copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
// Non-IPP variant: plain delegation to the template, no fast path.
186 #define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
187 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
188 uchar* dst, size_t dstep, Size size, void*) \
190 copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
// Instantiate one masked-copy function per element size/layout.  For IPP
// 9.0.1 the 3-channel 8u/32s IPP kernels are buggy, so those two sizes use
// the plain (non-IPP) wrappers instead.
194 #if IPP_VERSION_X100 == 901 // bug in IPP 9.0.1
195 DEF_COPY_MASK(32sC3, Vec3i)
196 DEF_COPY_MASK(8uC3, Vec3b)
198 DEF_COPY_MASK_F(8uC3, Vec3b, 8u_C3MR, Ipp8u)
199 DEF_COPY_MASK_F(32sC3, Vec3i, 32s_C3MR, Ipp32s)
201 DEF_COPY_MASK(8u, uchar)
202 DEF_COPY_MASK(16u, ushort)
203 DEF_COPY_MASK_F(32s, int, 32s_C1MR, Ipp32s)
204 DEF_COPY_MASK_F(16uC3, Vec3s, 16u_C3MR, Ipp16u)
205 DEF_COPY_MASK(32sC2, Vec2i)
206 DEF_COPY_MASK_F(32sC4, Vec4i, 32s_C4MR, Ipp32s)
207 DEF_COPY_MASK(32sC6, Vec6i)
208 DEF_COPY_MASK(32sC8, Vec8i)
// Dispatch table indexed by element size in bytes (entries elided in this
// listing).  getCopyMaskFunc returns the specialized copier for sizes <= 32
// with a non-null table entry, otherwise the byte-wise generic fallback.
210 BinaryFunc copyMaskTab[] =
231 BinaryFunc getCopyMaskFunc(size_t esz)
233 return esz <= 32 && copyMaskTab[esz] ? copyMaskTab[esz] : copyMaskGeneric;
// Unmasked deep copy of this Mat into _dst.  Paths, in order:
//  1) _dst has a fixed different type -> delegate to convertTo.
//  2) _dst is a UMat -> create it and upload the host data plane-wise via
//     the device allocator (ndoffset/step bookkeeping below).
//  3) 2-D dst -> row-wise memcpy (with an IPP bulk-copy fast path).
//  4) N-dim dst -> plane-wise memcpy via NAryMatIterator.
// NOTE(review): the branch conditions selecting paths 2-4 are elided in this
// listing; comments mark where each path's body begins.
237 void Mat::copyTo( OutputArray _dst ) const
239 CV_INSTRUMENT_REGION()
241 int dtype = _dst.type();
242 if( _dst.fixedType() && dtype != type() )
// Caller fixed a different depth: channel count must match, then convert.
244 CV_Assert( channels() == CV_MAT_CN(dtype) );
245 convertTo( _dst, dtype );
// --- UMat destination: upload host buffer to the device allocator. ---
257 _dst.create( dims, size.p, type() );
258 UMat dst = _dst.getUMat();
259 CV_Assert(dst.u != NULL);
260 size_t i, sz[CV_MAX_DIM] = {0}, dstofs[CV_MAX_DIM], esz = elemSize();
261 CV_Assert(dims >= 0 && dims < CV_MAX_DIM);
262 for( i = 0; i < (size_t)dims; i++ )
265 dst.ndoffset(dstofs);
// The innermost offset is in elements; convert to bytes for upload().
266 dstofs[dims-1] *= esz;
267 dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p);
// --- 2-D Mat destination. ---
273 _dst.create( rows, cols, type() );
274 Mat dst = _dst.getMat();
275 if( data == dst.data )
278 if( rows > 0 && cols > 0 )
280 // For some cases (with vector) dst.size != src.size, so force to column-based form
281 // It prevents memory corruption in case of column-based src
283 dst = dst.reshape(0, (int)dst.total());
285 const uchar* sptr = data;
286 uchar* dptr = dst.data;
288 #if IPP_VERSION_X100 >= 201700
289 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1R_L, sptr, (int)step, dptr, (int)dst.step, ippiSizeL((int)(cols*elemSize()), rows)) >= 0)
// Collapse contiguous rows into wider/fewer rows, then memcpy row by row.
292 Size sz = getContinuousSize(*this, dst);
293 size_t len = sz.width*elemSize();
295 for( ; sz.height--; sptr += step, dptr += dst.step )
296 memcpy( dptr, sptr, len );
// --- N-dimensional destination: copy plane by plane. ---
301 _dst.create( dims, size, type() );
302 Mat dst = _dst.getMat();
303 if( data == dst.data )
308 const Mat* arrays[] = { this, &dst };
310 NAryMatIterator it(arrays, ptrs, 2);
311 size_t sz = it.size*elemSize();
313 for( size_t i = 0; i < it.nplanes; i++, ++it )
314 memcpy(ptrs[1], ptrs[0], sz);
// IPP-accelerated masked copy.  Returns true if IPP handled the operation,
// false to let the caller run the generic path.  Only single-channel 8-bit
// masks are supported.  Continuous data goes through one llwiCopyMask call;
// otherwise the iterator path processes one 1-row "plane" at a time.
// NOTE(review): the continuity check choosing between the two paths is
// elided in this listing.
319 static bool ipp_copyTo(const Mat &src, Mat &dst, const Mat &mask)
322 CV_INSTRUMENT_REGION_IPP()
324 if(mask.channels() > 1 || mask.depth() != CV_8U)
// Single-shot path for continuous matrices.
329 IppiSize size = ippiSize(src.size());
330 return CV_INSTRUMENT_FUN_IPP(llwiCopyMask, src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, size, (int)src.elemSize1(), src.channels(), mask.ptr(), (int)mask.step) >= 0;
// Plane-wise path for non-continuous data.
334 const Mat *arrays[] = {&src, &dst, &mask, NULL};
335 uchar *ptrs[3] = {NULL};
336 NAryMatIterator it(arrays, ptrs);
338 IppiSize size = ippiSize(it.size, 1);
340 for (size_t i = 0; i < it.nplanes; i++, ++it)
342 if(CV_INSTRUMENT_FUN_IPP(llwiCopyMask, ptrs[0], 0, ptrs[1], 0, size, (int)src.elemSize1(), src.channels(), ptrs[2], 0) < 0)
// Stubs for builds without IPP support.
348 CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(mask);
// Masked copy: dst pixels are overwritten only where mask is non-zero.
// The mask must be CV_8U with 1 channel or the same channel count as *this;
// a multi-channel mask selects per-channel (esz becomes elemSize1()).
// NOTE(review): the early delegation to the unmasked copyTo when the mask is
// empty, and the continuity branch, are elided in this listing.
354 void Mat::copyTo( OutputArray _dst, InputArray _mask ) const
356 CV_INSTRUMENT_REGION()
358 Mat mask = _mask.getMat();
365 int cn = channels(), mcn = mask.channels();
366 CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
367 bool colorMask = mcn > 1;
370 CV_Assert( size() == mask.size() );
// Detect whether create() re-allocated; a masked copy into a freshly
// allocated dst must first be zero-filled so unselected pixels are defined.
375 Mat dst0 = _dst.getMat();
376 _dst.create(dims, size, type()); // TODO Prohibit 'dst' re-creation, user should pass it explicitly with correct size/type or empty
379 if (dst.data != dst0.data) // re-allocation happened
382 CV_Assert(dst0.empty() &&
383 "copyTo(): dst size/type mismatch (looks like a bug) - use dst.release() before copyTo() call to suppress this message");
385 dst = Scalar(0); // do not leave dst uninitialized
389 CV_IPP_RUN_FAST(ipp_copyTo(*this, dst, mask))
// Per-channel masking treats each channel as a separate "pixel" of size
// elemSize1(); otherwise whole pixels of elemSize() bytes are copied.
391 size_t esz = colorMask ? elemSize1() : elemSize();
392 BinaryFunc copymask = getCopyMaskFunc(esz);
// Continuous case: a single call over the collapsed 2-D extent.
396 Size sz = getContinuousSize(*this, dst, mask, mcn);
397 copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
// General case: iterate over planes.
401 const Mat* arrays[] = { this, &dst, &mask, 0 };
403 NAryMatIterator it(arrays, ptrs);
404 Size sz((int)(it.size*mcn), 1);
406 for( size_t i = 0; i < it.nplanes; i++, ++it )
407 copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
// Fills the whole matrix with scalar s.  An all-zero scalar (checked as four
// int64 bit patterns over s.val) takes a fast memset path; otherwise the
// scalar is unrolled into a small raw-byte block that is tiled across the
// first plane and then replicated to the remaining planes with memcpy.
// NOTE(review): assumes planes are contiguous so plane 0 can serve as the
// template for the others; the dptr-reset between planes is elided here.
410 Mat& Mat::operator = (const Scalar& s)
412 CV_INSTRUMENT_REGION()
414 if (empty()) return *this;
416 const Mat* arrays[] = { this };
418 NAryMatIterator it(arrays, &dptr, 1);
419 size_t elsize = it.size*elemSize();
420 const int64* is = (const int64*)&s.val[0];
// All four doubles bit-zero => every element type encodes the value as zero bytes.
422 if( is[0] == 0 && is[1] == 0 && is[2] == 0 && is[3] == 0 )
424 for( size_t i = 0; i < it.nplanes; i++, ++it )
425 memset( dptr, 0, elsize );
// Render the scalar into raw bytes, repeated 12 times for a larger memcpy unit.
432 scalarToRawData(s, scalar, type(), 12);
433 size_t blockSize = 12*elemSize1();
435 for( size_t j = 0; j < elsize; j += blockSize )
437 size_t sz = MIN(blockSize, elsize - j);
438 CV_Assert(sz <= sizeof(scalar));
439 memcpy( dptr + j, scalar, sz );
// Remaining planes are byte-identical copies of the first one.
443 for( size_t i = 1; i < it.nplanes; i++ )
446 memcpy( dptr, data, elsize );
// IPP-accelerated setTo (optionally masked).  Returns false to fall back to
// the generic path when: mask is not 1-channel CV_8U, dst has > 4 channels,
// or (for CV_32F) any scalar component is NaN/Inf — IPP would not reproduce
// OpenCV's semantics for non-finite floats.
// NOTE(review): the continuity check separating the single-call path from
// the plane-wise path is elided in this listing.
453 static bool ipp_Mat_setTo_Mat(Mat &dst, Mat &_val, Mat &mask)
456 CV_INSTRUMENT_REGION_IPP()
461 if(mask.depth() != CV_8U || mask.channels() > 1)
464 if(dst.channels() > 4)
467 if (dst.depth() == CV_32F)
469 for (int i = 0; i < (int)(_val.total()); i++)
471 float v = (float)(_val.at<double>(i)); // cast to float
472 if (cvIsNaN(v) || cvIsInf(v)) // accept finite numbers only
// Single-shot path: unroll the scalar to CV_64F and call llwiSetMask once.
479 IppiSize size = ippiSize(dst.size());
480 IppDataType dataType = ippiGetDataType(dst.depth());
481 ::ipp::IwValueFloat s;
482 convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);
484 return CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, dst.ptr(), (int)dst.step, size, dataType, dst.channels(), mask.ptr(), (int)mask.step) >= 0;
// Plane-wise path for non-continuous data; mask may be absent (NULL entry).
488 const Mat *arrays[] = {&dst, mask.empty()?NULL:&mask, NULL};
489 uchar *ptrs[2] = {NULL};
490 NAryMatIterator it(arrays, ptrs);
492 IppiSize size = {(int)it.size, 1};
493 IppDataType dataType = ippiGetDataType(dst.depth());
494 ::ipp::IwValueFloat s;
495 convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);
497 for( size_t i = 0; i < it.nplanes; i++, ++it)
499 if(CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, ptrs[0], 0, size, dataType, dst.channels(), ptrs[1], 0) < 0)
// Stubs for builds without IPP support.
505 CV_UNUSED(dst); CV_UNUSED(_val); CV_UNUSED(mask);
// Sets (optionally masked) all elements to _value.  The scalar is unrolled
// into an aligned block of up to blockSize0 pixels, then either memcpy'd
// (no mask) or pushed through the masked-copy kernel (mask present), block
// by block across every plane.
511 Mat& Mat::setTo(InputArray _value, InputArray _mask)
513 CV_INSTRUMENT_REGION()
518 Mat value = _value.getMat(), mask = _mask.getMat();
520 CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT ));
521 int cn = channels(), mcn = mask.channels();
522 CV_Assert( mask.empty() || (mask.depth() == CV_8U && (mcn == 1 || mcn == cn) && size == mask.size) );
524 CV_IPP_RUN_FAST(ipp_Mat_setTo_Mat(*this, value, mask), *this)
// Multi-channel mask => per-channel granularity (elemSize1), else per-pixel.
526 size_t esz = mcn > 1 ? elemSize1() : elemSize();
527 BinaryFunc copymask = getCopyMaskFunc(esz);
529 const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 };
530 uchar* ptrs[2]={0,0};
531 NAryMatIterator it(arrays, ptrs);
532 int totalsz = (int)it.size*mcn;
533 int blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
534 blockSize0 -= blockSize0 % mcn; // must be divisible without remainder for unrolling and advancing
// Scratch buffer holding blockSize0 pre-rendered pixels, double-aligned.
535 AutoBuffer<uchar> _scbuf(blockSize0*esz + 32);
536 uchar* scbuf = alignPtr((uchar*)_scbuf.data(), (int)sizeof(double));
537 convertAndUnrollScalar( value, type(), scbuf, blockSize0/mcn );
539 for( size_t i = 0; i < it.nplanes; i++, ++it )
541 for( int j = 0; j < totalsz; j += blockSize0 )
543 Size sz(std::min(blockSize0, totalsz - j), 1);
544 size_t blockSize = sz.width*esz;
// Masked path: copy scbuf -> dst only where the mask selects.
547 copymask(scbuf, 0, ptrs[1], 0, ptrs[0], 0, sz, &esz);
// Unmasked path: bulk copy of the pre-rendered block.
551 memcpy(ptrs[0], scbuf, blockSize);
552 ptrs[0] += blockSize;
// Horizontal (left-right) flip working on raw bytes.  A lookup table maps
// each byte position in the left half to its mirrored byte position, then
// each row swaps byte pairs in place-compatible fashion (works for src==dst).
// NOTE(review): the statement assigning j from the table inside the inner
// loop is elided in this listing — presumably j = tab[i]; confirm in full file.
560 flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
562 int i, j, limit = (int)(((size.width + 1)/2)*esz);
563 AutoBuffer<int> _tab(size.width*esz);
564 int* tab = _tab.data();
// tab[i*esz + k] = byte offset of the mirrored pixel's k-th byte.
566 for( i = 0; i < size.width; i++ )
567 for( size_t k = 0; k < esz; k++ )
568 tab[i*esz + k] = (int)((size.width - i - 1)*esz + k);
570 for( ; size.height--; src += sstep, dst += dstep )
572 for( i = 0; i < limit; i++ )
// Swap the byte pair; safe when src and dst alias.
575 uchar t0 = src[i], t1 = src[j];
576 dst[i] = t1; dst[j] = t0;
// Vertical (top-bottom) flip: walks rows from both ends toward the middle,
// swapping them.  When all four pointers are int-aligned, rows are swapped
// 16 bytes (4 ints) at a time, then 4 bytes, with a byte tail; the byte-level
// fallback loop at the end also covers the unaligned case.
582 flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size, size_t esz )
584 const uchar* src1 = src0 + (size.height - 1)*sstep;
585 uchar* dst1 = dst0 + (size.height - 1)*dstep;
// Work in raw bytes from here on.
586 size.width *= (int)esz;
588 for( int y = 0; y < (size.height + 1)/2; y++, src0 += sstep, src1 -= sstep,
589 dst0 += dstep, dst1 -= dstep )
// int-wise swapping is only valid if every pointer is int-aligned.
592 if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
594 for( ; i <= size.width - 16; i += 16 )
596 int t0 = ((int*)(src0 + i))[0];
597 int t1 = ((int*)(src1 + i))[0];
599 ((int*)(dst0 + i))[0] = t1;
600 ((int*)(dst1 + i))[0] = t0;
602 t0 = ((int*)(src0 + i))[1];
603 t1 = ((int*)(src1 + i))[1];
605 ((int*)(dst0 + i))[1] = t1;
606 ((int*)(dst1 + i))[1] = t0;
608 t0 = ((int*)(src0 + i))[2];
609 t1 = ((int*)(src1 + i))[2];
611 ((int*)(dst0 + i))[2] = t1;
612 ((int*)(dst1 + i))[2] = t0;
614 t0 = ((int*)(src0 + i))[3];
615 t1 = ((int*)(src1 + i))[3];
617 ((int*)(dst0 + i))[3] = t1;
618 ((int*)(dst1 + i))[3] = t0;
// 4-byte chunks for the remainder of the aligned portion.
621 for( ; i <= size.width - 4; i += 4 )
623 int t0 = ((int*)(src0 + i))[0];
624 int t1 = ((int*)(src1 + i))[0];
626 ((int*)(dst0 + i))[0] = t1;
627 ((int*)(dst1 + i))[0] = t0;
// Byte tail (body elided in this view).
631 for( ; i < size.width; i++ )
// Bit flags describing which axes a flip operation touches.
644 enum { FLIP_COLS = 1 << 0, FLIP_ROWS = 1 << 1, FLIP_BOTH = FLIP_ROWS | FLIP_COLS };
// OpenCL implementation of flip().  Maps flipCode (<0 both axes, 0 rows,
// >0 cols) to one of three kernels, sizes the work grid so mirrored pixel
// pairs are processed by a single work item (halving rows/cols as needed),
// and tunes vector width / rows-per-work-item for Intel GPUs.
646 static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode )
648 CV_Assert(flipCode >= -1 && flipCode <= 1);
650 const ocl::Device & dev = ocl::Device::getDefault();
651 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
652 flipType, kercn = std::min(ocl::predictOptimalVectorWidth(_src, _dst), 4);
// Bail out if the device cannot do doubles but the data is CV_64F.
654 bool doubleSupport = dev.doubleFPConfig() > 0;
655 if (!doubleSupport && depth == CV_64F)
661 const char * kernelName;
663 kernelName = "arithm_flip_rows", flipType = FLIP_ROWS;
664 else if (flipCode > 0)
665 kernelName = "arithm_flip_cols", flipType = FLIP_COLS;
667 kernelName = "arithm_flip_rows_cols", flipType = FLIP_BOTH;
669 int pxPerWIy = (dev.isIntel() && (dev.type() & ocl::Device::TYPE_GPU)) ? 4 : 1;
670 kercn = (cn!=3 || flipType == FLIP_ROWS) ? std::max(kercn, cn) : cn;
672 ocl::Kernel k(kernelName, ocl::core::flip_oclsrc,
673 format( "-D T=%s -D T1=%s -D cn=%d -D PIX_PER_WI_Y=%d -D kercn=%d",
674 kercn != cn ? ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)) : ocl::vecopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
675 kercn != cn ? ocl::typeToStr(depth) : ocl::vecopTypeToStr(depth), cn, pxPerWIy, kercn));
679 Size size = _src.size();
680 _dst.create(size, type);
681 UMat src = _src.getUMat(), dst = _dst.getUMat();
// Each work item swaps a pair, so only half the mirrored dimension is walked.
683 int cols = size.width * cn / kercn, rows = size.height;
684 cols = flipType == FLIP_COLS ? (cols + 1) >> 1 : cols;
685 rows = flipType & FLIP_ROWS ? (rows + 1) >> 1 : rows;
687 k.args(ocl::KernelArg::ReadOnlyNoSize(src),
688 ocl::KernelArg::WriteOnly(dst, cn, kercn), rows, cols);
690 size_t maxWorkGroupSize = dev.maxWorkGroupSize();
691 CV_Assert(maxWorkGroupSize % 4 == 0);
693 size_t globalsize[2] = { (size_t)cols, ((size_t)rows + pxPerWIy - 1) / pxPerWIy },
694 localsize[2] = { maxWorkGroupSize / 4, 4 };
// Explicit local size only for column flips on non-Intel devices.
695 return k.run(2, globalsize, (flipType == FLIP_COLS) && !dev.isIntel() ? localsize : NULL, false);
// IPP implementation of flip() via iwiMirror.  flip_mode < 0 => both axes,
// 0 => horizontal axis (top-bottom), > 0 => vertical axis (left-right).
// Returns false (fall back to the generic path) on IPP exception or when
// IPP support is compiled out.
701 static bool ipp_flip(Mat &src, Mat &dst, int flip_mode)
704 CV_INSTRUMENT_REGION_IPP()
708 ippMode = ippAxsBoth;
709 else if(flip_mode == 0)
710 ippMode = ippAxsHorizontal;
712 ippMode = ippAxsVertical;
716 ::ipp::IwiImage iwSrc = ippiGetImage(src);
717 ::ipp::IwiImage iwDst = ippiGetImage(dst);
719 CV_INSTRUMENT_FUN_IPP(::ipp::iwiMirror, iwSrc, iwDst, ippMode);
// IPP IW reports failures via exceptions; swallow and decline.
721 catch(::ipp::IwException)
728 CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(flip_mode);
// Public flip(): handles trivial no-op shapes with a plain copy, dispatches
// to OpenCL/IPP fast paths, then falls back to flipVert/flipHoriz (a both-
// axes flip is a vertical flip followed by an in-place horizontal flip).
// NOTE(review): the flip_mode normalization around the height==1 case is
// partially elided in this listing.
735 void flip( InputArray _src, OutputArray _dst, int flip_mode )
737 CV_INSTRUMENT_REGION()
739 CV_Assert( _src.dims() <= 2 );
740 Size size = _src.size();
746 if (size.height == 1)
// Degenerate sizes where flipping is an identity: just copy.
750 if ((size.width == 1 && flip_mode > 0) ||
751 (size.height == 1 && flip_mode == 0) ||
752 (size.height == 1 && size.width == 1 && flip_mode < 0))
754 return _src.copyTo(_dst);
757 CV_OCL_RUN( _dst.isUMat(), ocl_flip(_src, _dst, flip_mode))
759 Mat src = _src.getMat();
760 int type = src.type();
761 _dst.create( size, type );
762 Mat dst = _dst.getMat();
764 CV_IPP_RUN_FAST(ipp_flip(src, dst, flip_mode));
766 size_t esz = CV_ELEM_SIZE(type);
769 flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
771 flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
// Both-axes case: second pass flips dst horizontally in place.
774 flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
// OpenCL rotate: composes transpose and flip (both OCL-capable) per mode.
// NOTE(review): the switch statement, the flip calls completing each 90-degree
// rotation, and the return are elided in this listing.
779 static bool ocl_rotate(InputArray _src, OutputArray _dst, int rotateMode)
783 case ROTATE_90_CLOCKWISE:
784 transpose(_src, _dst);
// ROTATE_180 is a flip over both axes.
788 flip(_src, _dst, -1);
790 case ROTATE_90_COUNTERCLOCKWISE:
791 transpose(_src, _dst);
// Public rotate(): OpenCL fast path first, then the same transpose+flip
// composition on the CPU.  NOTE(review): the flip call that follows each
// transpose (completing the 90-degree rotations) is elided in this listing.
801 void rotate(InputArray _src, OutputArray _dst, int rotateMode)
803 CV_Assert(_src.dims() <= 2);
805 CV_OCL_RUN(_dst.isUMat(), ocl_rotate(_src, _dst, rotateMode))
809 case ROTATE_90_CLOCKWISE:
810 transpose(_src, _dst);
// ROTATE_180 is a flip over both axes.
814 flip(_src, _dst, -1);
816 case ROTATE_90_COUNTERCLOCKWISE:
817 transpose(_src, _dst);
// OpenCL implementation of repeat(): a single kernel tiles src nx times
// horizontally and ny times vertically into dst.  The 1x1 case degenerates
// to a plain copy (branch body elided in this listing).
825 #if defined HAVE_OPENCL && !defined __APPLE__
827 static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
829 if (ny == 1 && nx == 1)
835 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
836 rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1,
837 kercn = ocl::predictOptimalVectorWidth(_src, _dst);
// Tile counts are baked into the kernel as compile-time constants.
839 ocl::Kernel k("repeat", ocl::core::repeat_oclsrc,
840 format("-D T=%s -D nx=%d -D ny=%d -D rowsPerWI=%d -D cn=%d",
841 ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
842 nx, ny, rowsPerWI, kercn));
846 UMat src = _src.getUMat(), dst = _dst.getUMat();
847 k.args(ocl::KernelArg::ReadOnly(src, cn, kercn), ocl::KernelArg::WriteOnlyNoSize(dst));
// One work item per kercn-wide chunk of a source row.
849 size_t globalsize[] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
850 return k.run(2, globalsize, NULL, false);
// Public repeat(): tiles src ny times vertically and nx times horizontally.
// CPU strategy: memcpy the source row across the first band of dst, then
// replicate whole rows from ssize.height above — each copied row is itself
// already fully tiled horizontally.
855 void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
857 CV_INSTRUMENT_REGION()
// In-place repeat is not supported.
859 CV_Assert(_src.getObj() != _dst.getObj());
860 CV_Assert( _src.dims() <= 2 );
861 CV_Assert( ny > 0 && nx > 0 );
863 Size ssize = _src.size();
864 _dst.create(ssize.height*ny, ssize.width*nx, _src.type());
866 #if !defined __APPLE__
867 CV_OCL_RUN(_dst.isUMat(),
868 ocl_repeat(_src, ny, nx, _dst))
871 Mat src = _src.getMat(), dst = _dst.getMat();
872 Size dsize = dst.size();
873 int esz = (int)src.elemSize();
// Convert widths to bytes for memcpy arithmetic.
875 ssize.width *= esz; dsize.width *= esz;
// First band: tile each source row horizontally across dst.
877 for( y = 0; y < ssize.height; y++ )
879 for( x = 0; x < dsize.width; x += ssize.width )
880 memcpy( dst.ptr(y) + x, src.ptr(y), ssize.width );
// Remaining rows: copy the row one source-height above (already tiled).
883 for( ; y < dsize.height; y++ )
884 memcpy( dst.ptr(y), dst.ptr(y - ssize.height), dsize.width );
// Convenience overload returning the tiled matrix by value; the 1x1 case
// short-circuits (return statements elided in this listing).
887 Mat repeat(const Mat& src, int ny, int nx)
889 if( nx == 1 && ny == 1 )
892 repeat(src, ny, nx, dst);
901 Various border types, image boundaries are denoted with '|'
903 * BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
904 * BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
905 * BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
906 * BORDER_WRAP: cdefgh|abcdefgh|abcdefg
907 * BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
// Maps an out-of-range coordinate p into [0, len) according to borderType
// (see the diagram above).  In-range coordinates are returned unchanged
// (return elided in this listing).  REFLECT and REFLECT_101 differ only by
// delta: REFLECT_101 skips the edge pixel when mirroring.
909 int cv::borderInterpolate( int p, int len, int borderType )
911 CV_TRACE_FUNCTION_VERBOSE();
913 if( (unsigned)p < (unsigned)len )
915 else if( borderType == BORDER_REPLICATE )
916 p = p < 0 ? 0 : len - 1;
917 else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
919 int delta = borderType == BORDER_REFLECT_101;
// Repeated mirroring handles coordinates further than one period away.
927 p = len - 1 - (p - len) - delta;
929 while( (unsigned)p >= (unsigned)len );
931 else if( borderType == BORDER_WRAP )
935 p -= ((p-len+1)/len)*len;
// BORDER_CONSTANT has no source coordinate (sentinel handling elided).
939 else if( borderType == BORDER_CONSTANT )
942 CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
// Byte-level border construction for non-constant border types.  'cn' here
// is the pixel size in bytes (callers pass elemSize()).  Builds a byte-index
// table for the left/right borders, fills side borders row by row (int-wise
// when everything is 4-byte aligned), then replicates whole rows for the
// top/bottom borders via borderInterpolate + memcpy.
949 void copyMakeBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
950 uchar* dst, size_t dststep, cv::Size dstroi,
951 int top, int left, int cn, int borderType )
953 const int isz = (int)sizeof(int);
954 int i, j, k, elemSize = 1;
955 bool intMode = false;
// If every stride/pointer/pixel-size is int-aligned, work in int units.
957 if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 )
964 cv::AutoBuffer<int> _tab((dstroi.width - srcroi.width)*cn);
965 int* tab = _tab.data();
966 int right = dstroi.width - srcroi.width - left;
967 int bottom = dstroi.height - srcroi.height - top;
// tab maps each left/right border byte to its interpolated source byte.
969 for( i = 0; i < left; i++ )
971 j = cv::borderInterpolate(i - left, srcroi.width, borderType)*cn;
972 for( k = 0; k < cn; k++ )
973 tab[i*cn + k] = j + k;
976 for( i = 0; i < right; i++ )
978 j = cv::borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn;
979 for( k = 0; k < cn; k++ )
980 tab[(i+left)*cn + k] = j + k;
// Copy the interior and fill left/right borders for each source row.
988 uchar* dstInner = dst + dststep*top + left*elemSize;
990 for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
992 if( dstInner != src )
993 memcpy(dstInner, src, srcroi.width*elemSize);
// int-wise side-border fill.
997 const int* isrc = (int*)src;
998 int* idstInner = (int*)dstInner;
999 for( j = 0; j < left; j++ )
1000 idstInner[j - left] = isrc[tab[j]];
1001 for( j = 0; j < right; j++ )
1002 idstInner[j + srcroi.width] = isrc[tab[j + left]];
// byte-wise side-border fill (unaligned case).
1006 for( j = 0; j < left; j++ )
1007 dstInner[j - left] = src[tab[j]];
1008 for( j = 0; j < right; j++ )
1009 dstInner[j + srcroi.width] = src[tab[j + left]];
// Top/bottom borders: whole-row copies of already-completed rows.
1013 dstroi.width *= elemSize;
1016 for( i = 0; i < top; i++ )
1018 j = cv::borderInterpolate(i - top, srcroi.height, borderType);
1019 memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width);
1022 for( i = 0; i < bottom; i++ )
1024 j = cv::borderInterpolate(i + srcroi.height, srcroi.height, borderType);
1025 memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width);
// Byte-level BORDER_CONSTANT construction.  Pre-renders one full destination
// row's worth of the constant value, then: copies the interior row-wise while
// memcpy-ing the left/right segments from the constant row; finally fills the
// top and bottom border rows entirely from the constant row.
1030 void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
1031 uchar* dst, size_t dststep, cv::Size dstroi,
1032 int top, int left, int cn, const uchar* value )
1035 cv::AutoBuffer<uchar> _constBuf(dstroi.width*cn);
1036 uchar* constBuf = _constBuf.data();
1037 int right = dstroi.width - srcroi.width - left;
1038 int bottom = dstroi.height - srcroi.height - top;
// Replicate the cn-byte pattern across the whole buffer row.
1040 for( i = 0; i < dstroi.width; i++ )
1042 for( j = 0; j < cn; j++ )
1043 constBuf[i*cn + j] = value[j];
1051 uchar* dstInner = dst + dststep*top + left;
1053 for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
1055 if( dstInner != src )
1056 memcpy( dstInner, src, srcroi.width );
1057 memcpy( dstInner - left, constBuf, left );
1058 memcpy( dstInner + srcroi.width, constBuf, right );
// Top/bottom border rows are pure constant fill.
1063 for( i = 0; i < top; i++ )
1064 memcpy(dst + (i - top)*dststep, constBuf, dstroi.width);
1066 for( i = 0; i < bottom; i++ )
1067 memcpy(dst + (i + srcroi.height)*dststep, constBuf, dstroi.width);
// OpenCL implementation of copyMakeBorder.  Supports the five standard
// border modes; when src is a non-isolated submatrix, its ROI is grown into
// the parent image first so existing neighbor pixels are used instead of
// synthesized ones.  Zero-border degenerates to a copy.
1076 static bool ocl_copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
1077 int left, int right, int borderType, const Scalar& value )
1079 int type = _src.type(), cn = CV_MAT_CN(type), depth = CV_MAT_DEPTH(type),
1080 rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
1081 bool isolated = (borderType & BORDER_ISOLATED) != 0;
1082 borderType &= ~cv::BORDER_ISOLATED;
1084 if ( !(borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE || borderType == BORDER_REFLECT ||
1085 borderType == BORDER_WRAP || borderType == BORDER_REFLECT_101) ||
// borderMap is indexed directly by the BORDER_* enum value.
1089 const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101" };
// 3-channel scalars are padded to 4 for OpenCL vector alignment.
1090 int scalarcn = cn == 3 ? 4 : cn;
1091 int sctype = CV_MAKETYPE(depth, scalarcn);
1092 String buildOptions = format("-D T=%s -D %s -D T1=%s -D cn=%d -D ST=%s -D rowsPerWI=%d",
1093 ocl::memopTypeToStr(type), borderMap[borderType],
1094 ocl::memopTypeToStr(depth), cn,
1095 ocl::memopTypeToStr(sctype), rowsPerWI);
1097 ocl::Kernel k("copyMakeBorder", ocl::core::copymakeborder_oclsrc, buildOptions);
1101 UMat src = _src.getUMat();
// Non-isolated submatrix: extend the ROI into the parent as far as possible
// and shrink the synthetic border accordingly.
1102 if( src.isSubmatrix() && !isolated )
1106 src.locateROI(wholeSize, ofs);
1107 int dtop = std::min(ofs.y, top);
1108 int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
1109 int dleft = std::min(ofs.x, left);
1110 int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
1111 src.adjustROI(dtop, dbottom, dleft, dright);
1118 _dst.create(src.rows + top + bottom, src.cols + left + right, type);
1119 UMat dst = _dst.getUMat();
// All borders zero: nothing to synthesize, copy if buffers differ.
1121 if (top == 0 && left == 0 && bottom == 0 && right == 0)
1123 if(src.u != dst.u || src.step != dst.step)
1128 k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
1129 top, left, ocl::KernelArg::Constant(Mat(1, 1, sctype, value)));
1131 size_t globalsize[2] = { (size_t)dst.cols, ((size_t)dst.rows + rowsPerWI - 1) / rowsPerWI };
1132 return k.run(2, globalsize, NULL, false);
// IPP implementation of copyMakeBorder via llwiCopyMakeBorder.  Declines
// (returns false) when the OpenCV border type has no IPP equivalent, when
// IPP IW is unavailable, or when the perf workaround macro disables it.
// The source is copied into the interior sub-rect of _dst first.
1141 static bool ipp_copyMakeBorder( Mat &_src, Mat &_dst, int top, int bottom,
1142 int left, int right, int _borderType, const Scalar& value )
1144 #if defined HAVE_IPP_IW && !IPP_DISABLE_PERF_COPYMAKE
1145 CV_INSTRUMENT_REGION_IPP()
1147 ::ipp::IwiBorderSize borderSize(left, top, right, bottom);
1148 ::ipp::IwiSize size(_src.cols, _src.rows);
1149 IppDataType dataType = ippiGetDataType(_src.depth());
1150 IppiBorderType borderType = ippiGetBorderType(_borderType);
1151 if((int)borderType == -1)
// Interior rectangle of dst that will receive the source pixels.
1157 Rect dstRect(borderSize.left, borderSize.top,
1158 _dst.cols - borderSize.right - borderSize.left,
1159 _dst.rows - borderSize.bottom - borderSize.top);
1160 Mat subDst = Mat(_dst, dstRect);
// NOTE(review): the assignment of pSrc (and the copy into subDst) is elided
// in this listing; verify against the full file.
1163 return CV_INSTRUMENT_FUN_IPP(llwiCopyMakeBorder, pSrc->ptr(), pSrc->step, subDst.ptr(), subDst.step, size, dataType, _src.channels(), borderSize, borderType, &value[0]) >= 0;
1165 CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(top); CV_UNUSED(bottom); CV_UNUSED(left); CV_UNUSED(right);
1166 CV_UNUSED(_borderType); CV_UNUSED(value);
// Public copyMakeBorder: pads _src with top/bottom/left/right border pixels
// per borderType.  Order of strategies: OpenCL, ROI extension for non-
// isolated submatrices, zero-border copy shortcut, IPP, then the byte-level
// CPU helpers (copyMakeBorder_8u / copyMakeConstBorder_8u).
1173 void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
1174 int left, int right, int borderType, const Scalar& value )
1176 CV_INSTRUMENT_REGION()
1178 CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 );
1180 CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
1181 ocl_copyMakeBorder(_src, _dst, top, bottom, left, right, borderType, value))
1183 Mat src = _src.getMat();
1184 int type = src.type();
// Non-isolated submatrix: reuse real neighboring pixels from the parent by
// growing the ROI, shrinking the synthetic border by the reclaimed amount.
1186 if( src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0 )
1190 src.locateROI(wholeSize, ofs);
1191 int dtop = std::min(ofs.y, top);
1192 int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
1193 int dleft = std::min(ofs.x, left);
1194 int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
1195 src.adjustROI(dtop, dbottom, dleft, dright);
1202 _dst.create( src.rows + top + bottom, src.cols + left + right, type );
1203 Mat dst = _dst.getMat();
// All-zero border: plain copy unless src and dst already share storage.
1205 if(top == 0 && left == 0 && bottom == 0 && right == 0)
1207 if(src.data != dst.data || src.step != dst.step)
1212 borderType &= ~BORDER_ISOLATED;
1214 CV_IPP_RUN_FAST(ipp_copyMakeBorder(src, dst, top, bottom, left, right, borderType, value))
// Non-constant borders index existing pixels; 'cn' is passed as bytes/pixel.
1216 if( borderType != BORDER_CONSTANT )
1217 copyMakeBorder_8u( src.ptr(), src.step, src.size(),
1218 dst.ptr(), dst.step, dst.size(),
1219 top, left, (int)src.elemSize(), borderType );
// Constant border: render the scalar to raw bytes first.
1222 int cn = src.channels(), cn1 = cn;
1223 AutoBuffer<double> buf(cn);
// (Guard for >4 channels elided): only a uniform scalar can be replicated.
1226 CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] );
1229 scalarToRawData(value, buf.data(), CV_MAKETYPE(src.depth(), cn1), cn);
1230 copyMakeConstBorder_8u( src.ptr(), src.step, src.size(),
1231 dst.ptr(), dst.step, dst.size(),
1232 top, left, (int)src.elemSize(), (uchar*)buf.data() );
// Legacy C-API copy.  Three cases: sparse-to-sparse (deep structural copy of
// the hash table and nodes), IplImage with a channel-of-interest set (routed
// through mixChannels), and the ordinary dense case delegated to Mat::copyTo.
1238 cvCopy( const void* srcarr, void* dstarr, const void* maskarr )
1240 if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr))
// Sparse copy does not support a mask.
1242 CV_Assert( maskarr == 0 );
1243 CvSparseMat* src1 = (CvSparseMat*)srcarr;
1244 CvSparseMat* dst1 = (CvSparseMat*)dstarr;
1245 CvSparseMatIterator iterator;
// Copy header fields, then rebuild the node set from scratch.
1248 dst1->dims = src1->dims;
1249 memcpy( dst1->size, src1->size, src1->dims*sizeof(src1->size[0]));
1250 dst1->valoffset = src1->valoffset;
1251 dst1->idxoffset = src1->idxoffset;
1252 cvClearSet( dst1->heap );
// Grow the destination hash table if the source node count would overload it.
1254 if( src1->heap->active_count >= dst1->hashsize*CV_SPARSE_HASH_RATIO )
1256 cvFree( &dst1->hashtable );
1257 dst1->hashsize = src1->hashsize;
1259 (void**)cvAlloc( dst1->hashsize*sizeof(dst1->hashtable[0]));
1262 memset( dst1->hashtable, 0, dst1->hashsize*sizeof(dst1->hashtable[0]));
// Clone each node and push it onto its destination hash bucket.
1264 for( node = cvInitSparseMatIterator( src1, &iterator );
1265 node != 0; node = cvGetNextSparseNode( &iterator ))
1267 CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( dst1->heap );
1268 int tabidx = node->hashval & (dst1->hashsize - 1);
1269 memcpy( node_copy, node, dst1->heap->elem_size );
1270 node_copy->next = (CvSparseNode*)dst1->hashtable[tabidx];
1271 dst1->hashtable[tabidx] = node_copy;
// Dense case.
1275 cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
1276 CV_Assert( src.depth() == dst.depth() && src.size == dst.size );
1278 int coi1 = 0, coi2 = 0;
1279 if( CV_IS_IMAGE(srcarr) )
1280 coi1 = cvGetImageCOI((const IplImage*)srcarr);
1281 if( CV_IS_IMAGE(dstarr) )
1282 coi2 = cvGetImageCOI((const IplImage*)dstarr);
// COI set on either side: copy exactly one channel via mixChannels.
1286 CV_Assert( (coi1 != 0 || src.channels() == 1) &&
1287 (coi2 != 0 || dst.channels() == 1) );
1289 int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
1290 cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
1294 CV_Assert( src.channels() == dst.channels() );
// Masked/unmasked dispatch around this call is elided in this listing.
1299 src.copyTo(dst, cv::cvarrToMat(maskarr));
// Legacy C-API set: thin wrapper over Mat::setTo (the unmasked branch is
// elided in this listing).
1303 cvSet( void* arr, CvScalar value, const void* maskarr )
1305 cv::Mat m = cv::cvarrToMat(arr);
1309 m.setTo(cv::Scalar(value), cv::cvarrToMat(maskarr));
// Legacy C-API zero-fill.  Sparse matrices are cleared structurally (all
// nodes removed, hash table zeroed); dense arrays fall through to the Mat
// path (the m = Scalar(0) statement is elided in this listing).
1313 cvSetZero( CvArr* arr )
1315 if( CV_IS_SPARSE_MAT(arr) )
1317 CvSparseMat* mat1 = (CvSparseMat*)arr;
1318 cvClearSet( mat1->heap );
1319 if( mat1->hashtable )
1320 memset( mat1->hashtable, 0, mat1->hashsize*sizeof(mat1->hashtable[0]));
1323 cv::Mat m = cv::cvarrToMat(arr);
// Legacy C-API flip: wraps cv::flip, defaulting dst to src when dstarr is
// null (that branch is elided in this listing).
1328 cvFlip( const CvArr* srcarr, CvArr* dstarr, int flip_mode )
1330 cv::Mat src = cv::cvarrToMat(srcarr);
1336 dst = cv::cvarrToMat(dstarr);
1338 CV_Assert( src.type() == dst.type() && src.size() == dst.size() );
1339 cv::flip( src, dst, flip_mode );
// Legacy C-API repeat: tile counts are derived from the dst/src size ratio,
// which must divide evenly.
1343 cvRepeat( const CvArr* srcarr, CvArr* dstarr )
1345 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
1346 CV_Assert( src.type() == dst.type() &&
1347 dst.rows % src.rows == 0 && dst.cols % src.cols == 0 );
1348 cv::repeat(src, dst.rows/src.rows, dst.cols/src.cols, dst);