1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // For Open Source Computer Vision Library
12 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
13 // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
14 // Copyright (C) 2014, Itseez Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 /* ////////////////////////////////////////////////////////////////////
45 // Mat basic operations: Copy, Set
49 #include "precomp.hpp"
50 #include "opencl_kernels_core.hpp"
56 template<typename T> static void
57 copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
59 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
61 const T* src = (const T*)_src;
64 #if CV_ENABLE_UNROLLED
65 for( ; x <= size.width - 4; x += 4 )
77 for( ; x < size.width; x++ )
// Specialization of copyMask_ for 8-bit single-channel data.
// Tries the IPP fast path first, then a SIMD (universal intrinsics)
// 16-pixels-at-a-time loop, then a scalar tail.
// NOTE(review): this listing has elided lines (the "template<> void" header,
// braces, the SIMD #if guard and the scalar tail body are missing) — restore
// from upstream before compiling.
84 copyMask_<uchar>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
// IPP fast path: returns from the function when the IPP call succeeds.
86 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1MR, _src, (int)sstep, _dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)
88 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
90 const uchar* src = (const uchar*)_src;
91 uchar* dst = (uchar*)_dst;
95 v_uint8x16 v_zero = v_setzero_u8();
// process 16 pixels per iteration with universal intrinsics
97 for( ; x <= size.width - 16; x += 16 )
99 v_uint8x16 v_src = v_load(src + x),
100 v_dst = v_load(dst + x),
// v_nmask is all-ones where the mask is ZERO (i.e. "do not copy")
101 v_nmask = v_load(mask + x) == v_zero;
// keep dst where mask==0, take src elsewhere
103 v_dst = v_select(v_nmask, v_dst, v_src);
104 v_store(dst + x, v_dst);
// scalar tail for the remaining < 16 pixels
108 for( ; x < size.width; x++ )
// Specialization of copyMask_ for 16-bit single-channel data.
// Same structure as the 8-bit version: IPP fast path, then SIMD loop
// (16 pixels per iteration, processed as two 8-lane halves), scalar tail.
// NOTE(review): elided lines (template<> header, braces, SIMD guard,
// scalar tail) — restore from upstream before compiling.
115 copyMask_<ushort>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
117 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_16u_C1MR, (const Ipp16u *)_src, (int)sstep, (Ipp16u *)_dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)
119 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
121 const ushort* src = (const ushort*)_src;
122 ushort* dst = (ushort*)_dst;
126 v_uint8x16 v_zero = v_setzero_u8();
128 for( ; x <= size.width - 16; x += 16 )
130 v_uint16x8 v_src1 = v_load(src + x), v_src2 = v_load(src + x + 8),
131 v_dst1 = v_load(dst + x), v_dst2 = v_load(dst + x + 8);
133 v_uint8x16 v_nmask1, v_nmask2;
// v_nmask: all-ones where the 8-bit mask is zero ("keep dst")
134 v_uint8x16 v_nmask = v_load(mask + x) == v_zero;
// widen the 8-bit mask to 16-bit lanes by interleaving it with itself
135 v_zip(v_nmask, v_nmask, v_nmask1, v_nmask2);
137 v_dst1 = v_select(v_reinterpret_as_u16(v_nmask1), v_dst1, v_src1);
138 v_dst2 = v_select(v_reinterpret_as_u16(v_nmask2), v_dst2, v_src2);
139 v_store(dst + x, v_dst1);
140 v_store(dst + x + 8, v_dst2);
// scalar tail for the remaining pixels
144 for( ; x < size.width; x++ )
151 copyMaskGeneric(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size, void* _esz)
153 size_t k, esz = *(size_t*)_esz;
154 for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
156 const uchar* src = _src;
159 for( ; x < size.width; x++, src += esz, dst += esz )
163 for( k = 0; k < esz; k++ )
// Macro generators for the per-type masked-copy wrappers with the
// BinaryFunc signature. DEF_COPY_MASK forwards straight to copyMask_<type>;
// DEF_COPY_MASK_F first tries the matching IPP masked-copy primitive.
// NOTE(review): the listing elides the #if defined HAVE_IPP / #else / #endif
// lines that select between the two DEF_COPY_MASK_F definitions, as well as
// the macro-body braces — restore from upstream before compiling. Comments
// cannot be placed inside the macro bodies (backslash continuations).
170 #define DEF_COPY_MASK(suffix, type) \
171 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
172 uchar* dst, size_t dstep, Size size, void*) \
174 copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
178 #define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
179 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
180 uchar* dst, size_t dstep, Size size, void*) \
182 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_##ippfavor, (const ipptype *)src, (int)sstep, (ipptype *)dst, (int)dstep, ippiSize(size), (const Ipp8u *)mask, (int)mstep) >= 0)\
183 copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
186 #define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
187 static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
188 uchar* dst, size_t dstep, Size size, void*) \
190 copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
194 #if IPP_VERSION_X100 == 901 // bug in IPP 9.0.1
// IPP 9.0.1 has a broken 3-channel masked copy, so these two fall back to
// the plain template; otherwise the IPP-accelerated variants are used.
195 DEF_COPY_MASK(32sC3, Vec3i)
196 DEF_COPY_MASK(8uC3, Vec3b)
198 DEF_COPY_MASK_F(8uC3, Vec3b, 8u_C3MR, Ipp8u)
199 DEF_COPY_MASK_F(32sC3, Vec3i, 32s_C3MR, Ipp32s)
201 DEF_COPY_MASK(8u, uchar)
202 DEF_COPY_MASK(16u, ushort)
203 DEF_COPY_MASK_F(32s, int, 32s_C1MR, Ipp32s)
204 DEF_COPY_MASK_F(16uC3, Vec3s, 16u_C3MR, Ipp16u)
205 DEF_COPY_MASK(32sC2, Vec2i)
206 DEF_COPY_MASK_F(32sC4, Vec4i, 32s_C4MR, Ipp32s)
207 DEF_COPY_MASK(32sC6, Vec6i)
208 DEF_COPY_MASK(32sC8, Vec8i)
210 BinaryFunc copyMaskTab[] =
231 BinaryFunc getCopyMaskFunc(size_t esz)
233 return esz <= 32 && copyMaskTab[esz] ? copyMaskTab[esz] : copyMaskGeneric;
// Deep copy of this Mat into _dst, allocating _dst as needed.
// Three paths are visible: (1) fixed-type destination of a different type
// delegates to convertTo(); (2) UMat destination uploads the data through
// the device allocator; (3) plain Mat destination uses row-wise memcpy
// (2-D) or an NAryMatIterator over planes (n-D).
// NOTE(review): this listing elides lines (braces, the branch conditions
// selecting the UMat / 2-D / n-D paths, early returns) — restore from
// upstream before compiling.
237 void Mat::copyTo( OutputArray _dst ) const
239 CV_INSTRUMENT_REGION()
241 int dtype = _dst.type();
// fixed-type destination of a different type: convert instead of copy
242 if( _dst.fixedType() && dtype != type() )
244 CV_Assert( channels() == CV_MAT_CN(dtype) );
245 convertTo( _dst, dtype );
// --- UMat destination: upload host data via the device allocator ---
257 _dst.create( dims, size.p, type() );
258 UMat dst = _dst.getUMat();
259 CV_Assert(dst.u != NULL);
260 size_t i, sz[CV_MAX_DIM] = {0}, dstofs[CV_MAX_DIM], esz = elemSize();
261 CV_Assert(dims >= 0 && dims < CV_MAX_DIM);
262 for( i = 0; i < (size_t)dims; i++ )
265 dst.ndoffset(dstofs);
// last-dimension offset is in elements; convert it to bytes
266 dstofs[dims-1] *= esz;
267 dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p);
// --- 2-D Mat destination ---
273 _dst.create( rows, cols, type() );
274 Mat dst = _dst.getMat();
// same buffer: nothing to do
275 if( data == dst.data )
278 if( rows > 0 && cols > 0 )
280 // For some cases (with vector) dst.size != src.size, so force to column-based form
281 // It prevents memory corruption in case of column-based src
283 dst = dst.reshape(0, (int)dst.total());
285 const uchar* sptr = data;
286 uchar* dptr = dst.data;
288 #if IPP_VERSION_X100 >= 201700
289 CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1R_L, sptr, (int)step, dptr, (int)dst.step, ippiSizeL((int)(cols*elemSize()), rows)) >= 0)
// merge contiguous rows into fewer, wider memcpy calls where possible
292 Size sz = getContinuousSize(*this, dst);
293 size_t len = sz.width*elemSize();
295 for( ; sz.height--; sptr += step, dptr += dst.step )
296 memcpy( dptr, sptr, len );
// --- n-D Mat destination: copy plane by plane ---
301 _dst.create( dims, size, type() );
302 Mat dst = _dst.getMat();
303 if( data == dst.data )
308 const Mat* arrays[] = { this, &dst };
310 NAryMatIterator it(arrays, ptrs, 2);
311 size_t sz = it.size*elemSize();
313 for( size_t i = 0; i < it.nplanes; i++, ++it )
314 memcpy(ptrs[1], ptrs[0], sz);
// IPP-accelerated masked copy used by Mat::copyTo(dst, mask).
// Returns true when IPP handled the copy, false to fall back to the
// generic implementation. Only 8-bit single-channel masks are supported.
// NOTE(review): elided lines (the HAVE_IPP_IW guard, continuity branch
// selection, braces, returns) — restore from upstream before compiling.
319 static bool ipp_copyTo(const Mat &src, Mat &dst, const Mat &mask)
322 CV_INSTRUMENT_REGION_IPP()
// IPP path requires an 8-bit, single-channel mask
324 if(mask.channels() > 1 || mask.depth() != CV_8U)
// continuous case: one IPP call over the whole image
329 IppiSize size = ippiSize(src.size());
330 return CV_INSTRUMENT_FUN_IPP(llwiCopyMask, src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, size, (int)src.elemSize1(), src.channels(), mask.ptr(), (int)mask.step) >= 0;
// non-continuous case: iterate plane by plane (each plane is one row here)
334 const Mat *arrays[] = {&src, &dst, &mask, NULL};
335 uchar *ptrs[3] = {NULL};
336 NAryMatIterator it(arrays, ptrs);
338 IppiSize size = ippiSize(it.size, 1);
340 for (size_t i = 0; i < it.nplanes; i++, ++it)
// bail out (fall back to generic path) on the first IPP failure
342 if(CV_INSTRUMENT_FUN_IPP(llwiCopyMask, ptrs[0], 0, ptrs[1], 0, size, (int)src.elemSize1(), src.channels(), ptrs[2], 0) < 0)
// non-IPP build: silence unused-parameter warnings
348 CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(mask);
// Masked deep copy: copies only the elements where mask != 0.
// An empty mask delegates to the unmasked copyTo (elided here). The mask
// may be single-channel (whole pixel copied) or have the same channel
// count as *this (per-channel copy, esz = elemSize1()).
// NOTE(review): elided lines (the empty-mask early-out, braces, the
// continuous/non-continuous branch selection) — restore from upstream.
354 void Mat::copyTo( OutputArray _dst, InputArray _mask ) const
356 CV_INSTRUMENT_REGION()
358 Mat mask = _mask.getMat();
365 int cn = channels(), mcn = mask.channels();
366 CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
367 bool colorMask = mcn > 1;
370 CV_Assert( size() == mask.size() );
375 Mat dst0 = _dst.getMat();
376 _dst.create(dims, size, type()); // TODO Prohibit 'dst' re-creation, user should pass it explicitly with correct size/type or empty
// if create() reallocated, the caller passed an incompatible dst
379 if (dst.data != dst0.data) // re-allocation happened
382 CV_Assert(dst0.empty() &&
383 "copyTo(): dst size/type mismatch (looks like a bug) - use dst.release() before copyTo() call to suppress this message");
// masked copy only writes where mask!=0, so clear the fresh buffer first
385 dst = Scalar(0); // do not leave dst uninitialized
389 CV_IPP_RUN_FAST(ipp_copyTo(*this, dst, mask))
// per-channel mask => treat data channel-wise (element size = 1 channel)
391 size_t esz = colorMask ? elemSize1() : elemSize();
392 BinaryFunc copymask = getCopyMaskFunc(esz);
// continuous case: a single 2-D masked-copy call
396 Size sz = getContinuousSize(*this, dst, mask, mcn);
397 copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
// general case: iterate over planes
401 const Mat* arrays[] = { this, &dst, &mask, 0 };
403 NAryMatIterator it(arrays, ptrs);
404 Size sz((int)(it.size*mcn), 1);
406 for( size_t i = 0; i < it.nplanes; i++, ++it )
407 copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
// Fills the matrix with the scalar s. Fast path: when the raw scalar bits
// are all zero, every plane is memset to 0. Otherwise the scalar is
// unrolled into a small raw-byte pattern which is tiled across the first
// plane and then copied to the remaining planes.
// NOTE(review): elided lines (braces, the return statement, the first-plane
// fill loop header) — restore from upstream before compiling.
410 Mat& Mat::operator = (const Scalar& s)
412 CV_INSTRUMENT_REGION()
417 const Mat* arrays[] = { this };
419 NAryMatIterator it(arrays, &dptr, 1);
420 size_t elsize = it.size*elemSize();
// reinterpret the 4 doubles of the scalar as raw 64-bit words
421 const int64* is = (const int64*)&s.val[0];
// all-zero scalar (bitwise): memset is the fastest fill
423 if( is[0] == 0 && is[1] == 0 && is[2] == 0 && is[3] == 0 )
425 for( size_t i = 0; i < it.nplanes; i++, ++it )
426 memset( dptr, 0, elsize );
// non-zero scalar: build a repeated raw pattern of up to 12 elements
433 scalarToRawData(s, scalar, type(), 12);
434 size_t blockSize = 12*elemSize1();
// tile the pattern across the first plane
436 for( size_t j = 0; j < elsize; j += blockSize )
438 size_t sz = MIN(blockSize, elsize - j);
439 CV_Assert(sz <= sizeof(scalar));
440 memcpy( dptr + j, scalar, sz );
// remaining planes are copies of the first one
444 for( size_t i = 1; i < it.nplanes; i++ )
447 memcpy( dptr, data, elsize );
// IPP-accelerated Mat::setTo with optional mask. Returns true when IPP
// handled the fill, false to fall back to the generic implementation.
// Restrictions: 8-bit single-channel mask, at most 4 channels, and for
// CV_32F destinations only finite fill values (IPP mishandles NaN/Inf).
// NOTE(review): elided lines (the HAVE_IPP_IW guard, braces, early
// returns, continuity branch selection) — restore from upstream.
454 static bool ipp_Mat_setTo_Mat(Mat &dst, Mat &_val, Mat &mask)
457 CV_INSTRUMENT_REGION_IPP()
462 if(mask.depth() != CV_8U || mask.channels() > 1)
465 if(dst.channels() > 4)
// reject NaN/Inf fill values for float destinations
468 if (dst.depth() == CV_32F)
470 for (int i = 0; i < (int)(_val.total()); i++)
472 float v = (float)(_val.at<double>(i)); // cast to float
473 if (cvIsNaN(v) || cvIsInf(v)) // accept finite numbers only
// continuous case: single IPP call over the whole image
480 IppiSize size = ippiSize(dst.size());
481 IppDataType dataType = ippiGetDataType(dst.depth());
482 ::ipp::IwValueFloat s;
// unroll the scalar into a per-channel double vector for IPP
483 convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);
485 return CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, dst.ptr(), (int)dst.step, size, dataType, dst.channels(), mask.ptr(), (int)mask.step) >= 0;
// general case: iterate plane by plane (mask may be absent)
489 const Mat *arrays[] = {&dst, mask.empty()?NULL:&mask, NULL};
490 uchar *ptrs[2] = {NULL};
491 NAryMatIterator it(arrays, ptrs);
493 IppiSize size = {(int)it.size, 1};
494 IppDataType dataType = ippiGetDataType(dst.depth());
495 ::ipp::IwValueFloat s;
496 convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);
498 for( size_t i = 0; i < it.nplanes; i++, ++it)
500 if(CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, ptrs[0], 0, size, dataType, dst.channels(), ptrs[1], 0) < 0)
// non-IPP build: silence unused-parameter warnings
506 CV_UNUSED(dst); CV_UNUSED(_val); CV_UNUSED(mask);
// Sets all (or mask-selected) elements to _value. The scalar is unrolled
// into a block buffer of up to BLOCK_SIZE bytes which is then written out
// either with memcpy (no mask) or via the masked-copy routine (with mask).
// NOTE(review): elided lines (the empty()/return early-outs, braces, the
// masked/unmasked branch selection, the final return *this) — restore
// from upstream before compiling.
512 Mat& Mat::setTo(InputArray _value, InputArray _mask)
514 CV_INSTRUMENT_REGION()
519 Mat value = _value.getMat(), mask = _mask.getMat();
521 CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT ));
522 int cn = channels(), mcn = mask.channels();
523 CV_Assert( mask.empty() || (mask.depth() == CV_8U && (mcn == 1 || mcn == cn) && size == mask.size) );
525 CV_IPP_RUN_FAST(ipp_Mat_setTo_Mat(*this, value, mask), *this)
// per-channel mask => work channel-wise
527 size_t esz = mcn > 1 ? elemSize1() : elemSize();
528 BinaryFunc copymask = getCopyMaskFunc(esz);
530 const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 };
531 uchar* ptrs[2]={0,0};
532 NAryMatIterator it(arrays, ptrs);
533 int totalsz = (int)it.size*mcn;
534 int blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
535 blockSize0 -= blockSize0 % mcn; // must be divisible without remainder for unrolling and advancing
// +32 slack so the pointer can be aligned below
536 AutoBuffer<uchar> _scbuf(blockSize0*esz + 32);
537 uchar* scbuf = alignPtr((uchar*)_scbuf.data(), (int)sizeof(double));
// fill scbuf with the scalar repeated blockSize0/mcn times
538 convertAndUnrollScalar( value, type(), scbuf, blockSize0/mcn );
540 for( size_t i = 0; i < it.nplanes; i++, ++it )
542 for( int j = 0; j < totalsz; j += blockSize0 )
544 Size sz(std::min(blockSize0, totalsz - j), 1);
545 size_t blockSize = sz.width*esz;
// masked path: copy scalar block through the mask
548 copymask(scbuf, 0, ptrs[1], 0, ptrs[0], 0, sz, &esz);
// unmasked path: plain block copy
552 memcpy(ptrs[0], scbuf, blockSize);
553 ptrs[0] += blockSize;
561 flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
563 int i, j, limit = (int)(((size.width + 1)/2)*esz);
564 AutoBuffer<int> _tab(size.width*esz);
565 int* tab = _tab.data();
567 for( i = 0; i < size.width; i++ )
568 for( size_t k = 0; k < esz; k++ )
569 tab[i*esz + k] = (int)((size.width - i - 1)*esz + k);
571 for( ; size.height--; src += sstep, dst += dstep )
573 for( i = 0; i < limit; i++ )
576 uchar t0 = src[i], t1 = src[j];
577 dst[i] = t1; dst[j] = t0;
583 flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size, size_t esz )
585 const uchar* src1 = src0 + (size.height - 1)*sstep;
586 uchar* dst1 = dst0 + (size.height - 1)*dstep;
587 size.width *= (int)esz;
589 for( int y = 0; y < (size.height + 1)/2; y++, src0 += sstep, src1 -= sstep,
590 dst0 += dstep, dst1 -= dstep )
593 if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
595 for( ; i <= size.width - 16; i += 16 )
597 int t0 = ((int*)(src0 + i))[0];
598 int t1 = ((int*)(src1 + i))[0];
600 ((int*)(dst0 + i))[0] = t1;
601 ((int*)(dst1 + i))[0] = t0;
603 t0 = ((int*)(src0 + i))[1];
604 t1 = ((int*)(src1 + i))[1];
606 ((int*)(dst0 + i))[1] = t1;
607 ((int*)(dst1 + i))[1] = t0;
609 t0 = ((int*)(src0 + i))[2];
610 t1 = ((int*)(src1 + i))[2];
612 ((int*)(dst0 + i))[2] = t1;
613 ((int*)(dst1 + i))[2] = t0;
615 t0 = ((int*)(src0 + i))[3];
616 t1 = ((int*)(src1 + i))[3];
618 ((int*)(dst0 + i))[3] = t1;
619 ((int*)(dst1 + i))[3] = t0;
622 for( ; i <= size.width - 4; i += 4 )
624 int t0 = ((int*)(src0 + i))[0];
625 int t1 = ((int*)(src1 + i))[0];
627 ((int*)(dst0 + i))[0] = t1;
628 ((int*)(dst1 + i))[0] = t0;
632 for( ; i < size.width; i++ )
// Bit flags describing which axes the OpenCL flip kernel mirrors.
645 enum { FLIP_COLS = 1 << 0, FLIP_ROWS = 1 << 1, FLIP_BOTH = FLIP_ROWS | FLIP_COLS };
// OpenCL implementation of cv::flip. Selects one of three kernels based on
// flipCode (rows / cols / both), builds it with type- and vector-width-
// specific defines, and runs it over half the flipped dimension(s) (each
// work item writes a mirrored pair).
// NOTE(review): elided lines (braces, kernel-empty checks, the flipCode==0
// branch header) — restore from upstream before compiling.
647 static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode )
649 CV_Assert(flipCode >= -1 && flipCode <= 1);
651 const ocl::Device & dev = ocl::Device::getDefault();
652 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
653 flipType, kercn = std::min(ocl::predictOptimalVectorWidth(_src, _dst), 4);
// doubles need device support
655 bool doubleSupport = dev.doubleFPConfig() > 0;
656 if (!doubleSupport && depth == CV_64F)
// map flipCode to kernel name + axis flags
662 const char * kernelName;
664 kernelName = "arithm_flip_rows", flipType = FLIP_ROWS;
665 else if (flipCode > 0)
666 kernelName = "arithm_flip_cols", flipType = FLIP_COLS;
668 kernelName = "arithm_flip_rows_cols", flipType = FLIP_BOTH;
// Intel GPUs process several rows per work item
670 int pxPerWIy = (dev.isIntel() && (dev.type() & ocl::Device::TYPE_GPU)) ? 4 : 1;
671 kercn = (cn!=3 || flipType == FLIP_ROWS) ? std::max(kercn, cn) : cn;
673 ocl::Kernel k(kernelName, ocl::core::flip_oclsrc,
674 format( "-D T=%s -D T1=%s -D cn=%d -D PIX_PER_WI_Y=%d -D kercn=%d",
675 kercn != cn ? ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)) : ocl::vecopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
676 kercn != cn ? ocl::typeToStr(depth) : ocl::vecopTypeToStr(depth), cn, pxPerWIy, kercn));
680 Size size = _src.size();
681 _dst.create(size, type);
682 UMat src = _src.getUMat(), dst = _dst.getUMat();
// each work item handles kercn elements; mirrored dims are halved
684 int cols = size.width * cn / kercn, rows = size.height;
685 cols = flipType == FLIP_COLS ? (cols + 1) >> 1 : cols;
686 rows = flipType & FLIP_ROWS ? (rows + 1) >> 1 : rows;
688 k.args(ocl::KernelArg::ReadOnlyNoSize(src),
689 ocl::KernelArg::WriteOnly(dst, cn, kercn), rows, cols);
691 size_t maxWorkGroupSize = dev.maxWorkGroupSize();
692 CV_Assert(maxWorkGroupSize % 4 == 0);
694 size_t globalsize[2] = { (size_t)cols, ((size_t)rows + pxPerWIy - 1) / pxPerWIy },
695 localsize[2] = { maxWorkGroupSize / 4, 4 };
696 return k.run(2, globalsize, (flipType == FLIP_COLS) && !dev.isIntel() ? localsize : NULL, false);
// IPP implementation of cv::flip via iwiMirror. Maps OpenCV's flip_mode
// (<0 both axes, 0 horizontal axis, >0 vertical axis) to the IPP axis enum.
// Returns true on success, false to fall back to the generic path.
// NOTE(review): elided lines (the HAVE_IPP_IW guard, the IppiAxis
// declaration and the flip_mode<0 condition, braces, returns) — restore
// from upstream before compiling.
702 static bool ipp_flip(Mat &src, Mat &dst, int flip_mode)
705 CV_INSTRUMENT_REGION_IPP()
// flip_mode < 0: mirror around both axes
709 ippMode = ippAxsBoth;
710 else if(flip_mode == 0)
711 ippMode = ippAxsHorizontal;
713 ippMode = ippAxsVertical;
717 ::ipp::IwiImage iwSrc = ippiGetImage(src);
718 ::ipp::IwiImage iwDst = ippiGetImage(dst);
720 CV_INSTRUMENT_FUN_IPP(::ipp::iwiMirror, iwSrc, iwDst, ippMode);
// IPP failures are reported by exception; treat as "not handled"
722 catch(::ipp::IwException)
// non-IPP build: silence unused-parameter warnings
729 CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(flip_mode);
// Public cv::flip: mirrors a 2-D array around the horizontal axis
// (flip_mode==0), vertical axis (flip_mode>0) or both (flip_mode<0).
// Degenerate 1-pixel-thick cases reduce to a plain copy; otherwise the
// OpenCL, then IPP, then scalar flipVert/flipHoriz paths are tried.
// NOTE(review): elided lines (empty-input handling, braces, the flip_mode
// dispatch conditions before the flipVert/flipHoriz calls) — restore from
// upstream before compiling.
736 void flip( InputArray _src, OutputArray _dst, int flip_mode )
738 CV_INSTRUMENT_REGION()
740 CV_Assert( _src.dims() <= 2 );
741 Size size = _src.size();
// single-row source: flipping rows is the identity
747 if (size.height == 1)
// cases where the flip is a no-op: just copy
751 if ((size.width == 1 && flip_mode > 0) ||
752 (size.height == 1 && flip_mode == 0) ||
753 (size.height == 1 && size.width == 1 && flip_mode < 0))
755 return _src.copyTo(_dst);
758 CV_OCL_RUN( _dst.isUMat(), ocl_flip(_src, _dst, flip_mode))
760 Mat src = _src.getMat();
761 int type = src.type();
762 _dst.create( size, type );
763 Mat dst = _dst.getMat();
765 CV_IPP_RUN_FAST(ipp_flip(src, dst, flip_mode));
767 size_t esz = CV_ELEM_SIZE(type);
// flip_mode == 0: vertical mirror only
770 flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
// otherwise mirror horizontally (src -> dst) ...
772 flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
// ... and for flip_mode < 0 additionally mirror dst vertically in place
775 flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
780 static bool ocl_rotate(InputArray _src, OutputArray _dst, int rotateMode)
784 case ROTATE_90_CLOCKWISE:
785 transpose(_src, _dst);
789 flip(_src, _dst, -1);
791 case ROTATE_90_COUNTERCLOCKWISE:
792 transpose(_src, _dst);
802 void rotate(InputArray _src, OutputArray _dst, int rotateMode)
804 CV_Assert(_src.dims() <= 2);
806 CV_OCL_RUN(_dst.isUMat(), ocl_rotate(_src, _dst, rotateMode))
810 case ROTATE_90_CLOCKWISE:
811 transpose(_src, _dst);
815 flip(_src, _dst, -1);
817 case ROTATE_90_COUNTERCLOCKWISE:
818 transpose(_src, _dst);
826 #if defined HAVE_OPENCL && !defined __APPLE__
// OpenCL implementation of cv::repeat: tiles the source ny x nx times into
// the destination with a single kernel launch. The trivial 1x1 tiling is a
// plain copy.
// NOTE(review): elided lines (the copyTo call in the 1x1 branch, braces,
// the kernel-empty check) — restore from upstream before compiling.
828 static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
// 1x1 tiling is just a copy
830 if (ny == 1 && nx == 1)
836 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
837 rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1,
838 kercn = ocl::predictOptimalVectorWidth(_src, _dst);
840 ocl::Kernel k("repeat", ocl::core::repeat_oclsrc,
841 format("-D T=%s -D nx=%d -D ny=%d -D rowsPerWI=%d -D cn=%d",
842 ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
843 nx, ny, rowsPerWI, kercn));
847 UMat src = _src.getUMat(), dst = _dst.getUMat();
848 k.args(ocl::KernelArg::ReadOnly(src, cn, kercn), ocl::KernelArg::WriteOnlyNoSize(dst));
// one work item per kercn source elements; several rows per item on Intel
850 size_t globalsize[] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
851 return k.run(2, globalsize, NULL, false);
// Public cv::repeat: tiles _src ny times vertically and nx times
// horizontally into _dst. The first source row-band is built by copying
// the source row across each destination row; the remaining bands are
// whole-row copies of rows ssize.height earlier.
// NOTE(review): elided lines (braces, the declaration of x and y) —
// restore from upstream before compiling.
856 void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
858 CV_INSTRUMENT_REGION()
// in-place tiling is not supported
860 CV_Assert(_src.getObj() != _dst.getObj());
861 CV_Assert( _src.dims() <= 2 );
862 CV_Assert( ny > 0 && nx > 0 );
864 Size ssize = _src.size();
865 _dst.create(ssize.height*ny, ssize.width*nx, _src.type());
867 #if !defined __APPLE__
868 CV_OCL_RUN(_dst.isUMat(),
869 ocl_repeat(_src, ny, nx, _dst))
872 Mat src = _src.getMat(), dst = _dst.getMat();
873 Size dsize = dst.size();
874 int esz = (int)src.elemSize();
// from here on, widths are byte counts
876 ssize.width *= esz; dsize.width *= esz;
// first band: tile the source row across each destination row
878 for( y = 0; y < ssize.height; y++ )
880 for( x = 0; x < dsize.width; x += ssize.width )
881 memcpy( dst.ptr(y) + x, src.ptr(y), ssize.width );
// remaining bands: copy already-tiled rows one band up
884 for( ; y < dsize.height; y++ )
885 memcpy( dst.ptr(y), dst.ptr(y - ssize.height), dsize.width );
888 Mat repeat(const Mat& src, int ny, int nx)
890 if( nx == 1 && ny == 1 )
893 repeat(src, ny, nx, dst);
902 Various border types, image boundaries are denoted with '|'
904 * BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
905 * BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
906 * BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
907 * BORDER_WRAP: cdefgh|abcdefgh|abcdefg
908 * BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
910 int cv::borderInterpolate( int p, int len, int borderType )
912 CV_TRACE_FUNCTION_VERBOSE();
914 if( (unsigned)p < (unsigned)len )
916 else if( borderType == BORDER_REPLICATE )
917 p = p < 0 ? 0 : len - 1;
918 else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
920 int delta = borderType == BORDER_REFLECT_101;
928 p = len - 1 - (p - len) - delta;
930 while( (unsigned)p >= (unsigned)len );
932 else if( borderType == BORDER_WRAP )
936 p -= ((p-len+1)/len)*len;
940 else if( borderType == BORDER_CONSTANT )
943 CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
950 void copyMakeBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
951 uchar* dst, size_t dststep, cv::Size dstroi,
952 int top, int left, int cn, int borderType )
954 const int isz = (int)sizeof(int);
955 int i, j, k, elemSize = 1;
956 bool intMode = false;
958 if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 )
965 cv::AutoBuffer<int> _tab((dstroi.width - srcroi.width)*cn);
966 int* tab = _tab.data();
967 int right = dstroi.width - srcroi.width - left;
968 int bottom = dstroi.height - srcroi.height - top;
970 for( i = 0; i < left; i++ )
972 j = cv::borderInterpolate(i - left, srcroi.width, borderType)*cn;
973 for( k = 0; k < cn; k++ )
974 tab[i*cn + k] = j + k;
977 for( i = 0; i < right; i++ )
979 j = cv::borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn;
980 for( k = 0; k < cn; k++ )
981 tab[(i+left)*cn + k] = j + k;
989 uchar* dstInner = dst + dststep*top + left*elemSize;
991 for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
993 if( dstInner != src )
994 memcpy(dstInner, src, srcroi.width*elemSize);
998 const int* isrc = (int*)src;
999 int* idstInner = (int*)dstInner;
1000 for( j = 0; j < left; j++ )
1001 idstInner[j - left] = isrc[tab[j]];
1002 for( j = 0; j < right; j++ )
1003 idstInner[j + srcroi.width] = isrc[tab[j + left]];
1007 for( j = 0; j < left; j++ )
1008 dstInner[j - left] = src[tab[j]];
1009 for( j = 0; j < right; j++ )
1010 dstInner[j + srcroi.width] = src[tab[j + left]];
1014 dstroi.width *= elemSize;
1017 for( i = 0; i < top; i++ )
1019 j = cv::borderInterpolate(i - top, srcroi.height, borderType);
1020 memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width);
1023 for( i = 0; i < bottom; i++ )
1025 j = cv::borderInterpolate(i + srcroi.height, srcroi.height, borderType);
1026 memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width);
1031 void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
1032 uchar* dst, size_t dststep, cv::Size dstroi,
1033 int top, int left, int cn, const uchar* value )
1036 cv::AutoBuffer<uchar> _constBuf(dstroi.width*cn);
1037 uchar* constBuf = _constBuf.data();
1038 int right = dstroi.width - srcroi.width - left;
1039 int bottom = dstroi.height - srcroi.height - top;
1041 for( i = 0; i < dstroi.width; i++ )
1043 for( j = 0; j < cn; j++ )
1044 constBuf[i*cn + j] = value[j];
1052 uchar* dstInner = dst + dststep*top + left;
1054 for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
1056 if( dstInner != src )
1057 memcpy( dstInner, src, srcroi.width );
1058 memcpy( dstInner - left, constBuf, left );
1059 memcpy( dstInner + srcroi.width, constBuf, right );
1064 for( i = 0; i < top; i++ )
1065 memcpy(dst + (i - top)*dststep, constBuf, dstroi.width);
1067 for( i = 0; i < bottom; i++ )
1068 memcpy(dst + (i + srcroi.height)*dststep, constBuf, dstroi.width);
// OpenCL implementation of cv::copyMakeBorder. Non-isolated submatrices
// first grow their ROI into the parent data so real pixels are used for
// the border where available; a single kernel then fills the rest.
// NOTE(review): elided lines (braces, the unsupported-border early return
// body, the kernel-empty check, ROI bookkeeping after adjustROI) —
// restore from upstream before compiling.
1077 static bool ocl_copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
1078 int left, int right, int borderType, const Scalar& value )
1080 int type = _src.type(), cn = CV_MAT_CN(type), depth = CV_MAT_DEPTH(type),
1081 rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
1082 bool isolated = (borderType & BORDER_ISOLATED) != 0;
1083 borderType &= ~cv::BORDER_ISOLATED;
// only the five border modes named below are implemented in the kernel
1085 if ( !(borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE || borderType == BORDER_REFLECT ||
1086 borderType == BORDER_WRAP || borderType == BORDER_REFLECT_101) ||
// border mode is baked into the kernel as a compile-time define
1090 const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101" };
// 3-channel scalars are passed as 4-channel (OpenCL has no 3-elem vectors)
1091 int scalarcn = cn == 3 ? 4 : cn;
1092 int sctype = CV_MAKETYPE(depth, scalarcn);
1093 String buildOptions = format("-D T=%s -D %s -D T1=%s -D cn=%d -D ST=%s -D rowsPerWI=%d",
1094 ocl::memopTypeToStr(type), borderMap[borderType],
1095 ocl::memopTypeToStr(depth), cn,
1096 ocl::memopTypeToStr(sctype), rowsPerWI);
1098 ocl::Kernel k("copyMakeBorder", ocl::core::copymakeborder_oclsrc, buildOptions);
1102 UMat src = _src.getUMat();
// non-isolated ROI: extend into the parent image where pixels exist
1103 if( src.isSubmatrix() && !isolated )
1107 src.locateROI(wholeSize, ofs);
1108 int dtop = std::min(ofs.y, top);
1109 int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
1110 int dleft = std::min(ofs.x, left);
1111 int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
1112 src.adjustROI(dtop, dbottom, dleft, dright);
1119 _dst.create(src.rows + top + bottom, src.cols + left + right, type);
1120 UMat dst = _dst.getUMat();
// degenerate border: a plain copy (skipped when src and dst alias)
1122 if (top == 0 && left == 0 && bottom == 0 && right == 0)
1124 if(src.u != dst.u || src.step != dst.step)
1129 k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
1130 top, left, ocl::KernelArg::Constant(Mat(1, 1, sctype, value)));
1132 size_t globalsize[2] = { (size_t)dst.cols, ((size_t)dst.rows + rowsPerWI - 1) / rowsPerWI };
1133 return k.run(2, globalsize, NULL, false);
// IPP implementation of cv::copyMakeBorder via llwiCopyMakeBorder.
// Returns true when IPP handled the operation, false to fall back.
// NOTE(review): elided lines (the pSrc declaration/in-place handling,
// braces, returns, the #else of the HAVE_IPP_IW guard) — restore from
// upstream before compiling.
1142 static bool ipp_copyMakeBorder( Mat &_src, Mat &_dst, int top, int bottom,
1143 int left, int right, int _borderType, const Scalar& value )
1145 #if defined HAVE_IPP_IW && !IPP_DISABLE_PERF_COPYMAKE
1146 CV_INSTRUMENT_REGION_IPP()
1148 ::ipp::IwiBorderSize borderSize(left, top, right, bottom);
1149 ::ipp::IwiSize size(_src.cols, _src.rows);
1150 IppDataType dataType = ippiGetDataType(_src.depth());
1151 IppiBorderType borderType = ippiGetBorderType(_borderType);
// -1 means the OpenCV border mode has no IPP equivalent
1152 if((int)borderType == -1)
// interior of the destination that receives the source pixels
1158 Rect dstRect(borderSize.left, borderSize.top,
1159 _dst.cols - borderSize.right - borderSize.left,
1160 _dst.rows - borderSize.bottom - borderSize.top);
1161 Mat subDst = Mat(_dst, dstRect);
1164 return CV_INSTRUMENT_FUN_IPP(llwiCopyMakeBorder, pSrc->ptr(), pSrc->step, subDst.ptr(), subDst.step, size, dataType, _src.channels(), borderSize, borderType, &value[0]) >= 0;
// non-IPP build: silence unused-parameter warnings
1166 CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(top); CV_UNUSED(bottom); CV_UNUSED(left); CV_UNUSED(right);
1167 CV_UNUSED(_borderType); CV_UNUSED(value);
// Public cv::copyMakeBorder: forms a bordered copy of _src in _dst.
// Non-isolated submatrices first extend their ROI into the parent data;
// then the OpenCL / IPP / scalar paths are tried in that order. Constant
// borders go through copyMakeConstBorder_8u with the scalar converted to
// raw bytes; all other modes go through copyMakeBorder_8u.
// NOTE(review): elided lines (braces, ROI bookkeeping after adjustROI,
// the in-place no-border copy, the cn==3/4 scalar handling around the
// CV_Assert) — restore from upstream before compiling.
1174 void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
1175 int left, int right, int borderType, const Scalar& value )
1177 CV_INSTRUMENT_REGION()
1179 CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 );
1181 CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
1182 ocl_copyMakeBorder(_src, _dst, top, bottom, left, right, borderType, value))
1184 Mat src = _src.getMat();
1185 int type = src.type();
// non-isolated ROI: borrow real pixels from the parent image
1187 if( src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0 )
1191 src.locateROI(wholeSize, ofs);
1192 int dtop = std::min(ofs.y, top);
1193 int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
1194 int dleft = std::min(ofs.x, left);
1195 int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
1196 src.adjustROI(dtop, dbottom, dleft, dright);
1203 _dst.create( src.rows + top + bottom, src.cols + left + right, type );
1204 Mat dst = _dst.getMat();
// degenerate border: plain copy unless src and dst already alias
1206 if(top == 0 && left == 0 && bottom == 0 && right == 0)
1208 if(src.data != dst.data || src.step != dst.step)
1213 borderType &= ~BORDER_ISOLATED;
1215 CV_IPP_RUN_FAST(ipp_copyMakeBorder(src, dst, top, bottom, left, right, borderType, value))
// non-constant borders: index-table based fill, cn passed as bytes/pixel
1217 if( borderType != BORDER_CONSTANT )
1218 copyMakeBorder_8u( src.ptr(), src.step, src.size(),
1219 dst.ptr(), dst.step, dst.size(),
1220 top, left, (int)src.elemSize(), borderType );
// constant border: convert the scalar to raw bytes first
1223 int cn = src.channels(), cn1 = cn;
1224 AutoBuffer<double> buf(cn);
// >4-channel data is only supported when all scalar components agree
1227 CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] );
1230 scalarToRawData(value, buf.data(), CV_MAKETYPE(src.depth(), cn1), cn);
1231 copyMakeConstBorder_8u( src.ptr(), src.step, src.size(),
1232 dst.ptr(), dst.step, dst.size(),
1233 top, left, (int)src.elemSize(), (uchar*)buf.data() );
// Legacy C API cvCopy. Three cases: sparse-to-sparse (deep copy of the
// hash table and all nodes), COI-aware dense copy via mixChannels when
// either IplImage has a channel-of-interest set, and a plain Mat::copyTo
// (optionally masked) otherwise.
// NOTE(review): elided lines (the CV_IMPL void header, braces, the
// hashtable reallocation assignment, the node declaration, the
// no-COI/no-mask branch selection) — restore from upstream.
1239 cvCopy( const void* srcarr, void* dstarr, const void* maskarr )
1241 if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr))
// sparse copy does not support a mask
1243 CV_Assert( maskarr == 0 );
1244 CvSparseMat* src1 = (CvSparseMat*)srcarr;
1245 CvSparseMat* dst1 = (CvSparseMat*)dstarr;
1246 CvSparseMatIterator iterator;
// copy header fields, then rebuild the node set from scratch
1249 dst1->dims = src1->dims;
1250 memcpy( dst1->size, src1->size, src1->dims*sizeof(src1->size[0]));
1251 dst1->valoffset = src1->valoffset;
1252 dst1->idxoffset = src1->idxoffset;
1253 cvClearSet( dst1->heap );
// grow the destination hash table if it would be over-loaded
1255 if( src1->heap->active_count >= dst1->hashsize*CV_SPARSE_HASH_RATIO )
1257 cvFree( &dst1->hashtable );
1258 dst1->hashsize = src1->hashsize;
1260 (void**)cvAlloc( dst1->hashsize*sizeof(dst1->hashtable[0]));
1263 memset( dst1->hashtable, 0, dst1->hashsize*sizeof(dst1->hashtable[0]));
// clone every node, re-chaining it into the destination buckets
1265 for( node = cvInitSparseMatIterator( src1, &iterator );
1266 node != 0; node = cvGetNextSparseNode( &iterator ))
1268 CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( dst1->heap );
1269 int tabidx = node->hashval & (dst1->hashsize - 1);
1270 memcpy( node_copy, node, dst1->heap->elem_size );
1271 node_copy->next = (CvSparseNode*)dst1->hashtable[tabidx];
1272 dst1->hashtable[tabidx] = node_copy;
// dense case: wrap without copying; allow channel-count mismatch for COI
1276 cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
1277 CV_Assert( src.depth() == dst.depth() && src.size == dst.size );
1279 int coi1 = 0, coi2 = 0;
1280 if( CV_IS_IMAGE(srcarr) )
1281 coi1 = cvGetImageCOI((const IplImage*)srcarr);
1282 if( CV_IS_IMAGE(dstarr) )
1283 coi2 = cvGetImageCOI((const IplImage*)dstarr);
// with a COI set, the other side must be single-channel or also use COI
1287 CV_Assert( (coi1 != 0 || src.channels() == 1) &&
1288 (coi2 != 0 || dst.channels() == 1) );
// copy exactly one channel via mixChannels
1290 int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
1291 cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
1295 CV_Assert( src.channels() == dst.channels() );
// masked dense copy
1300 src.copyTo(dst, cv::cvarrToMat(maskarr));
1304 cvSet( void* arr, CvScalar value, const void* maskarr )
1306 cv::Mat m = cv::cvarrToMat(arr);
1310 m.setTo(cv::Scalar(value), cv::cvarrToMat(maskarr));
1314 cvSetZero( CvArr* arr )
1316 if( CV_IS_SPARSE_MAT(arr) )
1318 CvSparseMat* mat1 = (CvSparseMat*)arr;
1319 cvClearSet( mat1->heap );
1320 if( mat1->hashtable )
1321 memset( mat1->hashtable, 0, mat1->hashsize*sizeof(mat1->hashtable[0]));
1324 cv::Mat m = cv::cvarrToMat(arr);
1329 cvFlip( const CvArr* srcarr, CvArr* dstarr, int flip_mode )
1331 cv::Mat src = cv::cvarrToMat(srcarr);
1337 dst = cv::cvarrToMat(dstarr);
1339 CV_Assert( src.type() == dst.type() && src.size() == dst.size() );
1340 cv::flip( src, dst, flip_mode );
1344 cvRepeat( const CvArr* srcarr, CvArr* dstarr )
1346 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
1347 CV_Assert( src.type() == dst.type() &&
1348 dst.rows % src.rows == 0 && dst.cols % src.cols == 0 );
1349 cv::repeat(src, dst.rows/src.rows, dst.cols/src.cols, dst);