1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
48 /****************************************************************************************\
50 \****************************************************************************************/
// De-interleave an n-channel pixel buffer into per-channel planes.
// src: len pixels of cn interleaved channels; dst[c][i] receives channel c of pixel i.
// The first pass handles cn%4 (or 4) channels with a specialized loop, the tail
// loop then peels off the remaining channels four at a time.
template<typename T> static void
split_( const T* src, T** dst, int len, int cn )
{
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;
    if( k == 1 )
    {
        T* dst0 = dst[0];
        for( i = j = 0; i < len; i++, j += cn )
            dst0[i] = src[j];
    }
    else if( k == 2 )
    {
        T *dst0 = dst[0], *dst1 = dst[1];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j];
            dst1[i] = src[j+1];
        }
    }
    else if( k == 3 )
    {
        T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j];
            dst1[i] = src[j+1];
            dst2[i] = src[j+2];
        }
    }
    else
    {
        T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2], *dst3 = dst[3];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j]; dst1[i] = src[j+1];
            dst2[i] = src[j+2]; dst3[i] = src[j+3];
        }
    }
    // Remaining channels, four per iteration (cn > 4 only).
    for( ; k < cn; k += 4 )
    {
        T *dst0 = dst[k], *dst1 = dst[k+1], *dst2 = dst[k+2], *dst3 = dst[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            dst0[i] = src[j]; dst1[i] = src[j+1];
            dst2[i] = src[j+2]; dst3[i] = src[j+3];
        }
    }
}
// Interleave per-channel planes into an n-channel pixel buffer (inverse of split_).
// src[c][i] supplies channel c of pixel i; dst receives len pixels of cn channels.
// First pass covers cn%4 (or 4) channels; the tail loop merges remaining channels
// four at a time.
template<typename T> static void
merge_( const T** src, T* dst, int len, int cn )
{
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;
    if( k == 1 )
    {
        const T* src0 = src[0];
        for( i = j = 0; i < len; i++, j += cn )
            dst[j] = src0[i];
    }
    else if( k == 2 )
    {
        const T *src0 = src[0], *src1 = src[1];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
        }
    }
    else if( k == 3 )
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
            dst[j+2] = src2[i];
        }
    }
    else
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i]; dst[j+1] = src1[i];
            dst[j+2] = src2[i]; dst[j+3] = src3[i];
        }
    }
    // Remaining channels, four per iteration (cn > 4 only).
    for( ; k < cn; k += 4 )
    {
        const T *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            dst[j] = src0[i]; dst[j+1] = src1[i];
            dst[j+2] = src2[i]; dst[j+3] = src3[i];
        }
    }
}
154 static void split8u(const uchar* src, uchar** dst, int len, int cn )
156 split_(src, dst, len, cn);
159 static void split16u(const ushort* src, ushort** dst, int len, int cn )
161 split_(src, dst, len, cn);
164 static void split32s(const int* src, int** dst, int len, int cn )
166 split_(src, dst, len, cn);
169 static void split64s(const int64* src, int64** dst, int len, int cn )
171 split_(src, dst, len, cn);
174 static void merge8u(const uchar** src, uchar* dst, int len, int cn )
176 merge_(src, dst, len, cn);
179 static void merge16u(const ushort** src, ushort* dst, int len, int cn )
181 merge_(src, dst, len, cn);
184 static void merge32s(const int** src, int* dst, int len, int cn )
186 merge_(src, dst, len, cn);
189 static void merge64s(const int64** src, int64* dst, int len, int cn )
191 merge_(src, dst, len, cn);
// Uniform function-pointer types for the depth-indexed dispatch tables below;
// the typed split*/merge* thunks are cast to these generic byte-pointer signatures.
typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn);
typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn);
197 static SplitFunc splitTab[] =
199 (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split16u), (SplitFunc)GET_OPTIMIZED(split16u),
200 (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split64s), 0
203 static MergeFunc mergeTab[] =
205 (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u),
206 (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0
// Split src into cn single-channel matrices, mv[0..cn-1] (allocated here).
// Works on matrices of any dimensionality via NAryMatIterator; each plane is
// processed in cache-friendly blocks of ~BLOCK_SIZE bytes.
void cv::split(const Mat& src, Mat* mv)
    int k, depth = src.depth(), cn = src.channels();
    // Per-depth worker; depends on element size only.
    SplitFunc func = splitTab[depth];
    CV_Assert( func != 0 );
    int esz = (int)src.elemSize(), esz1 = (int)src.elemSize1();
    int blocksize0 = (BLOCK_SIZE + esz-1)/esz;
    // Single buffer holds the Mat* array followed by the (16-byte aligned) data-pointer array.
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);
    // arrays[0] is the source; arrays[1..cn] are the output channel planes.
    for( k = 0; k < cn; k++ )
        mv[k].create(src.dims, src.size, depth);
        arrays[k+1] = &mv[k];
    NAryMatIterator it(arrays, ptrs, cn+1);
    // For few channels process whole planes; otherwise cap block size for cache locality.
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
    for( size_t i = 0; i < it.nplanes; i++, ++it )
        for( int j = 0; j < total; j += blocksize )
            int bsz = std::min(total - j, blocksize);
            func( ptrs[0], &ptrs[1], bsz, cn );
            // Advance raw pointers unless this was the final block of the plane
            // (the iterator resets them on ++it).
            if( j + blocksize < total )
            for( k = 0; k < cn; k++ )
                ptrs[k+1] += bsz*esz1;
256 void cv::split(const Mat& m, vector<Mat>& mv)
258 mv.resize(!m.empty() ? m.channels() : 0);
// Merge n matrices into one interleaved matrix with the summed channel count.
// All inputs must share size and depth. If any input is itself multi-channel,
// the work is delegated to mixChannels with an identity channel mapping;
// otherwise the block-wise merge worker is used.
void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
    CV_Assert( mv && n > 0 );
    int depth = mv[0].depth();
    for( i = 0; i < n; i++ )
        CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
        // allch1 stays true only if every source is single-channel.
        allch1 = allch1 && mv[i].channels() == 1;
        cn += mv[i].channels();
    CV_Assert( 0 < cn && cn <= CV_CN_MAX );
    _dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
    Mat dst = _dst.getMat();
    // General path: some input is multi-channel — build an identity
    // source->destination channel pairing and let mixChannels interleave.
    AutoBuffer<int> pairs(cn*2);
    for( i = 0, j = 0; i < n; i++, j += ni )
        ni = mv[i].channels();
        for( k = 0; k < ni; k++ )
            pairs[(j+k)*2] = j + k;
            pairs[(j+k)*2+1] = j + k;
    mixChannels( mv, n, &dst, 1, &pairs[0], cn );
    // Fast path: all sources are single-channel planes.
    size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
    int blocksize0 = (int)((BLOCK_SIZE + esz-1)/esz);
    // Single buffer: Mat* array followed by the (16-byte aligned) data-pointer array.
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);
    // arrays[0] is the destination; arrays[1..cn] are the source planes.
    for( k = 0; k < cn; k++ )
        arrays[k+1] = &mv[k];
    NAryMatIterator it(arrays, ptrs, cn+1);
    // Whole planes for few channels, capped blocks otherwise (cache locality).
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
    MergeFunc func = mergeTab[depth];
    for( i = 0; i < it.nplanes; i++, ++it )
        for( int j = 0; j < total; j += blocksize )
            int bsz = std::min(total - j, blocksize);
            func( (const uchar**)&ptrs[1], ptrs[0], bsz, cn );
            // Advance pointers unless this was the final block of the plane.
            if( j + blocksize < total )
                for( int k = 0; k < cn; k++ )
                    ptrs[k+1] += bsz*esz1;
338 void cv::merge(const vector<Mat>& mv, OutputArray _dst)
340 merge(!mv.empty() ? &mv[0] : 0, mv.size(), _dst);
343 /****************************************************************************************\
344 * Generalized split/merge: mixing channels *
345 \****************************************************************************************/
// Copy npairs strided channel streams: for pair k, element i of the stream is
// read from src[k][i*sdelta[k]] and written to dst[k][i*ddelta[k]].
// A null source pointer means "fill the destination channel with zeros"
// (used for channel indices that map to no real source).
// The inner loops are manually unrolled by two.
template<typename T> static void
mixChannels_( const T** src, const int* sdelta,
              T** dst, const int* ddelta,
              int len, int npairs )
{
    int i, k;
    for( k = 0; k < npairs; k++ )
    {
        const T* s = src[k];
        T* d = dst[k];
        int ds = sdelta[k], dd = ddelta[k];
        if( s )
        {
            for( i = 0; i <= len - 2; i += 2, s += ds*2, d += dd*2 )
            {
                T t0 = s[0], t1 = s[ds];
                d[0] = t0; d[dd] = t1;
            }
            if( i < len )
                d[0] = s[0];
        }
        else
        {
            // No source: zero the destination channel.
            for( i = 0; i <= len - 2; i += 2, d += dd*2 )
                d[0] = d[dd] = 0;
            if( i < len )
                d[0] = 0;
        }
    }
}
382 static void mixChannels8u( const uchar** src, const int* sdelta,
383 uchar** dst, const int* ddelta,
384 int len, int npairs )
386 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
389 static void mixChannels16u( const ushort** src, const int* sdelta,
390 ushort** dst, const int* ddelta,
391 int len, int npairs )
393 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
396 static void mixChannels32s( const int** src, const int* sdelta,
397 int** dst, const int* ddelta,
398 int len, int npairs )
400 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
403 static void mixChannels64s( const int64** src, const int* sdelta,
404 int64** dst, const int* ddelta,
405 int len, int npairs )
407 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
// Uniform function-pointer type for the depth-indexed mixChannels dispatch table.
typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta,
                                 uchar** dst, const int* ddelta, int len, int npairs );
413 static MixChannelsFunc mixchTab[] =
415 (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u,
416 (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s,
417 (MixChannelsFunc)mixChannels64s, 0
// Copy arbitrary (source channel -> destination channel) pairs between sets of
// matrices. fromTo holds npairs (from, to) global channel indices, counted
// consecutively across the src / dst matrix lists. A negative 'from' index maps
// to a null source, which zero-fills the destination channel.
void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, const int* fromTo, size_t npairs )
    CV_Assert( src && nsrcs > 0 && dst && ndsts > 0 && fromTo && npairs > 0 );
    size_t i, j, k, esz1 = dst[0].elemSize1();
    int depth = dst[0].depth();
    // One buffer carved into: Mat* array, plane data pointers, per-pair src/dst
    // pointers, the 4-int lookup table per pair, and the two stride arrays.
    AutoBuffer<uchar> buf((nsrcs + ndsts + 1)*(sizeof(Mat*) + sizeof(uchar*)) + npairs*(sizeof(uchar*)*2 + sizeof(int)*6));
    const Mat** arrays = (const Mat**)(uchar*)buf;
    uchar** ptrs = (uchar**)(arrays + nsrcs + ndsts);
    const uchar** srcs = (const uchar**)(ptrs + nsrcs + ndsts + 1);
    uchar** dsts = (uchar**)(srcs + npairs);
    // tab[i*4 .. i*4+3] = {src plane index, src byte offset, dst plane index, dst byte offset}.
    int* tab = (int*)(dsts + npairs);
    int *sdelta = (int*)(tab + npairs*4), *ddelta = sdelta + npairs;
    // Register all sources then all destinations with the n-ary iterator.
    for( i = 0; i < nsrcs; i++ )
    for( i = 0; i < ndsts; i++ )
        arrays[i + nsrcs] = &dst[i];
    // ptrs[nsrcs+ndsts] stays null: it backs the "no source" (zero-fill) entry.
    ptrs[nsrcs + ndsts] = 0;
    // Resolve each global channel index to (matrix, channel-within-matrix).
    for( i = 0; i < npairs; i++ )
        int i0 = fromTo[i*2], i1 = fromTo[i*2+1];
        for( j = 0; j < nsrcs; i0 -= src[j].channels(), j++ )
            if( i0 < src[j].channels() )
        CV_Assert(j < nsrcs && src[j].depth() == depth);
        tab[i*4] = (int)j; tab[i*4+1] = (int)(i0*esz1);
        // Source advances by its channel count per pixel (interleaved layout).
        sdelta[i] = src[j].channels();
        // Negative 'from' index: point at the null plane entry; zero-fill.
        tab[i*4] = (int)(nsrcs + ndsts); tab[i*4+1] = 0;
        for( j = 0; j < ndsts; i1 -= dst[j].channels(), j++ )
            if( i1 < dst[j].channels() )
        CV_Assert(i1 >= 0 && j < ndsts && dst[j].depth() == depth);
        tab[i*4+2] = (int)(j + nsrcs); tab[i*4+3] = (int)(i1*esz1);
        ddelta[i] = dst[j].channels();
    NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts));
    int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1));
    MixChannelsFunc func = mixchTab[depth];
    for( i = 0; i < it.nplanes; i++, ++it )
        // Rebuild per-pair raw pointers from this plane's base pointers.
        for( k = 0; k < npairs; k++ )
            srcs[k] = ptrs[tab[k*4]] + tab[k*4+1];
            dsts[k] = ptrs[tab[k*4+2]] + tab[k*4+3];
        for( int j = 0; j < total; j += blocksize )
            int bsz = std::min(total - j, blocksize);
            func( srcs, sdelta, dsts, ddelta, bsz, (int)npairs );
            // Advance per-pair pointers unless this was the plane's final block.
            if( j + blocksize < total )
                for( k = 0; k < npairs; k++ )
                    srcs[k] += blocksize*sdelta[k]*esz1;
                    dsts[k] += blocksize*ddelta[k]*esz1;
499 void cv::mixChannels(const vector<Mat>& src, vector<Mat>& dst,
500 const int* fromTo, size_t npairs)
502 mixChannels(!src.empty() ? &src[0] : 0, src.size(),
503 !dst.empty() ? &dst[0] : 0, dst.size(), fromTo, npairs);
// InputArray-based overload: normalizes the inputs (a single Mat or a vector
// of Mats) into one flat Mat buffer and forwards to the pointer-based overload.
void cv::mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst,
                 const vector<int>& fromTo)
    // A kind other than a vector-of-Mat is treated as a single matrix.
    bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
        src.kind() != _InputArray::STD_VECTOR_VECTOR;
    bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
        dst.kind() != _InputArray::STD_VECTOR_VECTOR;
    int nsrc = src_is_mat ? 1 : (int)src.total();
    int ndst = dst_is_mat ? 1 : (int)dst.total();
    // fromTo is a flat list of (from, to) pairs, hence the even-size requirement.
    CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
    // Sources first, then destinations, in one contiguous buffer.
    // NOTE(review): 'buf' below presumably aliases '_buf' via an elided
    // 'Mat* buf = _buf;' line — confirm against the full file.
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(src_is_mat ? -1 : i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, &fromTo[0], fromTo.size()/2);
529 void cv::extractChannel(InputArray _src, OutputArray _dst, int coi)
531 Mat src = _src.getMat();
532 CV_Assert( 0 <= coi && coi < src.channels() );
533 _dst.create(src.dims, &src.size[0], src.depth());
534 Mat dst = _dst.getMat();
535 int ch[] = { coi, 0 };
536 mixChannels(&src, 1, &dst, 1, ch, 1);
539 void cv::insertChannel(InputArray _src, InputOutputArray _dst, int coi)
541 Mat src = _src.getMat(), dst = _dst.getMat();
542 CV_Assert( src.size == dst.size && src.depth() == dst.depth() );
543 CV_Assert( 0 <= coi && coi < dst.channels() && src.channels() == 1 );
544 int ch[] = { 0, coi };
545 mixChannels(&src, 1, &dst, 1, ch, 1);
548 /****************************************************************************************\
549 * convertScale[Abs] *
550 \****************************************************************************************/
555 template<typename T, typename DT, typename WT> static void
556 cvtScaleAbs_( const T* src, size_t sstep,
557 DT* dst, size_t dstep, Size size,
560 sstep /= sizeof(src[0]);
561 dstep /= sizeof(dst[0]);
563 for( ; size.height--; src += sstep, dst += dstep )
566 #if CV_ENABLE_UNROLLED
567 for( ; x <= size.width - 4; x += 4 )
570 t0 = saturate_cast<DT>(std::abs(src[x]*scale + shift));
571 t1 = saturate_cast<DT>(std::abs(src[x+1]*scale + shift));
572 dst[x] = t0; dst[x+1] = t1;
573 t0 = saturate_cast<DT>(std::abs(src[x+2]*scale + shift));
574 t1 = saturate_cast<DT>(std::abs(src[x+3]*scale + shift));
575 dst[x+2] = t0; dst[x+3] = t1;
578 for( ; x < size.width; x++ )
579 dst[x] = saturate_cast<DT>(std::abs(src[x]*scale + shift));
584 template<typename T, typename DT, typename WT> static void
585 cvtScale_( const T* src, size_t sstep,
586 DT* dst, size_t dstep, Size size,
589 sstep /= sizeof(src[0]);
590 dstep /= sizeof(dst[0]);
592 for( ; size.height--; src += sstep, dst += dstep )
595 #if CV_ENABLE_UNROLLED
596 for( ; x <= size.width - 4; x += 4 )
599 t0 = saturate_cast<DT>(src[x]*scale + shift);
600 t1 = saturate_cast<DT>(src[x+1]*scale + shift);
601 dst[x] = t0; dst[x+1] = t1;
602 t0 = saturate_cast<DT>(src[x+2]*scale + shift);
603 t1 = saturate_cast<DT>(src[x+3]*scale + shift);
604 dst[x+2] = t0; dst[x+3] = t1;
608 for( ; x < size.width; x++ )
609 dst[x] = saturate_cast<DT>(src[x]*scale + shift);
//vz optimized template specialization
// SSE2-accelerated short->short scale/shift: widens 8 int16 lanes to float,
// applies scale/shift, and packs back with saturation via _mm_packs_epi32.
// NOTE(review): the surrounding '#if' guard for the SSE2 path (USE_SSE2 check)
// appears elided in this view — confirm against the full file.
cvtScale_<short, short, float>( const short* src, size_t sstep,
short* dst, size_t dstep, Size size,
float scale, float shift )
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);
    for( ; size.height--; src += sstep, dst += dstep )
        __m128 scale128 = _mm_set1_ps (scale);
        __m128 shift128 = _mm_set1_ps (shift);
        for(; x <= size.width - 8; x += 8 )
            __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x));
            __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4));
            // Sign-extend int16 lanes to int32 (duplicate + arithmetic shift), then to float.
            __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16));
            __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16));
            rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128);
            rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128);
            r0 = _mm_cvtps_epi32(rf0);
            r1 = _mm_cvtps_epi32(rf1);
            // Pack with signed saturation == saturate_cast<short>.
            r0 = _mm_packs_epi32(r0, r1);
            _mm_storeu_si128((__m128i*)(dst + x), r0);
        // Scalar tail for the remaining (< 8) elements of the row.
        for(; x < size.width; x++ )
            dst[x] = saturate_cast<short>(src[x]*scale + shift);
652 template<typename T, typename DT> static void
653 cvt_( const T* src, size_t sstep,
654 DT* dst, size_t dstep, Size size )
656 sstep /= sizeof(src[0]);
657 dstep /= sizeof(dst[0]);
659 for( ; size.height--; src += sstep, dst += dstep )
662 #if CV_ENABLE_UNROLLED
663 for( ; x <= size.width - 4; x += 4 )
666 t0 = saturate_cast<DT>(src[x]);
667 t1 = saturate_cast<DT>(src[x+1]);
668 dst[x] = t0; dst[x+1] = t1;
669 t0 = saturate_cast<DT>(src[x+2]);
670 t1 = saturate_cast<DT>(src[x+3]);
671 dst[x+2] = t0; dst[x+3] = t1;
674 for( ; x < size.width; x++ )
675 dst[x] = saturate_cast<DT>(src[x]);
//vz optimized template specialization, test Core_ConvertScale/ElemWiseTest
// SSE2-accelerated float->short conversion: converts 8 floats to int32 and
// packs to int16 with signed saturation (matches saturate_cast<short>).
// NOTE(review): the surrounding '#if' guard for the SSE2 path (USE_SSE2 check)
// appears elided in this view — confirm against the full file.
cvt_<float, short>( const float* src, size_t sstep,
short* dst, size_t dstep, Size size )
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);
    for( ; size.height--; src += sstep, dst += dstep )
        for( ; x <= size.width - 8; x += 8 )
            __m128 src128 = _mm_loadu_ps (src + x);
            __m128i src_int128 = _mm_cvtps_epi32 (src128);
            src128 = _mm_loadu_ps (src + x + 4);
            __m128i src1_int128 = _mm_cvtps_epi32 (src128);
            // Pack with signed saturation == saturate_cast<short>.
            src1_int128 = _mm_packs_epi32(src_int128, src1_int128);
            _mm_storeu_si128((__m128i*)(dst + x),src1_int128);
        // Scalar tail for the remaining (< 8) elements of the row.
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<short>(src[x]);
712 template<typename T> static void
713 cpy_( const T* src, size_t sstep, T* dst, size_t dstep, Size size )
715 sstep /= sizeof(src[0]);
716 dstep /= sizeof(dst[0]);
718 for( ; size.height--; src += sstep, dst += dstep )
719 memcpy(dst, src, size.width*sizeof(src[0]));
// Macro family generating thin wrappers with the uniform BinaryFunc signature
// (src, sstep, unused, unused, dst, dstep, size, extra) around the conversion
// templates above; 'scale' carries {alpha, beta} as two doubles.
#define DEF_CVT_SCALE_ABS_FUNC(suffix, tfunc, stype, dtype, wtype) \
static void cvtScaleAbs##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double* scale) \
    tfunc(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
#define DEF_CVT_SCALE_FUNC(suffix, stype, dtype, wtype) \
static void cvtScale##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double* scale) \
    cvtScale_(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
#define DEF_CVT_FUNC(suffix, stype, dtype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                     dtype* dst, size_t dstep, Size size, double*) \
    cvt_(src, sstep, dst, dstep, size); \
#define DEF_CPY_FUNC(suffix, stype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                     stype* dst, size_t dstep, Size size, double*) \
    cpy_(src, sstep, dst, dstep, size); \
// Instantiate the wrapper functions for every (source depth, destination depth)
// combination. Naming: cvt[Scale[Abs]]<src><dst>, with the suffix collapsed to
// just the depth when source and destination depths match. The last macro
// argument is the working type of the arithmetic (float or double).

// convertScaleAbs always produces 8u output.
DEF_CVT_SCALE_ABS_FUNC(8u, cvtScaleAbs_, uchar, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(8s8u, cvtScaleAbs_, schar, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(16u8u, cvtScaleAbs_, ushort, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(16s8u, cvtScaleAbs_, short, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(32s8u, cvtScaleAbs_, int, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(32f8u, cvtScaleAbs_, float, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(64f8u, cvtScaleAbs_, double, uchar, float);

// Scaled conversions, grouped by destination depth: -> 8u
DEF_CVT_SCALE_FUNC(8u, uchar, uchar, float);
DEF_CVT_SCALE_FUNC(8s8u, schar, uchar, float);
DEF_CVT_SCALE_FUNC(16u8u, ushort, uchar, float);
DEF_CVT_SCALE_FUNC(16s8u, short, uchar, float);
DEF_CVT_SCALE_FUNC(32s8u, int, uchar, float);
DEF_CVT_SCALE_FUNC(32f8u, float, uchar, float);
DEF_CVT_SCALE_FUNC(64f8u, double, uchar, float);

// -> 8s
DEF_CVT_SCALE_FUNC(8u8s, uchar, schar, float);
DEF_CVT_SCALE_FUNC(8s, schar, schar, float);
DEF_CVT_SCALE_FUNC(16u8s, ushort, schar, float);
DEF_CVT_SCALE_FUNC(16s8s, short, schar, float);
DEF_CVT_SCALE_FUNC(32s8s, int, schar, float);
DEF_CVT_SCALE_FUNC(32f8s, float, schar, float);
DEF_CVT_SCALE_FUNC(64f8s, double, schar, float);

// -> 16u
DEF_CVT_SCALE_FUNC(8u16u, uchar, ushort, float);
DEF_CVT_SCALE_FUNC(8s16u, schar, ushort, float);
DEF_CVT_SCALE_FUNC(16u, ushort, ushort, float);
DEF_CVT_SCALE_FUNC(16s16u, short, ushort, float);
DEF_CVT_SCALE_FUNC(32s16u, int, ushort, float);
DEF_CVT_SCALE_FUNC(32f16u, float, ushort, float);
DEF_CVT_SCALE_FUNC(64f16u, double, ushort, float);

// -> 16s
DEF_CVT_SCALE_FUNC(8u16s, uchar, short, float);
DEF_CVT_SCALE_FUNC(8s16s, schar, short, float);
DEF_CVT_SCALE_FUNC(16u16s, ushort, short, float);
DEF_CVT_SCALE_FUNC(16s, short, short, float);
DEF_CVT_SCALE_FUNC(32s16s, int, short, float);
DEF_CVT_SCALE_FUNC(32f16s, float, short, float);
DEF_CVT_SCALE_FUNC(64f16s, double, short, float);

// -> 32s (double working type where float would lose integer precision)
DEF_CVT_SCALE_FUNC(8u32s, uchar, int, float);
DEF_CVT_SCALE_FUNC(8s32s, schar, int, float);
DEF_CVT_SCALE_FUNC(16u32s, ushort, int, float);
DEF_CVT_SCALE_FUNC(16s32s, short, int, float);
DEF_CVT_SCALE_FUNC(32s, int, int, double);
DEF_CVT_SCALE_FUNC(32f32s, float, int, float);
DEF_CVT_SCALE_FUNC(64f32s, double, int, double);

// -> 32f
DEF_CVT_SCALE_FUNC(8u32f, uchar, float, float);
DEF_CVT_SCALE_FUNC(8s32f, schar, float, float);
DEF_CVT_SCALE_FUNC(16u32f, ushort, float, float);
DEF_CVT_SCALE_FUNC(16s32f, short, float, float);
DEF_CVT_SCALE_FUNC(32s32f, int, float, double);
DEF_CVT_SCALE_FUNC(32f, float, float, float);
DEF_CVT_SCALE_FUNC(64f32f, double, float, double);

// -> 64f
DEF_CVT_SCALE_FUNC(8u64f, uchar, double, double);
DEF_CVT_SCALE_FUNC(8s64f, schar, double, double);
DEF_CVT_SCALE_FUNC(16u64f, ushort, double, double);
DEF_CVT_SCALE_FUNC(16s64f, short, double, double);
DEF_CVT_SCALE_FUNC(32s64f, int, double, double);
DEF_CVT_SCALE_FUNC(32f64f, float, double, double);
DEF_CVT_SCALE_FUNC(64f, double, double, double);

// Unscaled conversions; same-size identity conversions use the memcpy-based
// DEF_CPY_FUNC. -> 8u
DEF_CPY_FUNC(8u, uchar);
DEF_CVT_FUNC(8s8u, schar, uchar);
DEF_CVT_FUNC(16u8u, ushort, uchar);
DEF_CVT_FUNC(16s8u, short, uchar);
DEF_CVT_FUNC(32s8u, int, uchar);
DEF_CVT_FUNC(32f8u, float, uchar);
DEF_CVT_FUNC(64f8u, double, uchar);

// -> 8s
DEF_CVT_FUNC(8u8s, uchar, schar);
DEF_CVT_FUNC(16u8s, ushort, schar);
DEF_CVT_FUNC(16s8s, short, schar);
DEF_CVT_FUNC(32s8s, int, schar);
DEF_CVT_FUNC(32f8s, float, schar);
DEF_CVT_FUNC(64f8s, double, schar);

// -> 16u
DEF_CVT_FUNC(8u16u, uchar, ushort);
DEF_CVT_FUNC(8s16u, schar, ushort);
DEF_CPY_FUNC(16u, ushort);
DEF_CVT_FUNC(16s16u, short, ushort);
DEF_CVT_FUNC(32s16u, int, ushort);
DEF_CVT_FUNC(32f16u, float, ushort);
DEF_CVT_FUNC(64f16u, double, ushort);

// -> 16s
DEF_CVT_FUNC(8u16s, uchar, short);
DEF_CVT_FUNC(8s16s, schar, short);
DEF_CVT_FUNC(16u16s, ushort, short);
DEF_CVT_FUNC(32s16s, int, short);
DEF_CVT_FUNC(32f16s, float, short);
DEF_CVT_FUNC(64f16s, double, short);

// -> 32s
DEF_CVT_FUNC(8u32s, uchar, int);
DEF_CVT_FUNC(8s32s, schar, int);
DEF_CVT_FUNC(16u32s, ushort, int);
DEF_CVT_FUNC(16s32s, short, int);
DEF_CPY_FUNC(32s, int);
DEF_CVT_FUNC(32f32s, float, int);
DEF_CVT_FUNC(64f32s, double, int);

// -> 32f
DEF_CVT_FUNC(8u32f, uchar, float);
DEF_CVT_FUNC(8s32f, schar, float);
DEF_CVT_FUNC(16u32f, ushort, float);
DEF_CVT_FUNC(16s32f, short, float);
DEF_CVT_FUNC(32s32f, int, float);
DEF_CVT_FUNC(64f32f, double, float);

// -> 64f
DEF_CVT_FUNC(8u64f, uchar, double);
DEF_CVT_FUNC(8s64f, schar, double);
DEF_CVT_FUNC(16u64f, ushort, double);
DEF_CVT_FUNC(16s64f, short, double);
DEF_CVT_FUNC(32s64f, int, double);
DEF_CVT_FUNC(32f64f, float, double);
// 64-bit copy thunk (used for same-size 64-bit identity conversions).
DEF_CPY_FUNC(64s, int64);
869 static BinaryFunc cvtScaleAbsTab[] =
871 (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u,
872 (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u,
873 (BinaryFunc)cvtScaleAbs64f8u, 0
876 static BinaryFunc cvtScaleTab[][8] =
879 (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8u),
880 (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u),
881 (BinaryFunc)cvtScale64f8u, 0
884 (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s),
885 (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s),
886 (BinaryFunc)cvtScale64f8s, 0
889 (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u),
890 (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u),
891 (BinaryFunc)cvtScale64f16u, 0
894 (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s),
895 (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s),
896 (BinaryFunc)cvtScale64f16s, 0
899 (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32s),
900 (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s),
901 (BinaryFunc)cvtScale64f32s, 0
904 (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f),
905 (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f),
906 (BinaryFunc)cvtScale64f32f, 0
909 (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f,
910 (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f,
911 (BinaryFunc)cvtScale64f, 0
914 0, 0, 0, 0, 0, 0, 0, 0
918 static BinaryFunc cvtTab[][8] =
921 (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u),
922 (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u),
923 (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0
926 (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s),
927 (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s),
928 (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0
931 (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u,
932 (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u),
933 (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0
936 (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s),
937 (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s),
938 (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0
941 (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s),
942 (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s),
943 (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0
946 (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f),
947 (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s,
948 (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0
951 (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f),
952 (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f),
953 (BinaryFunc)(cvt64s), 0
956 0, 0, 0, 0, 0, 0, 0, 0
960 BinaryFunc getConvertFunc(int sdepth, int ddepth)
962 return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
965 BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
967 return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
// dst = saturate_cast<uchar>(|src*alpha + beta|); output is always CV_8U with
// the source channel count. Continuous data is processed as a single 2D block;
// otherwise plane-by-plane via NAryMatIterator.
void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
    Mat src = _src.getMat();
    int cn = src.channels();
    // {alpha, beta} are passed to the worker through a two-double array.
    double scale[] = {alpha, beta};
    _dst.create( src.dims, src.size, CV_8UC(cn) );
    Mat dst = _dst.getMat();
    BinaryFunc func = cvtScaleAbsTab[src.depth()];
    CV_Assert( func != 0 );
    // Continuous case: treat the whole matrix as one wide row pair.
    Size sz = getContinuousSize(src, dst, cn);
    func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
    // General case: iterate over contiguous planes; each plane is one row
    // of it.size*cn elements (steps are irrelevant, hence 0).
    const Mat* arrays[] = {&src, &dst, 0};
    NAryMatIterator it(arrays, ptrs);
    Size sz((int)it.size*cn, 1);
    for( size_t i = 0; i < it.nplanes; i++, ++it )
        func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
// Convert *this to the requested depth with optional scaling:
// dst = saturate_cast<ddepth>(src*alpha + beta). The channel count is always
// preserved; only the depth of _type is honored. When the depth is unchanged
// and alpha==1, beta==0, the conversion degenerates to a copy.
void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
    // noScale selects the plain conversion table over the scaling one.
    bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;
        // Negative _type means "keep the source type" (or the fixed output type).
        _type = _dst.fixedType() ? _dst.type() : type();
    // Force the source channel count; convertTo never changes channels.
        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());
    int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
    // Same depth and identity scaling: plain copy suffices.
    if( sdepth == ddepth && noScale )
    BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
    double scale[] = {alpha, beta};
    int cn = channels();
    CV_Assert( func != 0 );
        // 2D path: process as a (possibly single-row) continuous block.
        _dst.create( size(), _type );
        Mat dst = _dst.getMat();
        Size sz = getContinuousSize(src, dst, cn);
        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
        // N-dimensional path: iterate over contiguous planes; each plane is
        // one row of it.size*cn elements (steps irrelevant, hence 0).
        _dst.create( dims, size, _type );
        Mat dst = _dst.getMat();
        const Mat* arrays[] = {&src, &dst, 0};
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)(it.size*cn), 1);
        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func(ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale);
1043 /****************************************************************************************\
1045 \****************************************************************************************/
1050 template<typename T> static void
1051 LUT8u_( const uchar* src, const T* lut, T* dst, int len, int cn, int lutcn )
1055 for( int i = 0; i < len*cn; i++ )
1056 dst[i] = lut[src[i]];
1060 for( int i = 0; i < len*cn; i += cn )
1061 for( int k = 0; k < cn; k++ )
1062 dst[i+k] = lut[src[i+k]*cn+k];
1066 static void LUT8u_8u( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn )
1068 LUT8u_( src, lut, dst, len, cn, lutcn );
1071 static void LUT8u_8s( const uchar* src, const schar* lut, schar* dst, int len, int cn, int lutcn )
1073 LUT8u_( src, lut, dst, len, cn, lutcn );
1076 static void LUT8u_16u( const uchar* src, const ushort* lut, ushort* dst, int len, int cn, int lutcn )
1078 LUT8u_( src, lut, dst, len, cn, lutcn );
1081 static void LUT8u_16s( const uchar* src, const short* lut, short* dst, int len, int cn, int lutcn )
1083 LUT8u_( src, lut, dst, len, cn, lutcn );
1086 static void LUT8u_32s( const uchar* src, const int* lut, int* dst, int len, int cn, int lutcn )
1088 LUT8u_( src, lut, dst, len, cn, lutcn );
1091 static void LUT8u_32f( const uchar* src, const float* lut, float* dst, int len, int cn, int lutcn )
1093 LUT8u_( src, lut, dst, len, cn, lutcn );
1096 static void LUT8u_64f( const uchar* src, const double* lut, double* dst, int len, int cn, int lutcn )
1098 LUT8u_( src, lut, dst, len, cn, lutcn );
// Uniform function-pointer type for the LUT dispatch table, indexed by LUT depth.
typedef void (*LUTFunc)( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn );
1103 static LUTFunc lutTab[] =
1105 (LUTFunc)LUT8u_8u, (LUTFunc)LUT8u_8s, (LUTFunc)LUT8u_16u, (LUTFunc)LUT8u_16s,
1106 (LUTFunc)LUT8u_32s, (LUTFunc)LUT8u_32f, (LUTFunc)LUT8u_64f, 0
// Look-up-table transform: dst(i) = lut(src(i)). The source must be 8-bit
// (8u/8s), the table must be continuous with 256 entries and either one
// channel (shared) or the same channel count as src. Output takes the table's
// depth and the source's channel count.
void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst, int interpolation )
    Mat src = _src.getMat(), lut = _lut.getMat();
    // 'interpolation' is accepted for historical API compatibility; only 0 is valid.
    CV_Assert( interpolation == 0 );
    int cn = src.channels();
    int lutcn = lut.channels();
    CV_Assert( (lutcn == cn || lutcn == 1) &&
        lut.total() == 256 && lut.isContinuous() &&
        (src.depth() == CV_8U || src.depth() == CV_8S) );
    _dst.create( src.dims, src.size, CV_MAKETYPE(lut.depth(), cn));
    Mat dst = _dst.getMat();
    // Worker is chosen by the table's depth (source is always 8-bit).
    LUTFunc func = lutTab[lut.depth()];
    CV_Assert( func != 0 );
    // Process plane by plane over possibly non-continuous data.
    const Mat* arrays[] = {&src, &dst, 0};
    NAryMatIterator it(arrays, ptrs);
    int len = (int)it.size;
    for( size_t i = 0; i < it.nplanes; i++, ++it )
        func(ptrs[0], lut.data, ptrs[1], len, cn, lutcn);
1137 void cv::normalize( InputArray _src, OutputArray _dst, double a, double b,
1138 int norm_type, int rtype, InputArray _mask )
1140 Mat src = _src.getMat(), mask = _mask.getMat();
1142 double scale = 1, shift = 0;
1143 if( norm_type == CV_MINMAX )
1145 double smin = 0, smax = 0;
1146 double dmin = MIN( a, b ), dmax = MAX( a, b );
1147 minMaxLoc( _src, &smin, &smax, 0, 0, mask );
1148 scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
1149 shift = dmin - smin*scale;
1151 else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
1153 scale = norm( src, norm_type, mask );
1154 scale = scale > DBL_EPSILON ? a/scale : 0.;
1158 CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
1161 rtype = _dst.fixedType() ? _dst.depth() : src.depth();
1163 _dst.create(src.dims, src.size, CV_MAKETYPE(rtype, src.channels()));
1164 Mat dst = _dst.getMat();
1167 src.convertTo( dst, rtype, scale, shift );
1171 src.convertTo( temp, rtype, scale, shift );
1172 temp.copyTo( dst, mask );
1177 cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 )
1179 void* dptrs[] = { dstarr0, dstarr1, dstarr2, dstarr3 };
1180 cv::Mat src = cv::cvarrToMat(srcarr);
1182 for( i = 0; i < 4; i++ )
1183 nz += dptrs[i] != 0;
1184 CV_Assert( nz > 0 );
1185 cv::vector<cv::Mat> dvec(nz);
1186 cv::vector<int> pairs(nz*2);
1188 for( i = j = 0; i < 4; i++ )
1192 dvec[j] = cv::cvarrToMat(dptrs[i]);
1193 CV_Assert( dvec[j].size() == src.size() );
1194 CV_Assert( dvec[j].depth() == src.depth() );
1195 CV_Assert( dvec[j].channels() == 1 );
1196 CV_Assert( i < src.channels() );
1202 if( nz == src.channels() )
1203 cv::split( src, dvec );
1206 cv::mixChannels( &src, 1, &dvec[0], nz, &pairs[0], nz );
1212 cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2,
1213 const void* srcarr3, void* dstarr )
1215 const void* sptrs[] = { srcarr0, srcarr1, srcarr2, srcarr3 };
1216 cv::Mat dst = cv::cvarrToMat(dstarr);
1218 for( i = 0; i < 4; i++ )
1219 nz += sptrs[i] != 0;
1220 CV_Assert( nz > 0 );
1221 cv::vector<cv::Mat> svec(nz);
1222 cv::vector<int> pairs(nz*2);
1224 for( i = j = 0; i < 4; i++ )
1228 svec[j] = cv::cvarrToMat(sptrs[i]);
1229 CV_Assert( svec[j].size == dst.size &&
1230 svec[j].depth() == dst.depth() &&
1231 svec[j].channels() == 1 && i < dst.channels() );
1238 if( nz == dst.channels() )
1239 cv::merge( svec, dst );
1242 cv::mixChannels( &svec[0], nz, &dst, 1, &pairs[0], nz );
1248 cvMixChannels( const CvArr** src, int src_count,
1249 CvArr** dst, int dst_count,
1250 const int* from_to, int pair_count )
1252 cv::AutoBuffer<cv::Mat, 32> buf(src_count + dst_count);
1255 for( i = 0; i < src_count; i++ )
1256 buf[i] = cv::cvarrToMat(src[i]);
1257 for( i = 0; i < dst_count; i++ )
1258 buf[i+src_count] = cv::cvarrToMat(dst[i]);
1259 cv::mixChannels(&buf[0], src_count, &buf[src_count], dst_count, from_to, pair_count);
1263 cvConvertScaleAbs( const void* srcarr, void* dstarr,
1264 double scale, double shift )
1266 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
1267 CV_Assert( src.size == dst.size && dst.type() == CV_8UC(src.channels()));
1268 cv::convertScaleAbs( src, dst, scale, shift );
1272 cvConvertScale( const void* srcarr, void* dstarr,
1273 double scale, double shift )
1275 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
1277 CV_Assert( src.size == dst.size && src.channels() == dst.channels() );
1278 src.convertTo(dst, dst.type(), scale, shift);
1281 CV_IMPL void cvLUT( const void* srcarr, void* dstarr, const void* lutarr )
1283 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), lut = cv::cvarrToMat(lutarr);
1285 CV_Assert( dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels()) );
1286 cv::LUT( src, lut, dst );
1289 CV_IMPL void cvNormalize( const CvArr* srcarr, CvArr* dstarr,
1290 double a, double b, int norm_type, const CvArr* maskarr )
1292 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
1294 mask = cv::cvarrToMat(maskarr);
1295 CV_Assert( dst.size() == src.size() && src.channels() == dst.channels() );
1296 cv::normalize( src, dst, a, b, norm_type, dst.type(), mask );