/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
43 #include "precomp.hpp"
44 #include "opencl_kernels_core.hpp"
/****************************************************************************************\
*                                       split & merge                                    *
\****************************************************************************************/
59 template<typename T> struct VSplit2;
60 template<typename T> struct VSplit3;
61 template<typename T> struct VSplit4;
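// NEON helpers: each VSplitN<T> functor de-interleaves N-channel packed data
// with a single vldNq load and writes every channel out with a plain vst1q
// store; the VMergeN functors further below do the inverse. One call consumes
// one 128-bit register per channel (a single element for the int64 variants).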
63 #define SPLIT2_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \
65 struct name<data_type>{ \
66 void operator()(const data_type* src, data_type* dst0, data_type* dst1){ \
67 reg_type r = load_func(src); \
68 store_func(dst0, r.val[0]); \
69 store_func(dst1, r.val[1]); \
73 #define SPLIT3_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \
75 struct name<data_type>{ \
76 void operator()(const data_type* src, data_type* dst0, data_type* dst1, \
78 reg_type r = load_func(src); \
79 store_func(dst0, r.val[0]); \
80 store_func(dst1, r.val[1]); \
81 store_func(dst2, r.val[2]); \
85 #define SPLIT4_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \
87 struct name<data_type>{ \
88 void operator()(const data_type* src, data_type* dst0, data_type* dst1, \
89 data_type* dst2, data_type* dst3){ \
90 reg_type r = load_func(src); \
91 store_func(dst0, r.val[0]); \
92 store_func(dst1, r.val[1]); \
93 store_func(dst2, r.val[2]); \
94 store_func(dst3, r.val[3]); \
98 SPLIT2_KERNEL_TEMPLATE(VSplit2, uchar , uint8x16x2_t, vld2q_u8 , vst1q_u8 );
99 SPLIT2_KERNEL_TEMPLATE(VSplit2, schar , int8x16x2_t, vld2q_s8 , vst1q_s8 );
100 SPLIT2_KERNEL_TEMPLATE(VSplit2, ushort, uint16x8x2_t, vld2q_u16, vst1q_u16);
101 SPLIT2_KERNEL_TEMPLATE(VSplit2, short , int16x8x2_t, vld2q_s16, vst1q_s16);
102 SPLIT2_KERNEL_TEMPLATE(VSplit2, int , int32x4x2_t, vld2q_s32, vst1q_s32);
103 SPLIT2_KERNEL_TEMPLATE(VSplit2, float , float32x4x2_t, vld2q_f32, vst1q_f32);
104 SPLIT2_KERNEL_TEMPLATE(VSplit2, int64 , int64x1x2_t, vld2_s64 , vst1_s64 );
106 SPLIT3_KERNEL_TEMPLATE(VSplit3, uchar , uint8x16x3_t, vld3q_u8 , vst1q_u8 );
107 SPLIT3_KERNEL_TEMPLATE(VSplit3, schar , int8x16x3_t, vld3q_s8 , vst1q_s8 );
108 SPLIT3_KERNEL_TEMPLATE(VSplit3, ushort, uint16x8x3_t, vld3q_u16, vst1q_u16);
109 SPLIT3_KERNEL_TEMPLATE(VSplit3, short , int16x8x3_t, vld3q_s16, vst1q_s16);
110 SPLIT3_KERNEL_TEMPLATE(VSplit3, int , int32x4x3_t, vld3q_s32, vst1q_s32);
111 SPLIT3_KERNEL_TEMPLATE(VSplit3, float , float32x4x3_t, vld3q_f32, vst1q_f32);
112 SPLIT3_KERNEL_TEMPLATE(VSplit3, int64 , int64x1x3_t, vld3_s64 , vst1_s64 );
114 SPLIT4_KERNEL_TEMPLATE(VSplit4, uchar , uint8x16x4_t, vld4q_u8 , vst1q_u8 );
115 SPLIT4_KERNEL_TEMPLATE(VSplit4, schar , int8x16x4_t, vld4q_s8 , vst1q_s8 );
116 SPLIT4_KERNEL_TEMPLATE(VSplit4, ushort, uint16x8x4_t, vld4q_u16, vst1q_u16);
117 SPLIT4_KERNEL_TEMPLATE(VSplit4, short , int16x8x4_t, vld4q_s16, vst1q_s16);
118 SPLIT4_KERNEL_TEMPLATE(VSplit4, int , int32x4x4_t, vld4q_s32, vst1q_s32);
119 SPLIT4_KERNEL_TEMPLATE(VSplit4, float , float32x4x4_t, vld4q_f32, vst1q_f32);
120 SPLIT4_KERNEL_TEMPLATE(VSplit4, int64 , int64x1x4_t, vld4_s64 , vst1_s64 );
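// split_: scatters an interleaved cn-channel buffer into separate per-channel
// planes. The first (cn % 4, or 4) channels are handled by a dedicated
// 1/2/3/4-channel loop and any remaining channels are processed four at a
// time; with CV_NEON the multi-channel loops use the VSplitN functors above
// for the bulk of the row and finish the tail with scalar code.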
123 template<typename T> static void
124 split_( const T* src, T** dst, int len, int cn )
126 int k = cn % 4 ? cn % 4 : 4;
134 memcpy(dst0, src, len * sizeof(T));
138 for( i = 0, j = 0 ; i < len; i++, j += cn )
144 T *dst0 = dst[0], *dst1 = dst[1];
150 int inc_i = (sizeof(T) == 8)? 1: 16/sizeof(T);
151 int inc_j = 2 * inc_i;
154 for( ; i < len - inc_i; i += inc_i, j += inc_j)
155 vsplit(src + j, dst0 + i, dst1 + i);
158 for( ; i < len; i++, j += cn )
166 T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2];
172 int inc_i = (sizeof(T) == 8)? 1: 16/sizeof(T);
173 int inc_j = 3 * inc_i;
176 for( ; i <= len - inc_i; i += inc_i, j += inc_j)
177 vsplit(src + j, dst0 + i, dst1 + i, dst2 + i);
180 for( ; i < len; i++, j += cn )
189 T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2], *dst3 = dst[3];
195 int inc_i = (sizeof(T) == 8)? 1: 16/sizeof(T);
196 int inc_j = 4 * inc_i;
199 for( ; i <= len - inc_i; i += inc_i, j += inc_j)
200 vsplit(src + j, dst0 + i, dst1 + i, dst2 + i, dst3 + i);
203 for( ; i < len; i++, j += cn )
205 dst0[i] = src[j]; dst1[i] = src[j+1];
206 dst2[i] = src[j+2]; dst3[i] = src[j+3];
210 for( ; k < cn; k += 4 )
212 T *dst0 = dst[k], *dst1 = dst[k+1], *dst2 = dst[k+2], *dst3 = dst[k+3];
213 for( i = 0, j = k; i < len; i++, j += cn )
215 dst0[i] = src[j]; dst1[i] = src[j+1];
216 dst2[i] = src[j+2]; dst3[i] = src[j+3];
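// The VMergeN functors mirror VSplitN: each source plane is read with a plain
// vld1q load and the channels are written back interleaved with a single
// vst2q/vst3q/vst4q store.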
223 template<typename T> struct VMerge2;
224 template<typename T> struct VMerge3;
225 template<typename T> struct VMerge4;
227 #define MERGE2_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \
229 struct name<data_type>{ \
230 void operator()(const data_type* src0, const data_type* src1, \
233 r.val[0] = load_func(src0); \
234 r.val[1] = load_func(src1); \
235 store_func(dst, r); \
239 #define MERGE3_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \
241 struct name<data_type>{ \
242 void operator()(const data_type* src0, const data_type* src1, \
243 const data_type* src2, data_type* dst){ \
245 r.val[0] = load_func(src0); \
246 r.val[1] = load_func(src1); \
247 r.val[2] = load_func(src2); \
248 store_func(dst, r); \
252 #define MERGE4_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \
254 struct name<data_type>{ \
255 void operator()(const data_type* src0, const data_type* src1, \
256 const data_type* src2, const data_type* src3, \
259 r.val[0] = load_func(src0); \
260 r.val[1] = load_func(src1); \
261 r.val[2] = load_func(src2); \
262 r.val[3] = load_func(src3); \
263 store_func(dst, r); \
267 MERGE2_KERNEL_TEMPLATE(VMerge2, uchar , uint8x16x2_t, vld1q_u8 , vst2q_u8 );
268 MERGE2_KERNEL_TEMPLATE(VMerge2, schar , int8x16x2_t, vld1q_s8 , vst2q_s8 );
269 MERGE2_KERNEL_TEMPLATE(VMerge2, ushort, uint16x8x2_t, vld1q_u16, vst2q_u16);
270 MERGE2_KERNEL_TEMPLATE(VMerge2, short , int16x8x2_t, vld1q_s16, vst2q_s16);
271 MERGE2_KERNEL_TEMPLATE(VMerge2, int , int32x4x2_t, vld1q_s32, vst2q_s32);
272 MERGE2_KERNEL_TEMPLATE(VMerge2, float , float32x4x2_t, vld1q_f32, vst2q_f32);
273 MERGE2_KERNEL_TEMPLATE(VMerge2, int64 , int64x1x2_t, vld1_s64 , vst2_s64 );
275 MERGE3_KERNEL_TEMPLATE(VMerge3, uchar , uint8x16x3_t, vld1q_u8 , vst3q_u8 );
276 MERGE3_KERNEL_TEMPLATE(VMerge3, schar , int8x16x3_t, vld1q_s8 , vst3q_s8 );
277 MERGE3_KERNEL_TEMPLATE(VMerge3, ushort, uint16x8x3_t, vld1q_u16, vst3q_u16);
278 MERGE3_KERNEL_TEMPLATE(VMerge3, short , int16x8x3_t, vld1q_s16, vst3q_s16);
279 MERGE3_KERNEL_TEMPLATE(VMerge3, int , int32x4x3_t, vld1q_s32, vst3q_s32);
280 MERGE3_KERNEL_TEMPLATE(VMerge3, float , float32x4x3_t, vld1q_f32, vst3q_f32);
281 MERGE3_KERNEL_TEMPLATE(VMerge3, int64 , int64x1x3_t, vld1_s64 , vst3_s64 );
283 MERGE4_KERNEL_TEMPLATE(VMerge4, uchar , uint8x16x4_t, vld1q_u8 , vst4q_u8 );
284 MERGE4_KERNEL_TEMPLATE(VMerge4, schar , int8x16x4_t, vld1q_s8 , vst4q_s8 );
285 MERGE4_KERNEL_TEMPLATE(VMerge4, ushort, uint16x8x4_t, vld1q_u16, vst4q_u16);
286 MERGE4_KERNEL_TEMPLATE(VMerge4, short , int16x8x4_t, vld1q_s16, vst4q_s16);
287 MERGE4_KERNEL_TEMPLATE(VMerge4, int , int32x4x4_t, vld1q_s32, vst4q_s32);
288 MERGE4_KERNEL_TEMPLATE(VMerge4, float , float32x4x4_t, vld1q_f32, vst4q_f32);
289 MERGE4_KERNEL_TEMPLATE(VMerge4, int64 , int64x1x4_t, vld1_s64 , vst4_s64 );
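// merge_: the inverse of split_. The first (cn % 4, or 4) planes are
// interleaved by a dedicated 1/2/3/4-plane loop, the rest four at a time,
// again with NEON fast paths for the bulk of the row and scalar tails.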
292 template<typename T> static void
293 merge_( const T** src, T* dst, int len, int cn )
295 int k = cn % 4 ? cn % 4 : 4;
299 const T* src0 = src[0];
300 for( i = j = 0; i < len; i++, j += cn )
305 const T *src0 = src[0], *src1 = src[1];
310 int inc_i = (sizeof(T) == 8)? 1: 16/sizeof(T);
311 int inc_j = 2 * inc_i;
314 for( ; i < len - inc_i; i += inc_i, j += inc_j)
315 vmerge(src0 + i, src1 + i, dst + j);
318 for( ; i < len; i++, j += cn )
326 const T *src0 = src[0], *src1 = src[1], *src2 = src[2];
331 int inc_i = (sizeof(T) == 8)? 1: 16/sizeof(T);
332 int inc_j = 3 * inc_i;
335 for( ; i < len - inc_i; i += inc_i, j += inc_j)
336 vmerge(src0 + i, src1 + i, src2 + i, dst + j);
339 for( ; i < len; i++, j += cn )
348 const T *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
353 int inc_i = (sizeof(T) == 8)? 1: 16/sizeof(T);
354 int inc_j = 4 * inc_i;
357 for( ; i < len - inc_i; i += inc_i, j += inc_j)
358 vmerge(src0 + i, src1 + i, src2 + i, src3 + i, dst + j);
361 for( ; i < len; i++, j += cn )
363 dst[j] = src0[i]; dst[j+1] = src1[i];
364 dst[j+2] = src2[i]; dst[j+3] = src3[i];
368 for( ; k < cn; k += 4 )
370 const T *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
371 for( i = 0, j = k; i < len; i++, j += cn )
373 dst[j] = src0[i]; dst[j+1] = src1[i];
374 dst[j+2] = src2[i]; dst[j+3] = src3[i];
379 static void split8u(const uchar* src, uchar** dst, int len, int cn )
381 split_(src, dst, len, cn);
384 static void split16u(const ushort* src, ushort** dst, int len, int cn )
386 split_(src, dst, len, cn);
389 static void split32s(const int* src, int** dst, int len, int cn )
391 split_(src, dst, len, cn);
394 static void split64s(const int64* src, int64** dst, int len, int cn )
396 split_(src, dst, len, cn);
399 static void merge8u(const uchar** src, uchar* dst, int len, int cn )
401 merge_(src, dst, len, cn);
404 static void merge16u(const ushort** src, ushort* dst, int len, int cn )
406 merge_(src, dst, len, cn);
409 static void merge32s(const int** src, int* dst, int len, int cn )
411 merge_(src, dst, len, cn);
414 static void merge64s(const int64** src, int64* dst, int len, int cn )
416 merge_(src, dst, len, cn);
419 typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn);
420 typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn);
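// Dispatch tables indexed by CV_MAT_DEPTH. Split and merge only move bytes,
// so depths of equal element size share one kernel: CV_8S reuses split8u /
// merge8u, CV_16S the 16u kernels, CV_32F the 32s kernels and CV_64F the 64s
// kernels. The trailing 0 corresponds to the unsupported CV_USRTYPE1 slot.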
422 static SplitFunc getSplitFunc(int depth)
424 static SplitFunc splitTab[] =
426 (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split16u), (SplitFunc)GET_OPTIMIZED(split16u),
427 (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split64s), 0
430 return splitTab[depth];
433 static MergeFunc getMergeFunc(int depth)
435 static MergeFunc mergeTab[] =
437 (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u),
438 (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0
441 return mergeTab[depth];
446 void cv::split(const Mat& src, Mat* mv)
448 int k, depth = src.depth(), cn = src.channels();
455 SplitFunc func = getSplitFunc(depth);
456 CV_Assert( func != 0 );
458 int esz = (int)src.elemSize(), esz1 = (int)src.elemSize1();
459 int blocksize0 = (BLOCK_SIZE + esz-1)/esz;
460 AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
461 const Mat** arrays = (const Mat**)(uchar*)_buf;
462 uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);
465 for( k = 0; k < cn; k++ )
467 mv[k].create(src.dims, src.size, depth);
468 arrays[k+1] = &mv[k];
471 NAryMatIterator it(arrays, ptrs, cn+1);
472 int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
474 for( size_t i = 0; i < it.nplanes; i++, ++it )
476 for( int j = 0; j < total; j += blocksize )
478 int bsz = std::min(total - j, blocksize);
479 func( ptrs[0], &ptrs[1], bsz, cn );
481 if( j + blocksize < total )
484 for( k = 0; k < cn; k++ )
485 ptrs[k+1] += bsz*esz1;
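// OpenCL path for cv::split: builds the "split" kernel from split_merge.cl,
// expanding one DECLARE_DST_PARAM / DECLARE_INDEX / PROCESS_ELEM macro per
// destination channel, and lets each work item handle rowsPerWI rows.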
495 static bool ocl_split( InputArray _m, OutputArrayOfArrays _mv )
497 int type = _m.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
498 rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
500 String dstargs, processelem, indexdecl;
501 for (int i = 0; i < cn; ++i)
503 dstargs += format("DECLARE_DST_PARAM(%d)", i);
504 indexdecl += format("DECLARE_INDEX(%d)", i);
505 processelem += format("PROCESS_ELEM(%d)", i);
508 ocl::Kernel k("split", ocl::core::split_merge_oclsrc,
509 format("-D T=%s -D OP_SPLIT -D cn=%d -D DECLARE_DST_PARAMS=%s"
510 " -D PROCESS_ELEMS_N=%s -D DECLARE_INDEX_N=%s",
511 ocl::memopTypeToStr(depth), cn, dstargs.c_str(),
512 processelem.c_str(), indexdecl.c_str()));
516 Size size = _m.size();
517 _mv.create(cn, 1, depth);
518 for (int i = 0; i < cn; ++i)
519 _mv.create(size, depth, i);
521 std::vector<UMat> dst;
522 _mv.getUMatVector(dst);
524 int argidx = k.set(0, ocl::KernelArg::ReadOnly(_m.getUMat()));
525 for (int i = 0; i < cn; ++i)
526 argidx = k.set(argidx, ocl::KernelArg::WriteOnlyNoSize(dst[i]));
527 k.set(argidx, rowsPerWI);
529 size_t globalsize[2] = { size.width, (size.height + rowsPerWI - 1) / rowsPerWI };
530 return k.run(2, globalsize, NULL, false);
537 void cv::split(InputArray _m, OutputArrayOfArrays _mv)
539 CV_OCL_RUN(_m.dims() <= 2 && _mv.isUMatVector(),
549 CV_Assert( !_mv.fixedType() || _mv.empty() || _mv.type() == m.depth() );
551 Size size = m.size();
552 int depth = m.depth(), cn = m.channels();
553 _mv.create(cn, 1, depth);
554 for (int i = 0; i < cn; ++i)
555 _mv.create(size, depth, i);
557 std::vector<Mat> dst;
558 _mv.getMatVector(dst);
563 void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
565 CV_Assert( mv && n > 0 );
567 int depth = mv[0].depth();
572 for( i = 0; i < n; i++ )
574 CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
575 allch1 = allch1 && mv[i].channels() == 1;
576 cn += mv[i].channels();
579 CV_Assert( 0 < cn && cn <= CV_CN_MAX );
580 _dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
581 Mat dst = _dst.getMat();
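// When some of the inputs are not single-channel, merging falls back to
// mixChannels() with an identity from->to mapping that simply concatenates
// all input channels into the destination.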
591 AutoBuffer<int> pairs(cn*2);
594 for( i = 0, j = 0; i < n; i++, j += ni )
596 ni = mv[i].channels();
597 for( k = 0; k < ni; k++ )
599 pairs[(j+k)*2] = j + k;
600 pairs[(j+k)*2+1] = j + k;
603 mixChannels( mv, n, &dst, 1, &pairs[0], cn );
607 size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
608 int blocksize0 = (int)((BLOCK_SIZE + esz-1)/esz);
609 AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
610 const Mat** arrays = (const Mat**)(uchar*)_buf;
611 uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);
614 for( k = 0; k < cn; k++ )
615 arrays[k+1] = &mv[k];
617 NAryMatIterator it(arrays, ptrs, cn+1);
618 int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
619 MergeFunc func = getMergeFunc(depth);
621 for( i = 0; i < it.nplanes; i++, ++it )
623 for( int j = 0; j < total; j += blocksize )
625 int bsz = std::min(total - j, blocksize);
626 func( (const uchar**)&ptrs[1], ptrs[0], bsz, cn );
628 if( j + blocksize < total )
631 for( int t = 0; t < cn; t++ )
632 ptrs[t+1] += bsz*esz1;
642 static bool ocl_merge( InputArrayOfArrays _mv, OutputArray _dst )
644 std::vector<UMat> src, ksrc;
645 _mv.getUMatVector(src);
646 CV_Assert(!src.empty());
648 int type = src[0].type(), depth = CV_MAT_DEPTH(type),
649 rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
650 Size size = src[0].size();
652 for (size_t i = 0, srcsize = src.size(); i < srcsize; ++i)
654 int itype = src[i].type(), icn = CV_MAT_CN(itype), idepth = CV_MAT_DEPTH(itype),
655 esz1 = CV_ELEM_SIZE1(idepth);
659 CV_Assert(size == src[i].size() && depth == idepth);
661 for (int cn = 0; cn < icn; ++cn)
664 tsrc.offset += cn * esz1;
665 ksrc.push_back(tsrc);
668 int dcn = (int)ksrc.size();
670 String srcargs, processelem, cndecl, indexdecl;
671 for (int i = 0; i < dcn; ++i)
673 srcargs += format("DECLARE_SRC_PARAM(%d)", i);
674 processelem += format("PROCESS_ELEM(%d)", i);
675 indexdecl += format("DECLARE_INDEX(%d)", i);
676 cndecl += format(" -D scn%d=%d", i, ksrc[i].channels());
679 ocl::Kernel k("merge", ocl::core::split_merge_oclsrc,
680 format("-D OP_MERGE -D cn=%d -D T=%s -D DECLARE_SRC_PARAMS_N=%s"
681 " -D DECLARE_INDEX_N=%s -D PROCESS_ELEMS_N=%s%s",
682 dcn, ocl::memopTypeToStr(depth), srcargs.c_str(),
683 indexdecl.c_str(), processelem.c_str(), cndecl.c_str()));
687 _dst.create(size, CV_MAKE_TYPE(depth, dcn));
688 UMat dst = _dst.getUMat();
691 for (int i = 0; i < dcn; ++i)
692 argidx = k.set(argidx, ocl::KernelArg::ReadOnlyNoSize(ksrc[i]));
693 argidx = k.set(argidx, ocl::KernelArg::WriteOnly(dst));
694 k.set(argidx, rowsPerWI);
696 size_t globalsize[2] = { dst.cols, (dst.rows + rowsPerWI - 1) / rowsPerWI };
697 return k.run(2, globalsize, NULL, false);
704 void cv::merge(InputArrayOfArrays _mv, OutputArray _dst)
706 CV_OCL_RUN(_mv.isUMatVector() && _dst.isUMat(),
707 ocl_merge(_mv, _dst))
710 _mv.getMatVector(mv);
711 merge(!mv.empty() ? &mv[0] : 0, mv.size(), _dst);
714 /****************************************************************************************\
715 * Generalized split/merge: mixing channels *
716 \****************************************************************************************/
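// mixChannels_: for every requested (from, to) pair one channel is copied as
// a strided sequence; sdelta/ddelta are the per-element strides, i.e. the
// channel counts of the source and destination matrices. A NULL source
// pointer stands for a non-existent source channel and zero-fills the
// destination channel.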
721 template<typename T> static void
722 mixChannels_( const T** src, const int* sdelta,
723 T** dst, const int* ddelta,
724 int len, int npairs )
727 for( k = 0; k < npairs; k++ )
731 int ds = sdelta[k], dd = ddelta[k];
734 for( i = 0; i <= len - 2; i += 2, s += ds*2, d += dd*2 )
736 T t0 = s[0], t1 = s[ds];
737 d[0] = t0; d[dd] = t1;
744 for( i = 0; i <= len - 2; i += 2, d += dd*2 )
753 static void mixChannels8u( const uchar** src, const int* sdelta,
754 uchar** dst, const int* ddelta,
755 int len, int npairs )
757 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
760 static void mixChannels16u( const ushort** src, const int* sdelta,
761 ushort** dst, const int* ddelta,
762 int len, int npairs )
764 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
767 static void mixChannels32s( const int** src, const int* sdelta,
768 int** dst, const int* ddelta,
769 int len, int npairs )
771 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
774 static void mixChannels64s( const int64** src, const int* sdelta,
775 int64** dst, const int* ddelta,
776 int len, int npairs )
778 mixChannels_(src, sdelta, dst, ddelta, len, npairs);
781 typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta,
782 uchar** dst, const int* ddelta, int len, int npairs );
784 static MixChannelsFunc getMixchFunc(int depth)
786 static MixChannelsFunc mixchTab[] =
788 (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u,
789 (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s,
790 (MixChannelsFunc)mixChannels64s, 0
793 return mixchTab[depth];
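// cv::mixChannels: for each pair, tab[] keeps four ints - the index of the
// source matrix in arrays[], the byte offset of the source channel within a
// pixel, and the same two values for the destination - while sdelta/ddelta
// store the per-element strides (channel counts). A source channel index that
// is out of range maps to the terminating NULL pointer, i.e. the destination
// channel is zero-filled.
//
// Illustrative use of the fromTo convention (hypothetical Mats): split an
// RGBA image into a BGR image plus a separate alpha plane in one pass:
//     // Mat rgba(100, 100, CV_8UC4), bgr(100, 100, CV_8UC3), alpha(100, 100, CV_8UC1);
//     // Mat out[] = { bgr, alpha };
//     // int from_to[] = { 0,2,  1,1,  2,0,  3,3 };
//     // mixChannels( &rgba, 1, out, 2, from_to, 4 );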
798 void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, const int* fromTo, size_t npairs )
802 CV_Assert( src && nsrcs > 0 && dst && ndsts > 0 && fromTo && npairs > 0 );
804 size_t i, j, k, esz1 = dst[0].elemSize1();
805 int depth = dst[0].depth();
807 AutoBuffer<uchar> buf((nsrcs + ndsts + 1)*(sizeof(Mat*) + sizeof(uchar*)) + npairs*(sizeof(uchar*)*2 + sizeof(int)*6));
808 const Mat** arrays = (const Mat**)(uchar*)buf;
809 uchar** ptrs = (uchar**)(arrays + nsrcs + ndsts);
810 const uchar** srcs = (const uchar**)(ptrs + nsrcs + ndsts + 1);
811 uchar** dsts = (uchar**)(srcs + npairs);
812 int* tab = (int*)(dsts + npairs);
813 int *sdelta = (int*)(tab + npairs*4), *ddelta = sdelta + npairs;
815 for( i = 0; i < nsrcs; i++ )
817 for( i = 0; i < ndsts; i++ )
818 arrays[i + nsrcs] = &dst[i];
819 ptrs[nsrcs + ndsts] = 0;
821 for( i = 0; i < npairs; i++ )
823 int i0 = fromTo[i*2], i1 = fromTo[i*2+1];
826 for( j = 0; j < nsrcs; i0 -= src[j].channels(), j++ )
827 if( i0 < src[j].channels() )
829 CV_Assert(j < nsrcs && src[j].depth() == depth);
830 tab[i*4] = (int)j; tab[i*4+1] = (int)(i0*esz1);
831 sdelta[i] = src[j].channels();
835 tab[i*4] = (int)(nsrcs + ndsts); tab[i*4+1] = 0;
839 for( j = 0; j < ndsts; i1 -= dst[j].channels(), j++ )
840 if( i1 < dst[j].channels() )
842 CV_Assert(i1 >= 0 && j < ndsts && dst[j].depth() == depth);
843 tab[i*4+2] = (int)(j + nsrcs); tab[i*4+3] = (int)(i1*esz1);
844 ddelta[i] = dst[j].channels();
847 NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts));
848 int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1));
849 MixChannelsFunc func = getMixchFunc(depth);
851 for( i = 0; i < it.nplanes; i++, ++it )
853 for( k = 0; k < npairs; k++ )
855 srcs[k] = ptrs[tab[k*4]] + tab[k*4+1];
856 dsts[k] = ptrs[tab[k*4+2]] + tab[k*4+3];
859 for( int t = 0; t < total; t += blocksize )
861 int bsz = std::min(total - t, blocksize);
862 func( srcs, sdelta, dsts, ddelta, bsz, (int)npairs );
864 if( t + blocksize < total )
865 for( k = 0; k < npairs; k++ )
867 srcs[k] += blocksize*sdelta[k]*esz1;
868 dsts[k] += blocksize*ddelta[k]*esz1;
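// getUMatIndex: translates a global channel index (as used in fromTo) into
// the index of the UMat that contains it plus the channel index inside that
// UMat; both outputs are set to -1 when the requested channel is out of range.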
878 static void getUMatIndex(const std::vector<UMat> & um, int cn, int & idx, int & cnidx)
880 int totalChannels = 0;
881 for (size_t i = 0, size = um.size(); i < size; ++i)
883 int ccn = um[i].channels();
884 totalChannels += ccn;
886 if (totalChannels == cn)
892 else if (totalChannels > cn)
895 cnidx = i == 0 ? cn : (cn - totalChannels + ccn);
903 static bool ocl_mixChannels(InputArrayOfArrays _src, InputOutputArrayOfArrays _dst,
904 const int* fromTo, size_t npairs)
906 std::vector<UMat> src, dst;
907 _src.getUMatVector(src);
908 _dst.getUMatVector(dst);
910 size_t nsrc = src.size(), ndst = dst.size();
911 CV_Assert(nsrc > 0 && ndst > 0);
913 Size size = src[0].size();
914 int depth = src[0].depth(), esz = CV_ELEM_SIZE(depth),
915 rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
917 for (size_t i = 1, ssize = src.size(); i < ssize; ++i)
918 CV_Assert(src[i].size() == size && src[i].depth() == depth);
919 for (size_t i = 0, dsize = dst.size(); i < dsize; ++i)
920 CV_Assert(dst[i].size() == size && dst[i].depth() == depth);
922 String declsrc, decldst, declproc, declcn, indexdecl;
923 std::vector<UMat> srcargs(npairs), dstargs(npairs);
925 for (size_t i = 0; i < npairs; ++i)
927 int scn = fromTo[i<<1], dcn = fromTo[(i<<1) + 1];
928 int src_idx, src_cnidx, dst_idx, dst_cnidx;
930 getUMatIndex(src, scn, src_idx, src_cnidx);
931 getUMatIndex(dst, dcn, dst_idx, dst_cnidx);
933 CV_Assert(dst_idx >= 0 && src_idx >= 0);
935 srcargs[i] = src[src_idx];
936 srcargs[i].offset += src_cnidx * esz;
938 dstargs[i] = dst[dst_idx];
939 dstargs[i].offset += dst_cnidx * esz;
941 declsrc += format("DECLARE_INPUT_MAT(%d)", i);
942 decldst += format("DECLARE_OUTPUT_MAT(%d)", i);
943 indexdecl += format("DECLARE_INDEX(%d)", i);
944 declproc += format("PROCESS_ELEM(%d)", i);
945 declcn += format(" -D scn%d=%d -D dcn%d=%d", i, src[src_idx].channels(), i, dst[dst_idx].channels());
948 ocl::Kernel k("mixChannels", ocl::core::mixchannels_oclsrc,
949 format("-D T=%s -D DECLARE_INPUT_MAT_N=%s -D DECLARE_OUTPUT_MAT_N=%s"
950 " -D PROCESS_ELEM_N=%s -D DECLARE_INDEX_N=%s%s",
951 ocl::memopTypeToStr(depth), declsrc.c_str(), decldst.c_str(),
952 declproc.c_str(), indexdecl.c_str(), declcn.c_str()));
957 for (size_t i = 0; i < npairs; ++i)
958 argindex = k.set(argindex, ocl::KernelArg::ReadOnlyNoSize(srcargs[i]));
959 for (size_t i = 0; i < npairs; ++i)
960 argindex = k.set(argindex, ocl::KernelArg::WriteOnlyNoSize(dstargs[i]));
961 argindex = k.set(argindex, size.height);
962 argindex = k.set(argindex, size.width);
963 k.set(argindex, rowsPerWI);
965 size_t globalsize[2] = { size.width, (size.height + rowsPerWI - 1) / rowsPerWI };
966 return k.run(2, globalsize, NULL, false);
973 void cv::mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
974 const int* fromTo, size_t npairs)
if (npairs == 0 || fromTo == NULL)
    return;
979 CV_OCL_RUN(dst.isUMatVector(),
980 ocl_mixChannels(src, dst, fromTo, npairs))
982 bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
983 src.kind() != _InputArray::STD_VECTOR_VECTOR &&
984 src.kind() != _InputArray::STD_VECTOR_UMAT;
985 bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
986 dst.kind() != _InputArray::STD_VECTOR_VECTOR &&
987 dst.kind() != _InputArray::STD_VECTOR_UMAT;
989 int nsrc = src_is_mat ? 1 : (int)src.total();
990 int ndst = dst_is_mat ? 1 : (int)dst.total();
992 CV_Assert(nsrc > 0 && ndst > 0);
993 cv::AutoBuffer<Mat> _buf(nsrc + ndst);
995 for( i = 0; i < nsrc; i++ )
996 buf[i] = src.getMat(src_is_mat ? -1 : i);
997 for( i = 0; i < ndst; i++ )
998 buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
999 mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, fromTo, npairs);
1002 void cv::mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
1003 const std::vector<int>& fromTo)
1008 CV_OCL_RUN(dst.isUMatVector(),
1009 ocl_mixChannels(src, dst, &fromTo[0], fromTo.size()>>1))
1011 bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
1012 src.kind() != _InputArray::STD_VECTOR_VECTOR &&
1013 src.kind() != _InputArray::STD_VECTOR_UMAT;
1014 bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
1015 dst.kind() != _InputArray::STD_VECTOR_VECTOR &&
1016 dst.kind() != _InputArray::STD_VECTOR_UMAT;
1018 int nsrc = src_is_mat ? 1 : (int)src.total();
1019 int ndst = dst_is_mat ? 1 : (int)dst.total();
1021 CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
1022 cv::AutoBuffer<Mat> _buf(nsrc + ndst);
1024 for( i = 0; i < nsrc; i++ )
1025 buf[i] = src.getMat(src_is_mat ? -1 : i);
1026 for( i = 0; i < ndst; i++ )
1027 buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
1028 mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, &fromTo[0], fromTo.size()/2);
1031 void cv::extractChannel(InputArray _src, OutputArray _dst, int coi)
1033 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
1034 CV_Assert( 0 <= coi && coi < cn );
1035 int ch[] = { coi, 0 };
1037 if (ocl::useOpenCL() && _src.dims() <= 2 && _dst.isUMat())
1039 UMat src = _src.getUMat();
1040 _dst.create(src.dims, &src.size[0], depth);
1041 UMat dst = _dst.getUMat();
1042 mixChannels(std::vector<UMat>(1, src), std::vector<UMat>(1, dst), ch, 1);
1046 Mat src = _src.getMat();
1047 _dst.create(src.dims, &src.size[0], depth);
1048 Mat dst = _dst.getMat();
1049 mixChannels(&src, 1, &dst, 1, ch, 1);
1052 void cv::insertChannel(InputArray _src, InputOutputArray _dst, int coi)
1054 int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype);
1055 int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype), dcn = CV_MAT_CN(dtype);
1056 CV_Assert( _src.sameSize(_dst) && sdepth == ddepth );
1057 CV_Assert( 0 <= coi && coi < dcn && scn == 1 );
1059 int ch[] = { 0, coi };
1060 if (ocl::useOpenCL() && _src.dims() <= 2 && _dst.isUMat())
1062 UMat src = _src.getUMat(), dst = _dst.getUMat();
1063 mixChannels(std::vector<UMat>(1, src), std::vector<UMat>(1, dst), ch, 1);
1067 Mat src = _src.getMat(), dst = _dst.getMat();
1068 mixChannels(&src, 1, &dst, 1, ch, 1);
1071 /****************************************************************************************\
1072 * convertScale[Abs] *
1073 \****************************************************************************************/
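// convertScaleAbs computes saturate_cast<DT>(|src*scale + shift|). The
// cvtScaleAbs_SIMD functor below processes as much of a row as it can with
// SSE2/NEON and returns the number of handled elements; the generic
// cvtScaleAbs_ loop then finishes the remaining tail with scalar code.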
1078 template<typename T, typename DT, typename WT>
1079 struct cvtScaleAbs_SIMD
int operator () (const T *, DT *, int, WT, WT) const { return 0; } // generic fallback: nothing vectorized
1090 struct cvtScaleAbs_SIMD<uchar, uchar, float>
1092 int operator () (const uchar * src, uchar * dst, int width,
1093 float scale, float shift) const
1099 __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
1100 v_zero_f = _mm_setzero_ps();
1101 __m128i v_zero_i = _mm_setzero_si128();
1103 for ( ; x <= width - 16; x += 16)
1105 __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
1106 __m128i v_src12 = _mm_unpacklo_epi8(v_src, v_zero_i), v_src_34 = _mm_unpackhi_epi8(v_src, v_zero_i);
1107 __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src12, v_zero_i)), v_scale), v_shift);
1108 v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
1109 __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src12, v_zero_i)), v_scale), v_shift);
1110 v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
1111 __m128 v_dst3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_34, v_zero_i)), v_scale), v_shift);
1112 v_dst3 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst3), v_dst3);
1113 __m128 v_dst4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_34, v_zero_i)), v_scale), v_shift);
1114 v_dst4 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst4), v_dst4);
1116 __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)),
1117 _mm_packs_epi32(_mm_cvtps_epi32(v_dst3), _mm_cvtps_epi32(v_dst4)));
1118 _mm_storeu_si128((__m128i *)(dst + x), v_dst_i);
1127 struct cvtScaleAbs_SIMD<ushort, uchar, float>
1129 int operator () (const ushort * src, uchar * dst, int width,
1130 float scale, float shift) const
1136 __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
1137 v_zero_f = _mm_setzero_ps();
1138 __m128i v_zero_i = _mm_setzero_si128();
1140 for ( ; x <= width - 8; x += 8)
1142 __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
1143 __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero_i)), v_scale), v_shift);
1144 v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
1145 __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero_i)), v_scale), v_shift);
1146 v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
1148 __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), v_zero_i);
1149 _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
1158 struct cvtScaleAbs_SIMD<short, uchar, float>
1160 int operator () (const short * src, uchar * dst, int width,
1161 float scale, float shift) const
1167 __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
1168 v_zero_f = _mm_setzero_ps();
1169 __m128i v_zero_i = _mm_setzero_si128();
1171 for ( ; x <= width - 8; x += 8)
1173 __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
1174 __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_src, v_src), 16)), v_scale), v_shift);
1175 v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
1176 __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_src, v_src), 16)), v_scale), v_shift);
1177 v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
1179 __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), v_zero_i);
1180 _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
1189 struct cvtScaleAbs_SIMD<int, uchar, float>
1191 int operator () (const int * src, uchar * dst, int width,
1192 float scale, float shift) const
1198 __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
1199 v_zero_f = _mm_setzero_ps();
1200 __m128i v_zero_i = _mm_setzero_si128();
1202 for ( ; x <= width - 8; x += 4)
1204 __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
1205 __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
1206 v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
1208 __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), v_zero_i), v_zero_i);
1209 _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
1218 struct cvtScaleAbs_SIMD<float, uchar, float>
1220 int operator () (const float * src, uchar * dst, int width,
1221 float scale, float shift) const
1227 __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
1228 v_zero_f = _mm_setzero_ps();
1229 __m128i v_zero_i = _mm_setzero_si128();
1231 for ( ; x <= width - 8; x += 4)
1233 __m128 v_dst = _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(src + x), v_scale), v_shift);
1234 v_dst = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst), v_dst);
1236 __m128i v_dst_i = _mm_packs_epi32(_mm_cvtps_epi32(v_dst), v_zero_i);
1237 _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst_i, v_zero_i));
1248 struct cvtScaleAbs_SIMD<float, uchar, float>
1250 int operator () (const float * src, uchar * dst, int width,
1251 float scale, float shift) const
1254 float32x4_t v_shift = vdupq_n_f32(shift);
1256 for ( ; x <= width - 8; x += 8)
1258 float32x4_t v_dst_0 = vmulq_n_f32(vld1q_f32(src + x), scale);
1259 v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
1260 uint16x4_t v_dsti_0 = vqmovun_s32(vcvtq_s32_f32(v_dst_0));
1262 float32x4_t v_dst_1 = vmulq_n_f32(vld1q_f32(src + x + 4), scale);
1263 v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
1264 uint16x4_t v_dsti_1 = vqmovun_s32(vcvtq_s32_f32(v_dst_1));
1266 uint16x8_t v_dst = vcombine_u16(v_dsti_0, v_dsti_1);
1267 vst1_u8(dst + x, vqmovn_u16(v_dst));
1276 template<typename T, typename DT, typename WT> static void
1277 cvtScaleAbs_( const T* src, size_t sstep,
1278 DT* dst, size_t dstep, Size size,
1279 WT scale, WT shift )
1281 sstep /= sizeof(src[0]);
1282 dstep /= sizeof(dst[0]);
1283 cvtScaleAbs_SIMD<T, DT, WT> vop;
1285 for( ; size.height--; src += sstep, dst += dstep )
1287 int x = vop(src, dst, size.width, scale, shift);
1289 #if CV_ENABLE_UNROLLED
1290 for( ; x <= size.width - 4; x += 4 )
1293 t0 = saturate_cast<DT>(std::abs(src[x]*scale + shift));
1294 t1 = saturate_cast<DT>(std::abs(src[x+1]*scale + shift));
1295 dst[x] = t0; dst[x+1] = t1;
1296 t0 = saturate_cast<DT>(std::abs(src[x+2]*scale + shift));
1297 t1 = saturate_cast<DT>(std::abs(src[x+3]*scale + shift));
1298 dst[x+2] = t0; dst[x+3] = t1;
1301 for( ; x < size.width; x++ )
1302 dst[x] = saturate_cast<DT>(std::abs(src[x]*scale + shift));
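// cvtScale_: the non-absolute counterpart,
// dst[x] = saturate_cast<DT>(src[x]*scale + shift), with optional manual 4x
// unrolling when CV_ENABLE_UNROLLED is set.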
1306 template<typename T, typename DT, typename WT> static void
1307 cvtScale_( const T* src, size_t sstep,
1308 DT* dst, size_t dstep, Size size,
1309 WT scale, WT shift )
1311 sstep /= sizeof(src[0]);
1312 dstep /= sizeof(dst[0]);
1314 for( ; size.height--; src += sstep, dst += dstep )
1317 #if CV_ENABLE_UNROLLED
1318 for( ; x <= size.width - 4; x += 4 )
1321 t0 = saturate_cast<DT>(src[x]*scale + shift);
1322 t1 = saturate_cast<DT>(src[x+1]*scale + shift);
1323 dst[x] = t0; dst[x+1] = t1;
1324 t0 = saturate_cast<DT>(src[x+2]*scale + shift);
1325 t1 = saturate_cast<DT>(src[x+3]*scale + shift);
1326 dst[x+2] = t0; dst[x+3] = t1;
1330 for( ; x < size.width; x++ )
1331 dst[x] = saturate_cast<DT>(src[x]*scale + shift);
// hand-optimized template specializations
1337 cvtScale_<short, short, float>( const short* src, size_t sstep,
1338 short* dst, size_t dstep, Size size,
1339 float scale, float shift )
1341 sstep /= sizeof(src[0]);
1342 dstep /= sizeof(dst[0]);
1344 for( ; size.height--; src += sstep, dst += dstep )
1350 __m128 scale128 = _mm_set1_ps (scale);
1351 __m128 shift128 = _mm_set1_ps (shift);
1352 for(; x <= size.width - 8; x += 8 )
1354 __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x));
1355 __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4));
1356 __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16));
1357 __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16));
1358 rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128);
1359 rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128);
1360 r0 = _mm_cvtps_epi32(rf0);
1361 r1 = _mm_cvtps_epi32(rf1);
1362 r0 = _mm_packs_epi32(r0, r1);
1363 _mm_storeu_si128((__m128i*)(dst + x), r0);
1368 for(; x < size.width; x++ )
1369 dst[x] = saturate_cast<short>(src[x]*scale + shift);
1374 cvtScale_<short, int, float>( const short* src, size_t sstep,
1375 int* dst, size_t dstep, Size size,
1376 float scale, float shift )
1378 sstep /= sizeof(src[0]);
1379 dstep /= sizeof(dst[0]);
1381 for( ; size.height--; src += sstep, dst += dstep )
1388 __m128 scale128 = _mm_set1_ps (scale);
1389 __m128 shift128 = _mm_set1_ps (shift);
1390 for(; x <= size.width - 8; x += 8 )
1392 __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x));
1393 __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4));
1394 __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16));
1395 __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16));
1396 rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128);
1397 rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128);
1398 r0 = _mm_cvtps_epi32(rf0);
1399 r1 = _mm_cvtps_epi32(rf1);
1401 _mm_storeu_si128((__m128i*)(dst + x), r0);
1402 _mm_storeu_si128((__m128i*)(dst + x + 4), r1);
// We will wait for Haswell (AVX2)
if(USE_AVX) // ~2x - a bad variant
    // TODO: an AVX implementation (optimization?) is still required
1413 __m256 scale256 = _mm256_set1_ps (scale);
1414 __m256 shift256 = _mm256_set1_ps (shift);
1415 for(; x <= size.width - 8; x += 8 )
1417 __m256i buf = _mm256_set_epi32((int)(*(src+x+7)),(int)(*(src+x+6)),(int)(*(src+x+5)),(int)(*(src+x+4)),(int)(*(src+x+3)),(int)(*(src+x+2)),(int)(*(src+x+1)),(int)(*(src+x)));
1418 __m256 r0 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (buf), scale256), shift256);
1419 __m256i res = _mm256_cvtps_epi32(r0);
1420 _mm256_storeu_si256 ((__m256i*)(dst+x), res);
1425 for(; x < size.width; x++ )
1426 dst[x] = saturate_cast<int>(src[x]*scale + shift);
1430 template<typename T, typename DT> static void
1431 cvt_( const T* src, size_t sstep,
1432 DT* dst, size_t dstep, Size size )
1434 sstep /= sizeof(src[0]);
1435 dstep /= sizeof(dst[0]);
1437 for( ; size.height--; src += sstep, dst += dstep )
1440 #if CV_ENABLE_UNROLLED
1441 for( ; x <= size.width - 4; x += 4 )
1444 t0 = saturate_cast<DT>(src[x]);
1445 t1 = saturate_cast<DT>(src[x+1]);
1446 dst[x] = t0; dst[x+1] = t1;
1447 t0 = saturate_cast<DT>(src[x+2]);
1448 t1 = saturate_cast<DT>(src[x+3]);
1449 dst[x+2] = t0; dst[x+3] = t1;
1452 for( ; x < size.width; x++ )
1453 dst[x] = saturate_cast<DT>(src[x]);
// hand-optimized template specialization; exercised by Core_ConvertScale/ElemWiseTest
1459 cvt_<float, short>( const float* src, size_t sstep,
1460 short* dst, size_t dstep, Size size )
1462 sstep /= sizeof(src[0]);
1463 dstep /= sizeof(dst[0]);
1465 for( ; size.height--; src += sstep, dst += dstep )
1470 for( ; x <= size.width - 8; x += 8 )
1472 __m128 src128 = _mm_loadu_ps (src + x);
1473 __m128i src_int128 = _mm_cvtps_epi32 (src128);
1475 src128 = _mm_loadu_ps (src + x + 4);
1476 __m128i src1_int128 = _mm_cvtps_epi32 (src128);
1478 src1_int128 = _mm_packs_epi32(src_int128, src1_int128);
1479 _mm_storeu_si128((__m128i*)(dst + x),src1_int128);
1483 for( ; x < size.width; x++ )
1484 dst[x] = saturate_cast<short>(src[x]);
1490 template<typename T> static void
1491 cpy_( const T* src, size_t sstep, T* dst, size_t dstep, Size size )
1493 sstep /= sizeof(src[0]);
1494 dstep /= sizeof(dst[0]);
1496 for( ; size.height--; src += sstep, dst += dstep )
1497 memcpy(dst, src, size.width*sizeof(src[0]));
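// The macros below stamp out the per-type kernels referenced by the dispatch
// tables: DEF_CVT_SCALE_ABS_FUNC / DEF_CVT_SCALE_FUNC wrap cvtScaleAbs_ and
// cvtScale_, DEF_CVT_FUNC_F / DEF_CVT_FUNC_F2 try the corresponding
// ippiConvert_* primitive first (when HAVE_IPP is defined) and fall back to
// the generic cvt_, and DEF_CPY_FUNC emits a plain row-wise copy for
// same-depth "conversions". All of them are used through the BinaryFunc
// pointer type.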
1500 #define DEF_CVT_SCALE_ABS_FUNC(suffix, tfunc, stype, dtype, wtype) \
1501 static void cvtScaleAbs##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
1502 dtype* dst, size_t dstep, Size size, double* scale) \
1504 tfunc(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
1507 #define DEF_CVT_SCALE_FUNC(suffix, stype, dtype, wtype) \
1508 static void cvtScale##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
1509 dtype* dst, size_t dstep, Size size, double* scale) \
1511 cvtScale_(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
1514 #if defined(HAVE_IPP)
1515 #define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
1516 static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
1517 dtype* dst, size_t dstep, Size size, double*) \
1521 if (ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height)) >= 0) \
1523 setIppErrorStatus(); \
1525 cvt_(src, sstep, dst, dstep, size); \
1528 #define DEF_CVT_FUNC_F2(suffix, stype, dtype, ippFavor) \
1529 static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
1530 dtype* dst, size_t dstep, Size size, double*) \
1534 if (ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height), ippRndFinancial, 0) >= 0) \
1536 setIppErrorStatus(); \
1538 cvt_(src, sstep, dst, dstep, size); \
1541 #define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
1542 static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
1543 dtype* dst, size_t dstep, Size size, double*) \
1545 cvt_(src, sstep, dst, dstep, size); \
1547 #define DEF_CVT_FUNC_F2 DEF_CVT_FUNC_F
1550 #define DEF_CVT_FUNC(suffix, stype, dtype) \
1551 static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
1552 dtype* dst, size_t dstep, Size size, double*) \
1554 cvt_(src, sstep, dst, dstep, size); \
1557 #define DEF_CPY_FUNC(suffix, stype) \
1558 static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
1559 stype* dst, size_t dstep, Size size, double*) \
1561 cpy_(src, sstep, dst, dstep, size); \
1565 DEF_CVT_SCALE_ABS_FUNC(8u, cvtScaleAbs_, uchar, uchar, float)
1566 DEF_CVT_SCALE_ABS_FUNC(8s8u, cvtScaleAbs_, schar, uchar, float)
1567 DEF_CVT_SCALE_ABS_FUNC(16u8u, cvtScaleAbs_, ushort, uchar, float)
1568 DEF_CVT_SCALE_ABS_FUNC(16s8u, cvtScaleAbs_, short, uchar, float)
1569 DEF_CVT_SCALE_ABS_FUNC(32s8u, cvtScaleAbs_, int, uchar, float)
1570 DEF_CVT_SCALE_ABS_FUNC(32f8u, cvtScaleAbs_, float, uchar, float)
1571 DEF_CVT_SCALE_ABS_FUNC(64f8u, cvtScaleAbs_, double, uchar, float)
1573 DEF_CVT_SCALE_FUNC(8u, uchar, uchar, float)
1574 DEF_CVT_SCALE_FUNC(8s8u, schar, uchar, float)
1575 DEF_CVT_SCALE_FUNC(16u8u, ushort, uchar, float)
1576 DEF_CVT_SCALE_FUNC(16s8u, short, uchar, float)
1577 DEF_CVT_SCALE_FUNC(32s8u, int, uchar, float)
1578 DEF_CVT_SCALE_FUNC(32f8u, float, uchar, float)
1579 DEF_CVT_SCALE_FUNC(64f8u, double, uchar, float)
1581 DEF_CVT_SCALE_FUNC(8u8s, uchar, schar, float)
1582 DEF_CVT_SCALE_FUNC(8s, schar, schar, float)
1583 DEF_CVT_SCALE_FUNC(16u8s, ushort, schar, float)
1584 DEF_CVT_SCALE_FUNC(16s8s, short, schar, float)
1585 DEF_CVT_SCALE_FUNC(32s8s, int, schar, float)
1586 DEF_CVT_SCALE_FUNC(32f8s, float, schar, float)
1587 DEF_CVT_SCALE_FUNC(64f8s, double, schar, float)
1589 DEF_CVT_SCALE_FUNC(8u16u, uchar, ushort, float)
1590 DEF_CVT_SCALE_FUNC(8s16u, schar, ushort, float)
1591 DEF_CVT_SCALE_FUNC(16u, ushort, ushort, float)
1592 DEF_CVT_SCALE_FUNC(16s16u, short, ushort, float)
1593 DEF_CVT_SCALE_FUNC(32s16u, int, ushort, float)
1594 DEF_CVT_SCALE_FUNC(32f16u, float, ushort, float)
1595 DEF_CVT_SCALE_FUNC(64f16u, double, ushort, float)
1597 DEF_CVT_SCALE_FUNC(8u16s, uchar, short, float)
1598 DEF_CVT_SCALE_FUNC(8s16s, schar, short, float)
1599 DEF_CVT_SCALE_FUNC(16u16s, ushort, short, float)
1600 DEF_CVT_SCALE_FUNC(16s, short, short, float)
1601 DEF_CVT_SCALE_FUNC(32s16s, int, short, float)
1602 DEF_CVT_SCALE_FUNC(32f16s, float, short, float)
1603 DEF_CVT_SCALE_FUNC(64f16s, double, short, float)
1605 DEF_CVT_SCALE_FUNC(8u32s, uchar, int, float)
1606 DEF_CVT_SCALE_FUNC(8s32s, schar, int, float)
1607 DEF_CVT_SCALE_FUNC(16u32s, ushort, int, float)
1608 DEF_CVT_SCALE_FUNC(16s32s, short, int, float)
1609 DEF_CVT_SCALE_FUNC(32s, int, int, double)
1610 DEF_CVT_SCALE_FUNC(32f32s, float, int, float)
1611 DEF_CVT_SCALE_FUNC(64f32s, double, int, double)
1613 DEF_CVT_SCALE_FUNC(8u32f, uchar, float, float)
1614 DEF_CVT_SCALE_FUNC(8s32f, schar, float, float)
1615 DEF_CVT_SCALE_FUNC(16u32f, ushort, float, float)
1616 DEF_CVT_SCALE_FUNC(16s32f, short, float, float)
1617 DEF_CVT_SCALE_FUNC(32s32f, int, float, double)
1618 DEF_CVT_SCALE_FUNC(32f, float, float, float)
1619 DEF_CVT_SCALE_FUNC(64f32f, double, float, double)
1621 DEF_CVT_SCALE_FUNC(8u64f, uchar, double, double)
1622 DEF_CVT_SCALE_FUNC(8s64f, schar, double, double)
1623 DEF_CVT_SCALE_FUNC(16u64f, ushort, double, double)
1624 DEF_CVT_SCALE_FUNC(16s64f, short, double, double)
1625 DEF_CVT_SCALE_FUNC(32s64f, int, double, double)
1626 DEF_CVT_SCALE_FUNC(32f64f, float, double, double)
1627 DEF_CVT_SCALE_FUNC(64f, double, double, double)
1629 DEF_CPY_FUNC(8u, uchar)
1630 DEF_CVT_FUNC_F(8s8u, schar, uchar, 8s8u_C1Rs)
1631 DEF_CVT_FUNC_F(16u8u, ushort, uchar, 16u8u_C1R)
1632 DEF_CVT_FUNC_F(16s8u, short, uchar, 16s8u_C1R)
1633 DEF_CVT_FUNC_F(32s8u, int, uchar, 32s8u_C1R)
1634 DEF_CVT_FUNC_F2(32f8u, float, uchar, 32f8u_C1RSfs)
1635 DEF_CVT_FUNC(64f8u, double, uchar)
1637 DEF_CVT_FUNC_F2(8u8s, uchar, schar, 8u8s_C1RSfs)
1638 DEF_CVT_FUNC_F2(16u8s, ushort, schar, 16u8s_C1RSfs)
1639 DEF_CVT_FUNC_F2(16s8s, short, schar, 16s8s_C1RSfs)
1640 DEF_CVT_FUNC_F(32s8s, int, schar, 32s8s_C1R)
1641 DEF_CVT_FUNC_F2(32f8s, float, schar, 32f8s_C1RSfs)
1642 DEF_CVT_FUNC(64f8s, double, schar)
1644 DEF_CVT_FUNC_F(8u16u, uchar, ushort, 8u16u_C1R)
1645 DEF_CVT_FUNC_F(8s16u, schar, ushort, 8s16u_C1Rs)
1646 DEF_CPY_FUNC(16u, ushort)
1647 DEF_CVT_FUNC_F(16s16u, short, ushort, 16s16u_C1Rs)
1648 DEF_CVT_FUNC_F2(32s16u, int, ushort, 32s16u_C1RSfs)
1649 DEF_CVT_FUNC_F2(32f16u, float, ushort, 32f16u_C1RSfs)
1650 DEF_CVT_FUNC(64f16u, double, ushort)
1652 DEF_CVT_FUNC_F(8u16s, uchar, short, 8u16s_C1R)
1653 DEF_CVT_FUNC_F(8s16s, schar, short, 8s16s_C1R)
1654 DEF_CVT_FUNC_F2(16u16s, ushort, short, 16u16s_C1RSfs)
1655 DEF_CVT_FUNC_F2(32s16s, int, short, 32s16s_C1RSfs)
1656 DEF_CVT_FUNC(32f16s, float, short)
1657 DEF_CVT_FUNC(64f16s, double, short)
1659 DEF_CVT_FUNC_F(8u32s, uchar, int, 8u32s_C1R)
1660 DEF_CVT_FUNC_F(8s32s, schar, int, 8s32s_C1R)
1661 DEF_CVT_FUNC_F(16u32s, ushort, int, 16u32s_C1R)
1662 DEF_CVT_FUNC_F(16s32s, short, int, 16s32s_C1R)
1663 DEF_CPY_FUNC(32s, int)
1664 DEF_CVT_FUNC_F2(32f32s, float, int, 32f32s_C1RSfs)
1665 DEF_CVT_FUNC(64f32s, double, int)
1667 DEF_CVT_FUNC_F(8u32f, uchar, float, 8u32f_C1R)
1668 DEF_CVT_FUNC_F(8s32f, schar, float, 8s32f_C1R)
1669 DEF_CVT_FUNC_F(16u32f, ushort, float, 16u32f_C1R)
1670 DEF_CVT_FUNC_F(16s32f, short, float, 16s32f_C1R)
1671 DEF_CVT_FUNC_F(32s32f, int, float, 32s32f_C1R)
1672 DEF_CVT_FUNC(64f32f, double, float)
1674 DEF_CVT_FUNC(8u64f, uchar, double)
1675 DEF_CVT_FUNC(8s64f, schar, double)
1676 DEF_CVT_FUNC(16u64f, ushort, double)
1677 DEF_CVT_FUNC(16s64f, short, double)
1678 DEF_CVT_FUNC(32s64f, int, double)
1679 DEF_CVT_FUNC(32f64f, float, double)
1680 DEF_CPY_FUNC(64s, int64)
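// Conversion dispatch: cvtScaleAbsTab is indexed by the source depth (the
// destination is always CV_8U), while cvtTab and cvtScaleTab are indexed as
// [ddepth][sdepth]. In cvtTab, conversions between depths of equal element
// size reduce to the copy kernels (cvt8u, cvt16u, cvt32s, cvt64s); the
// CV_USRTYPE1 slots are left empty.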
1682 static BinaryFunc getCvtScaleAbsFunc(int depth)
1684 static BinaryFunc cvtScaleAbsTab[] =
1686 (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u,
1687 (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u,
1688 (BinaryFunc)cvtScaleAbs64f8u, 0
1691 return cvtScaleAbsTab[depth];
1694 BinaryFunc getConvertFunc(int sdepth, int ddepth)
1696 static BinaryFunc cvtTab[][8] =
1699 (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u),
1700 (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u),
1701 (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0
1704 (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s),
1705 (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s),
1706 (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0
1709 (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u,
1710 (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u),
1711 (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0
1714 (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s),
1715 (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s),
1716 (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0
1719 (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s),
1720 (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s),
1721 (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0
1724 (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f),
1725 (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s,
1726 (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0
1729 (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f),
1730 (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f),
1731 (BinaryFunc)(cvt64s), 0
1734 0, 0, 0, 0, 0, 0, 0, 0
1738 return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
1741 static BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
1743 static BinaryFunc cvtScaleTab[][8] =
1746 (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8u),
1747 (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u),
1748 (BinaryFunc)cvtScale64f8u, 0
1751 (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s),
1752 (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s),
1753 (BinaryFunc)cvtScale64f8s, 0
1756 (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u),
1757 (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u),
1758 (BinaryFunc)cvtScale64f16u, 0
1761 (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s),
1762 (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s),
1763 (BinaryFunc)cvtScale64f16s, 0
1766 (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32s),
1767 (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s),
1768 (BinaryFunc)cvtScale64f32s, 0
1771 (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f),
1772 (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f),
1773 (BinaryFunc)cvtScale64f32f, 0
1776 (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f,
1777 (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f,
1778 (BinaryFunc)cvtScale64f, 0
1781 0, 0, 0, 0, 0, 0, 0, 0
1785 return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
1790 static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
1792 const ocl::Device & d = ocl::Device::getDefault();
1793 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
1794 kercn = ocl::predictOptimalVectorWidth(_src, _dst), rowsPerWI = d.isIntel() ? 4 : 1;
1795 bool doubleSupport = d.doubleFPConfig() > 0;
1797 if (!doubleSupport && depth == CV_64F)
1801 int wdepth = std::max(depth, CV_32F);
1802 String build_opt = format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D srcT1=%s"
1803 " -D workT=%s -D wdepth=%d -D convertToWT1=%s -D convertToDT=%s"
1804 " -D workT1=%s -D rowsPerWI=%d%s",
1805 ocl::typeToStr(CV_8UC(kercn)),
1806 ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
1807 ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)), wdepth,
1808 ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
1809 ocl::convertTypeStr(wdepth, CV_8U, kercn, cvt[1]),
1810 ocl::typeToStr(wdepth), rowsPerWI,
1811 doubleSupport ? " -D DOUBLE_SUPPORT" : "");
1812 ocl::Kernel k("KF", ocl::core::arithm_oclsrc, build_opt);
1816 UMat src = _src.getUMat();
1817 _dst.create(src.size(), CV_8UC(cn));
1818 UMat dst = _dst.getUMat();
1820 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
1821 dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);
1823 if (wdepth == CV_32F)
1824 k.args(srcarg, dstarg, (float)alpha, (float)beta);
1825 else if (wdepth == CV_64F)
1826 k.args(srcarg, dstarg, alpha, beta);
1828 size_t globalsize[2] = { src.cols * cn / kercn, (src.rows + rowsPerWI - 1) / rowsPerWI };
1829 return k.run(2, globalsize, NULL, false);
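// cv::convertScaleAbs: tries the OpenCL kernel when the source has at most
// two dimensions and the destination is a UMat; otherwise it dispatches
// through getCvtScaleAbsFunc and walks the data either as one continuous
// block or plane by plane via NAryMatIterator.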
1836 void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
1838 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
1839 ocl_convertScaleAbs(_src, _dst, alpha, beta))
1841 Mat src = _src.getMat();
1842 int cn = src.channels();
1843 double scale[] = {alpha, beta};
1844 _dst.create( src.dims, src.size, CV_8UC(cn) );
1845 Mat dst = _dst.getMat();
1846 BinaryFunc func = getCvtScaleAbsFunc(src.depth());
1847 CV_Assert( func != 0 );
1851 Size sz = getContinuousSize(src, dst, cn);
1852 func( src.ptr(), src.step, 0, 0, dst.ptr(), dst.step, sz, scale );
1856 const Mat* arrays[] = {&src, &dst, 0};
1858 NAryMatIterator it(arrays, ptrs);
1859 Size sz((int)it.size*cn, 1);
1861 for( size_t i = 0; i < it.nplanes; i++, ++it )
1862 func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
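// Mat::convertTo: when alpha == 1 and beta == 0 the call reduces to a plain
// depth conversion (and to a simple copy if the depths already match);
// otherwise a scaling conversion kernel is used. The data is processed as one
// continuous block when possible, or plane by plane otherwise.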
1866 void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
1868 bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;
1871 _type = _dst.fixedType() ? _dst.type() : type();
1873 _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());
1875 int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
1876 if( sdepth == ddepth && noScale )
1884 BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
1885 double scale[] = {alpha, beta};
1886 int cn = channels();
1887 CV_Assert( func != 0 );
1891 _dst.create( size(), _type );
1892 Mat dst = _dst.getMat();
1893 Size sz = getContinuousSize(src, dst, cn);
1894 func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
1898 _dst.create( dims, size, _type );
1899 Mat dst = _dst.getMat();
1900 const Mat* arrays[] = {&src, &dst, 0};
1902 NAryMatIterator it(arrays, ptrs);
1903 Size sz((int)(it.size*cn), 1);
1905 for( size_t i = 0; i < it.nplanes; i++, ++it )
1906 func(ptrs[0], 1, 0, 0, ptrs[1], 1, sz, scale);
/****************************************************************************************\
*                                      LUT Transform                                     *
\****************************************************************************************/
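// LUT8u_: dst[i] = lut[src[i]] when the table has a single channel; when the
// table has cn channels, channel k of each pixel is looked up in its own
// sub-table, i.e. dst[i+k] = lut[src[i+k]*cn + k].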
1917 template<typename T> static void
1918 LUT8u_( const uchar* src, const T* lut, T* dst, int len, int cn, int lutcn )
1922 for( int i = 0; i < len*cn; i++ )
1923 dst[i] = lut[src[i]];
1927 for( int i = 0; i < len*cn; i += cn )
1928 for( int k = 0; k < cn; k++ )
1929 dst[i+k] = lut[src[i+k]*cn+k];
1933 static void LUT8u_8u( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn )
1935 LUT8u_( src, lut, dst, len, cn, lutcn );
1938 static void LUT8u_8s( const uchar* src, const schar* lut, schar* dst, int len, int cn, int lutcn )
1940 LUT8u_( src, lut, dst, len, cn, lutcn );
1943 static void LUT8u_16u( const uchar* src, const ushort* lut, ushort* dst, int len, int cn, int lutcn )
1945 LUT8u_( src, lut, dst, len, cn, lutcn );
1948 static void LUT8u_16s( const uchar* src, const short* lut, short* dst, int len, int cn, int lutcn )
1950 LUT8u_( src, lut, dst, len, cn, lutcn );
1953 static void LUT8u_32s( const uchar* src, const int* lut, int* dst, int len, int cn, int lutcn )
1955 LUT8u_( src, lut, dst, len, cn, lutcn );
1958 static void LUT8u_32f( const uchar* src, const float* lut, float* dst, int len, int cn, int lutcn )
1960 LUT8u_( src, lut, dst, len, cn, lutcn );
1963 static void LUT8u_64f( const uchar* src, const double* lut, double* dst, int len, int cn, int lutcn )
1965 LUT8u_( src, lut, dst, len, cn, lutcn );
1968 typedef void (*LUTFunc)( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn );
1970 static LUTFunc lutTab[] =
1972 (LUTFunc)LUT8u_8u, (LUTFunc)LUT8u_8s, (LUTFunc)LUT8u_16u, (LUTFunc)LUT8u_16s,
1973 (LUTFunc)LUT8u_32s, (LUTFunc)LUT8u_32f, (LUTFunc)LUT8u_64f, 0
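// lutTab is indexed by the LUT depth (CV_8U..CV_64F); the trailing 0 leaves the last
// depth slot unused.
// ocl_LUT below builds the "LUT" OpenCL kernel from lut_oclsrc: kercn is the number of
// destination elements handled per work item (vectorized only when the table has a
// single channel), and each work item covers up to four rows ((rows + 3) / 4).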
1978 static bool ocl_LUT(InputArray _src, InputArray _lut, OutputArray _dst)
1980 int lcn = _lut.channels(), dcn = _src.channels(), ddepth = _lut.depth();
1982 UMat src = _src.getUMat(), lut = _lut.getUMat();
1983 _dst.create(src.size(), CV_MAKETYPE(ddepth, dcn));
1984 UMat dst = _dst.getUMat();
1985 int kercn = lcn == 1 ? std::min(4, ocl::predictOptimalVectorWidth(_src, _dst)) : dcn;
1987 ocl::Kernel k("LUT", ocl::core::lut_oclsrc,
1988 format("-D dcn=%d -D lcn=%d -D srcT=%s -D dstT=%s", kercn, lcn,
1989 ocl::typeToStr(src.depth()), ocl::memopTypeToStr(ddepth)));
1993 k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::ReadOnlyNoSize(lut),
1994 ocl::KernelArg::WriteOnly(dst, dcn, kercn));
1996 size_t globalSize[2] = { dst.cols * dcn / kercn, (dst.rows + 3) / 4 };
1997 return k.run(2, globalSize, NULL, false);
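// The classes below are IPP-backed ParallelLoopBody implementations for cv::LUT.
// IppLUTParallelBody_LUTC1 (shared single-channel tables) is compiled out with "#if 0"
// because the referenced PR found no performance benefit; IppLUTParallelBody_LUTCN
// handles 3- and 4-channel 8-bit tables by repacking the interleaved table into planar
// per-channel buffers and calling ippiLUTPalette on each row range.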
2002 #if defined(HAVE_IPP)
2005 #if 0 // there are no performance benefits (PR #2653)
2006 class IppLUTParallelBody_LUTC1 : public ParallelLoopBody
2014 typedef IppStatus (*IppFn)(const Ipp8u* pSrc, int srcStep, void* pDst, int dstStep,
2015 IppiSize roiSize, const void* pTable, int nBitSize);
2020 IppLUTParallelBody_LUTC1(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
2021 : ok(_ok), src_(src), lut_(lut), dst_(dst)
2023 width = dst.cols * dst.channels();
2025 size_t elemSize1 = CV_ELEM_SIZE1(dst.depth());
2028 elemSize1 == 1 ? (IppFn)ippiLUTPalette_8u_C1R :
2029 elemSize1 == 4 ? (IppFn)ippiLUTPalette_8u32u_C1R :
2035 void operator()( const cv::Range& range ) const
2040 const int row0 = range.start;
2041 const int row1 = range.end;
2043 Mat src = src_.rowRange(row0, row1);
2044 Mat dst = dst_.rowRange(row0, row1);
2046 IppiSize sz = { width, dst.rows };
2048 CV_DbgAssert(fn != NULL);
2049 if (fn(src.data, (int)src.step[0], dst.data, (int)dst.step[0], sz, lut_.data, 8) < 0)
2051 setIppErrorStatus();
2056 IppLUTParallelBody_LUTC1(const IppLUTParallelBody_LUTC1&);
2057 IppLUTParallelBody_LUTC1& operator=(const IppLUTParallelBody_LUTC1&);
2061 class IppLUTParallelBody_LUTCN : public ParallelLoopBody
2074 IppLUTParallelBody_LUTCN(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
2075 : ok(_ok), src_(src), lut_(lut), dst_(dst), lutBuffer(NULL)
2077 lutcn = lut.channels();
2078 IppiSize sz256 = {256, 1};
2080 size_t elemSize1 = dst.elemSize1();
2081 CV_DbgAssert(elemSize1 == 1);
2082 lutBuffer = (uchar*)ippMalloc(256 * (int)elemSize1 * 4);
2083 lutTable[0] = lutBuffer + 0;
2084 lutTable[1] = lutBuffer + 1 * 256 * elemSize1;
2085 lutTable[2] = lutBuffer + 2 * 256 * elemSize1;
2086 lutTable[3] = lutBuffer + 3 * 256 * elemSize1;
2088 CV_DbgAssert(lutcn == 3 || lutcn == 4);
2091 IppStatus status = ippiCopy_8u_C3P3R(lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
2094 setIppErrorStatus();
2098 else if (lutcn == 4)
2100 IppStatus status = ippiCopy_8u_C4P4R(lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
2103 setIppErrorStatus();
2111 ~IppLUTParallelBody_LUTCN()
2113 if (lutBuffer != NULL)
2119 void operator()( const cv::Range& range ) const
2124 const int row0 = range.start;
2125 const int row1 = range.end;
2127 Mat src = src_.rowRange(row0, row1);
2128 Mat dst = dst_.rowRange(row0, row1);
2132 if (ippiLUTPalette_8u_C3R(
2133 src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
2134 ippiSize(dst.size()), lutTable, 8) >= 0)
2137 else if (lutcn == 4)
2139 if (ippiLUTPalette_8u_C4R(
2140 src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
2141 ippiSize(dst.size()), lutTable, 8) >= 0)
2144 setIppErrorStatus();
2148 IppLUTParallelBody_LUTCN(const IppLUTParallelBody_LUTCN&);
2149 IppLUTParallelBody_LUTCN& operator=(const IppLUTParallelBody_LUTCN&);
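// LUTParallelBody: plain C++ fallback used when IPP is unavailable or rejects the case.
// The constructor picks the per-depth function from lutTab; operator() applies it to
// the rows of each plane handed out by parallel_for_.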
2154 class LUTParallelBody : public ParallelLoopBody
2164 LUTParallelBody(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
2165 : ok(_ok), src_(src), lut_(lut), dst_(dst)
2167 func = lutTab[lut.depth()];
2168 *ok = (func != NULL);
2171 void operator()( const cv::Range& range ) const
2175 const int row0 = range.start;
2176 const int row1 = range.end;
2178 Mat src = src_.rowRange(row0, row1);
2179 Mat dst = dst_.rowRange(row0, row1);
2181 int cn = src.channels();
2182 int lutcn = lut_.channels();
2184 const Mat* arrays[] = {&src, &dst, 0};
2186 NAryMatIterator it(arrays, ptrs);
2187 int len = (int)it.size;
2189 for( size_t i = 0; i < it.nplanes; i++, ++it )
2190 func(ptrs[0], lut_.ptr(), ptrs[1], len, cn, lutcn);
2193 LUTParallelBody(const LUTParallelBody&);
2194 LUTParallelBody& operator=(const LUTParallelBody&);
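// cv::LUT: dst(i) = lut(src(i)), where src must have 8-bit depth and lut must hold
// exactly 256 continuous entries (either one channel shared by all source channels or
// one channel per source channel).  2-D inputs are handled by the loop bodies above,
// run in parallel once the matrix exceeds roughly 256K elements; other layouts fall
// back to the NAryMatIterator loop at the end.
// Illustrative caller code (an assumption, not part of this file):
//     Mat lut(1, 256, CV_8U);
//     for( int i = 0; i < 256; i++ )
//         lut.at<uchar>(i) = saturate_cast<uchar>(std::pow(i/255.0, 0.5)*255.0);
//     Mat dst;
//     LUT(src8u, lut, dst);                       // gamma-correct an 8-bit image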
2199 void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst )
2201 int cn = _src.channels(), depth = _src.depth();
2202 int lutcn = _lut.channels();
2204 CV_Assert( (lutcn == cn || lutcn == 1) &&
2205 _lut.total() == 256 && _lut.isContinuous() &&
2206 (depth == CV_8U || depth == CV_8S) );
2208 CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
2209 ocl_LUT(_src, _lut, _dst))
2211 Mat src = _src.getMat(), lut = _lut.getMat();
2212 _dst.create(src.dims, src.size, CV_MAKETYPE(_lut.depth(), cn));
2213 Mat dst = _dst.getMat();
2215 if (_src.dims() <= 2)
2218 Ptr<ParallelLoopBody> body;
2219 #if defined(HAVE_IPP)
2220 size_t elemSize1 = CV_ELEM_SIZE1(dst.depth());
2221 #if 0 // there are no performance benefits (PR #2653)
2224 ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTC1(src, lut, dst, &ok);
2229 if ((lutcn == 3 || lutcn == 4) && elemSize1 == 1)
2231 ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTCN(src, lut, dst, &ok);
2235 if (body == NULL || ok == false)
2238 ParallelLoopBody* p = new LUTParallelBody(src, lut, dst, &ok);
2241 if (body != NULL && ok)
2243 Range all(0, dst.rows);
2244 if (dst.total()>>18)
2245 parallel_for_(all, *body, (double)std::max((size_t)1, dst.total()>>16));
2253 LUTFunc func = lutTab[lut.depth()];
2254 CV_Assert( func != 0 );
2256 const Mat* arrays[] = {&src, &dst, 0};
2258 NAryMatIterator it(arrays, ptrs);
2259 int len = (int)it.size;
2261 for( size_t i = 0; i < it.nplanes; i++, ++it )
2262 func(ptrs[0], lut.ptr(), ptrs[1], len, cn, lutcn);
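// ocl_normalize: OpenCL helper for cv::normalize.  Without a mask it reduces to
// convertTo; with a mask and at most 4 channels it runs the "normalizek" kernel, which
// (as an assumption about normalize_oclsrc) computes dst = convertToDT(src*scale + delta)
// only where the mask is non-zero, with HAVE_SCALE / HAVE_DELTA selecting the kernel
// arguments actually passed.  More than 4 channels use the convertTo + masked copyTo
// fallback at the end.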
2269 static bool ocl_normalize( InputArray _src, InputOutputArray _dst, InputArray _mask, int dtype,
2270 double scale, double delta )
2272 UMat src = _src.getUMat();
2275 src.convertTo( _dst, dtype, scale, delta );
2276 else if (src.channels() <= 4)
2278 const ocl::Device & dev = ocl::Device::getDefault();
2280 int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
2281 ddepth = CV_MAT_DEPTH(dtype), wdepth = std::max(CV_32F, std::max(sdepth, ddepth)),
2282 rowsPerWI = dev.isIntel() ? 4 : 1;
2284 float fscale = static_cast<float>(scale), fdelta = static_cast<float>(delta);
2285 bool haveScale = std::fabs(scale - 1) > DBL_EPSILON,
2286 haveZeroScale = !(std::fabs(scale) > DBL_EPSILON),
2287 haveDelta = std::fabs(delta) > DBL_EPSILON,
2288 doubleSupport = dev.doubleFPConfig() > 0;
2290 if (!haveScale && !haveDelta && stype == dtype)
2292 _src.copyTo(_dst, _mask);
2297 _dst.setTo(Scalar(delta), _mask);
2301 if ((sdepth == CV_64F || ddepth == CV_64F) && !doubleSupport)
2305 String opts = format("-D srcT=%s -D dstT=%s -D convertToWT=%s -D cn=%d -D rowsPerWI=%d"
2306 " -D convertToDT=%s -D workT=%s%s%s%s -D srcT1=%s -D dstT1=%s",
2307 ocl::typeToStr(stype), ocl::typeToStr(dtype),
2308 ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]), cn,
2309 rowsPerWI, ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
2310 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
2311 doubleSupport ? " -D DOUBLE_SUPPORT" : "",
2312 haveScale ? " -D HAVE_SCALE" : "",
2313 haveDelta ? " -D HAVE_DELTA" : "",
2314 ocl::typeToStr(sdepth), ocl::typeToStr(ddepth));
2316 ocl::Kernel k("normalizek", ocl::core::normalize_oclsrc, opts);
2320 UMat mask = _mask.getUMat(), dst = _dst.getUMat();
2322 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
2323 maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
2324 dstarg = ocl::KernelArg::ReadWrite(dst);
2329 k.args(srcarg, maskarg, dstarg, fscale, fdelta);
2331 k.args(srcarg, maskarg, dstarg, fscale);
2336 k.args(srcarg, maskarg, dstarg, fdelta);
2338 k.args(srcarg, maskarg, dstarg);
2341 size_t globalsize[2] = { src.cols, (src.rows + rowsPerWI - 1) / rowsPerWI };
2342 return k.run(2, globalsize, NULL, false);
2347 src.convertTo( temp, dtype, scale, delta );
2348 temp.copyTo( _dst, _mask );
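// cv::normalize: NORM_MINMAX linearly maps the source value range [smin, smax] onto
// [min(a,b), max(a,b)]; NORM_L1/NORM_L2/NORM_INF rescale the array so that the chosen
// norm equals a.  Both cases then reduce to convertTo, applied through a temporary and
// a masked copy when a mask is given.
// Illustrative caller code (an assumption, not part of this file):
//     Mat vis;
//     normalize(depth32f, vis, 0, 255, NORM_MINMAX, CV_8U);   // stretch to [0,255]
//     Mat prob;
//     normalize(hist, prob, 1, 0, NORM_L1);                   // entries sum to 1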
2358 void cv::normalize( InputArray _src, InputOutputArray _dst, double a, double b,
2359 int norm_type, int rtype, InputArray _mask )
2361 double scale = 1, shift = 0;
2362 if( norm_type == CV_MINMAX )
2364 double smin = 0, smax = 0;
2365 double dmin = MIN( a, b ), dmax = MAX( a, b );
2366 minMaxLoc( _src, &smin, &smax, 0, 0, _mask );
2367 scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
2368 shift = dmin - smin*scale;
2370 else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
2372 scale = norm( _src, norm_type, _mask );
2373 scale = scale > DBL_EPSILON ? a/scale : 0.;
2377 CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
2379 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
2381 rtype = _dst.fixedType() ? _dst.depth() : depth;
2382 _dst.createSameSize(_src, CV_MAKETYPE(rtype, cn));
2384 CV_OCL_RUN(_dst.isUMat(),
2385 ocl_normalize(_src, _dst, _mask, rtype, scale, shift))
2387 Mat src = _src.getMat(), dst = _dst.getMat();
2389 src.convertTo( dst, rtype, scale, shift );
2393 src.convertTo( temp, rtype, scale, shift );
2394 temp.copyTo( dst, _mask );
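// What follows are the legacy C-API (CvArr*) wrappers.  cvSplit and cvMerge accept up
// to four plane pointers and fall back to cv::mixChannels when only a subset of the
// channels is requested.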
2399 cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 )
2401 void* dptrs[] = { dstarr0, dstarr1, dstarr2, dstarr3 };
2402 cv::Mat src = cv::cvarrToMat(srcarr);
2404 for( i = 0; i < 4; i++ )
2405 nz += dptrs[i] != 0;
2406 CV_Assert( nz > 0 );
2407 std::vector<cv::Mat> dvec(nz);
2408 std::vector<int> pairs(nz*2);
2410 for( i = j = 0; i < 4; i++ )
2414 dvec[j] = cv::cvarrToMat(dptrs[i]);
2415 CV_Assert( dvec[j].size() == src.size() );
2416 CV_Assert( dvec[j].depth() == src.depth() );
2417 CV_Assert( dvec[j].channels() == 1 );
2418 CV_Assert( i < src.channels() );
2424 if( nz == src.channels() )
2425 cv::split( src, dvec );
2428 cv::mixChannels( &src, 1, &dvec[0], nz, &pairs[0], nz );
2434 cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2,
2435 const void* srcarr3, void* dstarr )
2437 const void* sptrs[] = { srcarr0, srcarr1, srcarr2, srcarr3 };
2438 cv::Mat dst = cv::cvarrToMat(dstarr);
2440 for( i = 0; i < 4; i++ )
2441 nz += sptrs[i] != 0;
2442 CV_Assert( nz > 0 );
2443 std::vector<cv::Mat> svec(nz);
2444 std::vector<int> pairs(nz*2);
2446 for( i = j = 0; i < 4; i++ )
2450 svec[j] = cv::cvarrToMat(sptrs[i]);
2451 CV_Assert( svec[j].size == dst.size &&
2452 svec[j].depth() == dst.depth() &&
2453 svec[j].channels() == 1 && i < dst.channels() );
2460 if( nz == dst.channels() )
2461 cv::merge( svec, dst );
2464 cv::mixChannels( &svec[0], nz, &dst, 1, &pairs[0], nz );
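// cvMixChannels forwards to cv::mixChannels: from_to holds pair_count (src, dst)
// channel index pairs, with source channels numbered consecutively across all input
// arrays and destination channels across all output arrays.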
2470 cvMixChannels( const CvArr** src, int src_count,
2471 CvArr** dst, int dst_count,
2472 const int* from_to, int pair_count )
2474 cv::AutoBuffer<cv::Mat> buf(src_count + dst_count);
2477 for( i = 0; i < src_count; i++ )
2478 buf[i] = cv::cvarrToMat(src[i]);
2479 for( i = 0; i < dst_count; i++ )
2480 buf[i+src_count] = cv::cvarrToMat(dst[i]);
2481 cv::mixChannels(&buf[0], src_count, &buf[src_count], dst_count, from_to, pair_count);
2485 cvConvertScaleAbs( const void* srcarr, void* dstarr,
2486 double scale, double shift )
2488 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
2489 CV_Assert( src.size == dst.size && dst.type() == CV_8UC(src.channels()));
2490 cv::convertScaleAbs( src, dst, scale, shift );
2494 cvConvertScale( const void* srcarr, void* dstarr,
2495 double scale, double shift )
2497 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
2499 CV_Assert( src.size == dst.size && src.channels() == dst.channels() );
2500 src.convertTo(dst, dst.type(), scale, shift);
2503 CV_IMPL void cvLUT( const void* srcarr, void* dstarr, const void* lutarr )
2505 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), lut = cv::cvarrToMat(lutarr);
2507 CV_Assert( dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels()) );
2508 cv::LUT( src, lut, dst );
2511 CV_IMPL void cvNormalize( const CvArr* srcarr, CvArr* dstarr,
2512 double a, double b, int norm_type, const CvArr* maskarr )
2514 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
2516 mask = cv::cvarrToMat(maskarr);
2517 CV_Assert( dst.size() == src.size() && src.channels() == dst.channels() );
2518 cv::normalize( src, dst, a, b, norm_type, dst.type(), mask );