1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 /* ////////////////////////////////////////////////////////////////////
45 // Geometrical transforms on images and matrices: rotation, zoom etc.
49 #include "precomp.hpp"
50 #include "opencl_kernels.hpp"
52 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
53 static IppStatus sts = ippInit();
59 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
60 typedef IppStatus (CV_STDCALL* ippiSetFunc)(const void*, void *, int, IppiSize);
61 typedef IppStatus (CV_STDCALL* ippiWarpPerspectiveBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [3][3], int);
62 typedef IppStatus (CV_STDCALL* ippiWarpAffineBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [2][3], int);
63 typedef IppStatus (CV_STDCALL* ippiResizeSqrPixelFunc)(const void*, IppiSize, int, IppiRect, void*, int, IppiRect, double, double, double, double, int, Ipp8u *);
65 template <int channels, typename Type>
66 bool IPPSetSimple(cv::Scalar value, void *dataPointer, int step, IppiSize &size, ippiSetFunc func)
68 Type values[channels];
69 for( int i = 0; i < channels; i++ )
70 values[i] = (Type)value[i];
71 return func(values, dataPointer, step, size) >= 0;
74 bool IPPSet(const cv::Scalar &value, void *dataPointer, int step, IppiSize &size, int channels, int depth)
81 return ippiSet_8u_C1R((Ipp8u)value[0], (Ipp8u *)dataPointer, step, size) >= 0;
83 return ippiSet_16u_C1R((Ipp16u)value[0], (Ipp16u *)dataPointer, step, size) >= 0;
85 return ippiSet_32f_C1R((Ipp32f)value[0], (Ipp32f *)dataPointer, step, size) >= 0;
95 return IPPSetSimple<3, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C3R);
97 return IPPSetSimple<3, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C3R);
99 return IPPSetSimple<3, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C3R);
102 else if( channels == 4 )
107 return IPPSetSimple<4, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C4R);
109 return IPPSetSimple<4, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C4R);
111 return IPPSetSimple<4, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C4R);
119 /************** interpolation formulas and tables ***************/
121 const int INTER_RESIZE_COEF_BITS=11;
122 const int INTER_RESIZE_COEF_SCALE=1 << INTER_RESIZE_COEF_BITS;
124 const int INTER_REMAP_COEF_BITS=15;
125 const int INTER_REMAP_COEF_SCALE=1 << INTER_REMAP_COEF_BITS;
127 static uchar NNDeltaTab_i[INTER_TAB_SIZE2][2];
129 static float BilinearTab_f[INTER_TAB_SIZE2][2][2];
130 static short BilinearTab_i[INTER_TAB_SIZE2][2][2];
133 static short BilinearTab_iC4_buf[INTER_TAB_SIZE2+2][2][8];
134 static short (*BilinearTab_iC4)[2][8] = (short (*)[2][8])alignPtr(BilinearTab_iC4_buf, 16);
137 static float BicubicTab_f[INTER_TAB_SIZE2][4][4];
138 static short BicubicTab_i[INTER_TAB_SIZE2][4][4];
140 static float Lanczos4Tab_f[INTER_TAB_SIZE2][8][8];
141 static short Lanczos4Tab_i[INTER_TAB_SIZE2][8][8];
143 static inline void interpolateLinear( float x, float* coeffs )
149 static inline void interpolateCubic( float x, float* coeffs )
151 const float A = -0.75f;
153 coeffs[0] = ((A*(x + 1) - 5*A)*(x + 1) + 8*A)*(x + 1) - 4*A;
154 coeffs[1] = ((A + 2)*x - (A + 3))*x*x + 1;
155 coeffs[2] = ((A + 2)*(1 - x) - (A + 3))*(1 - x)*(1 - x) + 1;
156 coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];
159 static inline void interpolateLanczos4( float x, float* coeffs )
161 static const double s45 = 0.70710678118654752440084436210485;
162 static const double cs[][2]=
163 {{1, 0}, {-s45, -s45}, {0, 1}, {s45, -s45}, {-1, 0}, {s45, s45}, {0, -1}, {-s45, s45}};
165 if( x < FLT_EPSILON )
167 for( int i = 0; i < 8; i++ )
174 double y0=-(x+3)*CV_PI*0.25, s0 = sin(y0), c0=cos(y0);
175 for(int i = 0; i < 8; i++ )
177 double y = -(x+3-i)*CV_PI*0.25;
178 coeffs[i] = (float)((cs[i][0]*s0 + cs[i][1]*c0)/(y*y));
183 for(int i = 0; i < 8; i++ )
187 static void initInterTab1D(int method, float* tab, int tabsz)
189 float scale = 1.f/tabsz;
190 if( method == INTER_LINEAR )
192 for( int i = 0; i < tabsz; i++, tab += 2 )
193 interpolateLinear( i*scale, tab );
195 else if( method == INTER_CUBIC )
197 for( int i = 0; i < tabsz; i++, tab += 4 )
198 interpolateCubic( i*scale, tab );
200 else if( method == INTER_LANCZOS4 )
202 for( int i = 0; i < tabsz; i++, tab += 8 )
203 interpolateLanczos4( i*scale, tab );
206 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
210 static const void* initInterTab2D( int method, bool fixpt )
212 static bool inittab[INTER_MAX+1] = {false};
216 if( method == INTER_LINEAR )
217 tab = BilinearTab_f[0][0], itab = BilinearTab_i[0][0], ksize=2;
218 else if( method == INTER_CUBIC )
219 tab = BicubicTab_f[0][0], itab = BicubicTab_i[0][0], ksize=4;
220 else if( method == INTER_LANCZOS4 )
221 tab = Lanczos4Tab_f[0][0], itab = Lanczos4Tab_i[0][0], ksize=8;
223 CV_Error( CV_StsBadArg, "Unknown/unsupported interpolation type" );
225 if( !inittab[method] )
227 AutoBuffer<float> _tab(8*INTER_TAB_SIZE);
229 initInterTab1D(method, _tab, INTER_TAB_SIZE);
230 for( i = 0; i < INTER_TAB_SIZE; i++ )
231 for( j = 0; j < INTER_TAB_SIZE; j++, tab += ksize*ksize, itab += ksize*ksize )
234 NNDeltaTab_i[i*INTER_TAB_SIZE+j][0] = j < INTER_TAB_SIZE/2;
235 NNDeltaTab_i[i*INTER_TAB_SIZE+j][1] = i < INTER_TAB_SIZE/2;
237 for( k1 = 0; k1 < ksize; k1++ )
239 float vy = _tab[i*ksize + k1];
240 for( k2 = 0; k2 < ksize; k2++ )
242 float v = vy*_tab[j*ksize + k2];
243 tab[k1*ksize + k2] = v;
244 isum += itab[k1*ksize + k2] = saturate_cast<short>(v*INTER_REMAP_COEF_SCALE);
248 if( isum != INTER_REMAP_COEF_SCALE )
250 int diff = isum - INTER_REMAP_COEF_SCALE;
251 int ksize2 = ksize/2, Mk1=ksize2, Mk2=ksize2, mk1=ksize2, mk2=ksize2;
252 for( k1 = ksize2; k1 < ksize2+2; k1++ )
253 for( k2 = ksize2; k2 < ksize2+2; k2++ )
255 if( itab[k1*ksize+k2] < itab[mk1*ksize+mk2] )
257 else if( itab[k1*ksize+k2] > itab[Mk1*ksize+Mk2] )
261 itab[Mk1*ksize + Mk2] = (short)(itab[Mk1*ksize + Mk2] - diff);
263 itab[mk1*ksize + mk2] = (short)(itab[mk1*ksize + mk2] - diff);
266 tab -= INTER_TAB_SIZE2*ksize*ksize;
267 itab -= INTER_TAB_SIZE2*ksize*ksize;
269 if( method == INTER_LINEAR )
271 for( i = 0; i < INTER_TAB_SIZE2; i++ )
272 for( j = 0; j < 4; j++ )
274 BilinearTab_iC4[i][0][j*2] = BilinearTab_i[i][0][0];
275 BilinearTab_iC4[i][0][j*2+1] = BilinearTab_i[i][0][1];
276 BilinearTab_iC4[i][1][j*2] = BilinearTab_i[i][1][0];
277 BilinearTab_iC4[i][1][j*2+1] = BilinearTab_i[i][1][1];
281 inittab[method] = true;
283 return fixpt ? (const void*)itab : (const void*)tab;
287 static bool initAllInterTab2D()
289 return initInterTab2D( INTER_LINEAR, false ) &&
290 initInterTab2D( INTER_LINEAR, true ) &&
291 initInterTab2D( INTER_CUBIC, false ) &&
292 initInterTab2D( INTER_CUBIC, true ) &&
293 initInterTab2D( INTER_LANCZOS4, false ) &&
294 initInterTab2D( INTER_LANCZOS4, true );
297 static volatile bool doInitAllInterTab2D = initAllInterTab2D();
300 template<typename ST, typename DT> struct Cast
305 DT operator()(ST val) const { return saturate_cast<DT>(val); }
308 template<typename ST, typename DT, int bits> struct FixedPtCast
312 enum { SHIFT = bits, DELTA = 1 << (bits-1) };
314 DT operator()(ST val) const { return saturate_cast<DT>((val + DELTA)>>SHIFT); }
317 /****************************************************************************************\
319 \****************************************************************************************/
321 class resizeNNInvoker :
322 public ParallelLoopBody
325 resizeNNInvoker(const Mat& _src, Mat &_dst, int *_x_ofs, int _pix_size4, double _ify) :
326 ParallelLoopBody(), src(_src), dst(_dst), x_ofs(_x_ofs), pix_size4(_pix_size4),
331 virtual void operator() (const Range& range) const
333 Size ssize = src.size(), dsize = dst.size();
334 int y, x, pix_size = (int)src.elemSize();
336 for( y = range.start; y < range.end; y++ )
338 uchar* D = dst.data + dst.step*y;
339 int sy = std::min(cvFloor(y*ify), ssize.height-1);
340 const uchar* S = src.data + src.step*sy;
345 for( x = 0; x <= dsize.width - 2; x += 2 )
347 uchar t0 = S[x_ofs[x]];
348 uchar t1 = S[x_ofs[x+1]];
353 for( ; x < dsize.width; x++ )
357 for( x = 0; x < dsize.width; x++ )
358 *(ushort*)(D + x*2) = *(ushort*)(S + x_ofs[x]);
361 for( x = 0; x < dsize.width; x++, D += 3 )
363 const uchar* _tS = S + x_ofs[x];
364 D[0] = _tS[0]; D[1] = _tS[1]; D[2] = _tS[2];
368 for( x = 0; x < dsize.width; x++ )
369 *(int*)(D + x*4) = *(int*)(S + x_ofs[x]);
372 for( x = 0; x < dsize.width; x++, D += 6 )
374 const ushort* _tS = (const ushort*)(S + x_ofs[x]);
375 ushort* _tD = (ushort*)D;
376 _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
380 for( x = 0; x < dsize.width; x++, D += 8 )
382 const int* _tS = (const int*)(S + x_ofs[x]);
384 _tD[0] = _tS[0]; _tD[1] = _tS[1];
388 for( x = 0; x < dsize.width; x++, D += 12 )
390 const int* _tS = (const int*)(S + x_ofs[x]);
392 _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
396 for( x = 0; x < dsize.width; x++, D += pix_size )
398 const int* _tS = (const int*)(S + x_ofs[x]);
400 for( int k = 0; k < pix_size4; k++ )
410 int* x_ofs, pix_size4;
413 resizeNNInvoker(const resizeNNInvoker&);
414 resizeNNInvoker& operator=(const resizeNNInvoker&);
418 resizeNN( const Mat& src, Mat& dst, double fx, double fy )
420 Size ssize = src.size(), dsize = dst.size();
421 AutoBuffer<int> _x_ofs(dsize.width);
423 int pix_size = (int)src.elemSize();
424 int pix_size4 = (int)(pix_size / sizeof(int));
425 double ifx = 1./fx, ify = 1./fy;
428 for( x = 0; x < dsize.width; x++ )
430 int sx = cvFloor(x*ifx);
431 x_ofs[x] = std::min(sx, ssize.width-1)*pix_size;
434 Range range(0, dsize.height);
435 resizeNNInvoker invoker(src, dst, x_ofs, pix_size4, ify);
436 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
442 int operator()(const uchar**, uchar*, const uchar*, int ) const { return 0; }
447 int operator()(const uchar**, uchar**, int, const int*,
448 const uchar*, int, int, int, int, int) const { return 0; }
453 struct VResizeLinearVec_32s8u
455 int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
457 if( !checkHardwareSupport(CV_CPU_SSE2) )
460 const int** src = (const int**)_src;
461 const short* beta = (const short*)_beta;
462 const int *S0 = src[0], *S1 = src[1];
464 __m128i b0 = _mm_set1_epi16(beta[0]), b1 = _mm_set1_epi16(beta[1]);
465 __m128i delta = _mm_set1_epi16(2);
467 if( (((size_t)S0|(size_t)S1)&15) == 0 )
468 for( ; x <= width - 16; x += 16 )
470 __m128i x0, x1, x2, y0, y1, y2;
471 x0 = _mm_load_si128((const __m128i*)(S0 + x));
472 x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
473 y0 = _mm_load_si128((const __m128i*)(S1 + x));
474 y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));
475 x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
476 y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
478 x1 = _mm_load_si128((const __m128i*)(S0 + x + 8));
479 x2 = _mm_load_si128((const __m128i*)(S0 + x + 12));
480 y1 = _mm_load_si128((const __m128i*)(S1 + x + 8));
481 y2 = _mm_load_si128((const __m128i*)(S1 + x + 12));
482 x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
483 y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
485 x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
486 x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
488 x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
489 x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
490 _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
493 for( ; x <= width - 16; x += 16 )
495 __m128i x0, x1, x2, y0, y1, y2;
496 x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
497 x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
498 y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
499 y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));
500 x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
501 y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
503 x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 8));
504 x2 = _mm_loadu_si128((const __m128i*)(S0 + x + 12));
505 y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 8));
506 y2 = _mm_loadu_si128((const __m128i*)(S1 + x + 12));
507 x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
508 y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
510 x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
511 x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
513 x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
514 x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
515 _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
518 for( ; x < width - 4; x += 4 )
521 x0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S0 + x)), 4);
522 y0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S1 + x)), 4);
523 x0 = _mm_packs_epi32(x0, x0);
524 y0 = _mm_packs_epi32(y0, y0);
525 x0 = _mm_adds_epi16(_mm_mulhi_epi16(x0, b0), _mm_mulhi_epi16(y0, b1));
526 x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
527 x0 = _mm_packus_epi16(x0, x0);
528 *(int*)(dst + x) = _mm_cvtsi128_si32(x0);
536 template<int shiftval> struct VResizeLinearVec_32f16
538 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
540 if( !checkHardwareSupport(CV_CPU_SSE2) )
543 const float** src = (const float**)_src;
544 const float* beta = (const float*)_beta;
545 const float *S0 = src[0], *S1 = src[1];
546 ushort* dst = (ushort*)_dst;
549 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
550 __m128i preshift = _mm_set1_epi32(shiftval);
551 __m128i postshift = _mm_set1_epi16((short)shiftval);
553 if( (((size_t)S0|(size_t)S1)&15) == 0 )
554 for( ; x <= width - 16; x += 16 )
556 __m128 x0, x1, y0, y1;
558 x0 = _mm_load_ps(S0 + x);
559 x1 = _mm_load_ps(S0 + x + 4);
560 y0 = _mm_load_ps(S1 + x);
561 y1 = _mm_load_ps(S1 + x + 4);
563 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
564 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
565 t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
566 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
567 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
569 x0 = _mm_load_ps(S0 + x + 8);
570 x1 = _mm_load_ps(S0 + x + 12);
571 y0 = _mm_load_ps(S1 + x + 8);
572 y1 = _mm_load_ps(S1 + x + 12);
574 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
575 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
576 t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
577 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
578 t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
580 _mm_storeu_si128( (__m128i*)(dst + x), t0);
581 _mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
584 for( ; x <= width - 16; x += 16 )
586 __m128 x0, x1, y0, y1;
588 x0 = _mm_loadu_ps(S0 + x);
589 x1 = _mm_loadu_ps(S0 + x + 4);
590 y0 = _mm_loadu_ps(S1 + x);
591 y1 = _mm_loadu_ps(S1 + x + 4);
593 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
594 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
595 t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
596 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
597 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
599 x0 = _mm_loadu_ps(S0 + x + 8);
600 x1 = _mm_loadu_ps(S0 + x + 12);
601 y0 = _mm_loadu_ps(S1 + x + 8);
602 y1 = _mm_loadu_ps(S1 + x + 12);
604 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
605 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
606 t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
607 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
608 t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
610 _mm_storeu_si128( (__m128i*)(dst + x), t0);
611 _mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
614 for( ; x < width - 4; x += 4 )
618 x0 = _mm_loadu_ps(S0 + x);
619 y0 = _mm_loadu_ps(S1 + x);
621 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
622 t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
623 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t0), postshift);
624 _mm_storel_epi64( (__m128i*)(dst + x), t0);
631 typedef VResizeLinearVec_32f16<SHRT_MIN> VResizeLinearVec_32f16u;
632 typedef VResizeLinearVec_32f16<0> VResizeLinearVec_32f16s;
634 struct VResizeLinearVec_32f
636 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
638 if( !checkHardwareSupport(CV_CPU_SSE) )
641 const float** src = (const float**)_src;
642 const float* beta = (const float*)_beta;
643 const float *S0 = src[0], *S1 = src[1];
644 float* dst = (float*)_dst;
647 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
649 if( (((size_t)S0|(size_t)S1)&15) == 0 )
650 for( ; x <= width - 8; x += 8 )
652 __m128 x0, x1, y0, y1;
653 x0 = _mm_load_ps(S0 + x);
654 x1 = _mm_load_ps(S0 + x + 4);
655 y0 = _mm_load_ps(S1 + x);
656 y1 = _mm_load_ps(S1 + x + 4);
658 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
659 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
661 _mm_storeu_ps( dst + x, x0);
662 _mm_storeu_ps( dst + x + 4, x1);
665 for( ; x <= width - 8; x += 8 )
667 __m128 x0, x1, y0, y1;
668 x0 = _mm_loadu_ps(S0 + x);
669 x1 = _mm_loadu_ps(S0 + x + 4);
670 y0 = _mm_loadu_ps(S1 + x);
671 y1 = _mm_loadu_ps(S1 + x + 4);
673 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
674 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
676 _mm_storeu_ps( dst + x, x0);
677 _mm_storeu_ps( dst + x + 4, x1);
685 struct VResizeCubicVec_32s8u
687 int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
689 if( !checkHardwareSupport(CV_CPU_SSE2) )
692 const int** src = (const int**)_src;
693 const short* beta = (const short*)_beta;
694 const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
696 float scale = 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE);
697 __m128 b0 = _mm_set1_ps(beta[0]*scale), b1 = _mm_set1_ps(beta[1]*scale),
698 b2 = _mm_set1_ps(beta[2]*scale), b3 = _mm_set1_ps(beta[3]*scale);
700 if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&15) == 0 )
701 for( ; x <= width - 8; x += 8 )
703 __m128i x0, x1, y0, y1;
704 __m128 s0, s1, f0, f1;
705 x0 = _mm_load_si128((const __m128i*)(S0 + x));
706 x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
707 y0 = _mm_load_si128((const __m128i*)(S1 + x));
708 y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));
710 s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
711 s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
712 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
713 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
714 s0 = _mm_add_ps(s0, f0);
715 s1 = _mm_add_ps(s1, f1);
717 x0 = _mm_load_si128((const __m128i*)(S2 + x));
718 x1 = _mm_load_si128((const __m128i*)(S2 + x + 4));
719 y0 = _mm_load_si128((const __m128i*)(S3 + x));
720 y1 = _mm_load_si128((const __m128i*)(S3 + x + 4));
722 f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
723 f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
724 s0 = _mm_add_ps(s0, f0);
725 s1 = _mm_add_ps(s1, f1);
726 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
727 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
728 s0 = _mm_add_ps(s0, f0);
729 s1 = _mm_add_ps(s1, f1);
731 x0 = _mm_cvtps_epi32(s0);
732 x1 = _mm_cvtps_epi32(s1);
734 x0 = _mm_packs_epi32(x0, x1);
735 _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
738 for( ; x <= width - 8; x += 8 )
740 __m128i x0, x1, y0, y1;
741 __m128 s0, s1, f0, f1;
742 x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
743 x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
744 y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
745 y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));
747 s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
748 s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
749 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
750 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
751 s0 = _mm_add_ps(s0, f0);
752 s1 = _mm_add_ps(s1, f1);
754 x0 = _mm_loadu_si128((const __m128i*)(S2 + x));
755 x1 = _mm_loadu_si128((const __m128i*)(S2 + x + 4));
756 y0 = _mm_loadu_si128((const __m128i*)(S3 + x));
757 y1 = _mm_loadu_si128((const __m128i*)(S3 + x + 4));
759 f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
760 f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
761 s0 = _mm_add_ps(s0, f0);
762 s1 = _mm_add_ps(s1, f1);
763 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
764 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
765 s0 = _mm_add_ps(s0, f0);
766 s1 = _mm_add_ps(s1, f1);
768 x0 = _mm_cvtps_epi32(s0);
769 x1 = _mm_cvtps_epi32(s1);
771 x0 = _mm_packs_epi32(x0, x1);
772 _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
780 template<int shiftval> struct VResizeCubicVec_32f16
782 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
784 if( !checkHardwareSupport(CV_CPU_SSE2) )
787 const float** src = (const float**)_src;
788 const float* beta = (const float*)_beta;
789 const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
790 ushort* dst = (ushort*)_dst;
792 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
793 b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);
794 __m128i preshift = _mm_set1_epi32(shiftval);
795 __m128i postshift = _mm_set1_epi16((short)shiftval);
797 for( ; x <= width - 8; x += 8 )
799 __m128 x0, x1, y0, y1, s0, s1;
801 x0 = _mm_loadu_ps(S0 + x);
802 x1 = _mm_loadu_ps(S0 + x + 4);
803 y0 = _mm_loadu_ps(S1 + x);
804 y1 = _mm_loadu_ps(S1 + x + 4);
806 s0 = _mm_mul_ps(x0, b0);
807 s1 = _mm_mul_ps(x1, b0);
808 y0 = _mm_mul_ps(y0, b1);
809 y1 = _mm_mul_ps(y1, b1);
810 s0 = _mm_add_ps(s0, y0);
811 s1 = _mm_add_ps(s1, y1);
813 x0 = _mm_loadu_ps(S2 + x);
814 x1 = _mm_loadu_ps(S2 + x + 4);
815 y0 = _mm_loadu_ps(S3 + x);
816 y1 = _mm_loadu_ps(S3 + x + 4);
818 x0 = _mm_mul_ps(x0, b2);
819 x1 = _mm_mul_ps(x1, b2);
820 y0 = _mm_mul_ps(y0, b3);
821 y1 = _mm_mul_ps(y1, b3);
822 s0 = _mm_add_ps(s0, x0);
823 s1 = _mm_add_ps(s1, x1);
824 s0 = _mm_add_ps(s0, y0);
825 s1 = _mm_add_ps(s1, y1);
827 t0 = _mm_add_epi32(_mm_cvtps_epi32(s0), preshift);
828 t1 = _mm_add_epi32(_mm_cvtps_epi32(s1), preshift);
830 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t1), postshift);
831 _mm_storeu_si128( (__m128i*)(dst + x), t0);
838 typedef VResizeCubicVec_32f16<SHRT_MIN> VResizeCubicVec_32f16u;
839 typedef VResizeCubicVec_32f16<0> VResizeCubicVec_32f16s;
841 struct VResizeCubicVec_32f
843 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
845 if( !checkHardwareSupport(CV_CPU_SSE) )
848 const float** src = (const float**)_src;
849 const float* beta = (const float*)_beta;
850 const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
851 float* dst = (float*)_dst;
853 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
854 b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);
856 for( ; x <= width - 8; x += 8 )
858 __m128 x0, x1, y0, y1, s0, s1;
859 x0 = _mm_loadu_ps(S0 + x);
860 x1 = _mm_loadu_ps(S0 + x + 4);
861 y0 = _mm_loadu_ps(S1 + x);
862 y1 = _mm_loadu_ps(S1 + x + 4);
864 s0 = _mm_mul_ps(x0, b0);
865 s1 = _mm_mul_ps(x1, b0);
866 y0 = _mm_mul_ps(y0, b1);
867 y1 = _mm_mul_ps(y1, b1);
868 s0 = _mm_add_ps(s0, y0);
869 s1 = _mm_add_ps(s1, y1);
871 x0 = _mm_loadu_ps(S2 + x);
872 x1 = _mm_loadu_ps(S2 + x + 4);
873 y0 = _mm_loadu_ps(S3 + x);
874 y1 = _mm_loadu_ps(S3 + x + 4);
876 x0 = _mm_mul_ps(x0, b2);
877 x1 = _mm_mul_ps(x1, b2);
878 y0 = _mm_mul_ps(y0, b3);
879 y1 = _mm_mul_ps(y1, b3);
880 s0 = _mm_add_ps(s0, x0);
881 s1 = _mm_add_ps(s1, x1);
882 s0 = _mm_add_ps(s0, y0);
883 s1 = _mm_add_ps(s1, y1);
885 _mm_storeu_ps( dst + x, s0);
886 _mm_storeu_ps( dst + x + 4, s1);
895 typedef VResizeNoVec VResizeLinearVec_32s8u;
896 typedef VResizeNoVec VResizeLinearVec_32f16u;
897 typedef VResizeNoVec VResizeLinearVec_32f16s;
898 typedef VResizeNoVec VResizeLinearVec_32f;
900 typedef VResizeNoVec VResizeCubicVec_32s8u;
901 typedef VResizeNoVec VResizeCubicVec_32f16u;
902 typedef VResizeNoVec VResizeCubicVec_32f16s;
903 typedef VResizeNoVec VResizeCubicVec_32f;
907 typedef HResizeNoVec HResizeLinearVec_8u32s;
908 typedef HResizeNoVec HResizeLinearVec_16u32f;
909 typedef HResizeNoVec HResizeLinearVec_16s32f;
910 typedef HResizeNoVec HResizeLinearVec_32f;
911 typedef HResizeNoVec HResizeLinearVec_64f;
914 template<typename T, typename WT, typename AT, int ONE, class VecOp>
917 typedef T value_type;
919 typedef AT alpha_type;
921 void operator()(const T** src, WT** dst, int count,
922 const int* xofs, const AT* alpha,
923 int swidth, int dwidth, int cn, int xmin, int xmax ) const
928 int dx0 = vecOp((const uchar**)src, (uchar**)dst, count,
929 xofs, (const uchar*)alpha, swidth, dwidth, cn, xmin, xmax );
931 for( k = 0; k <= count - 2; k++ )
933 const T *S0 = src[k], *S1 = src[k+1];
934 WT *D0 = dst[k], *D1 = dst[k+1];
935 for( dx = dx0; dx < xmax; dx++ )
938 WT a0 = alpha[dx*2], a1 = alpha[dx*2+1];
939 WT t0 = S0[sx]*a0 + S0[sx + cn]*a1;
940 WT t1 = S1[sx]*a0 + S1[sx + cn]*a1;
941 D0[dx] = t0; D1[dx] = t1;
944 for( ; dx < dwidth; dx++ )
947 D0[dx] = WT(S0[sx]*ONE); D1[dx] = WT(S1[sx]*ONE);
951 for( ; k < count; k++ )
955 for( dx = 0; dx < xmax; dx++ )
958 D[dx] = S[sx]*alpha[dx*2] + S[sx+cn]*alpha[dx*2+1];
961 for( ; dx < dwidth; dx++ )
962 D[dx] = WT(S[xofs[dx]]*ONE);
968 template<typename T, typename WT, typename AT, class CastOp, class VecOp>
971 typedef T value_type;
973 typedef AT alpha_type;
975 void operator()(const WT** src, T* dst, const AT* beta, int width ) const
977 WT b0 = beta[0], b1 = beta[1];
978 const WT *S0 = src[0], *S1 = src[1];
982 int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
983 #if CV_ENABLE_UNROLLED
984 for( ; x <= width - 4; x += 4 )
987 t0 = S0[x]*b0 + S1[x]*b1;
988 t1 = S0[x+1]*b0 + S1[x+1]*b1;
989 dst[x] = castOp(t0); dst[x+1] = castOp(t1);
990 t0 = S0[x+2]*b0 + S1[x+2]*b1;
991 t1 = S0[x+3]*b0 + S1[x+3]*b1;
992 dst[x+2] = castOp(t0); dst[x+3] = castOp(t1);
995 for( ; x < width; x++ )
996 dst[x] = castOp(S0[x]*b0 + S1[x]*b1);
1001 struct VResizeLinear<uchar, int, short, FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>, VResizeLinearVec_32s8u>
1003 typedef uchar value_type;
1004 typedef int buf_type;
1005 typedef short alpha_type;
1007 void operator()(const buf_type** src, value_type* dst, const alpha_type* beta, int width ) const
1009 alpha_type b0 = beta[0], b1 = beta[1];
1010 const buf_type *S0 = src[0], *S1 = src[1];
1011 VResizeLinearVec_32s8u vecOp;
1013 int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
1014 #if CV_ENABLE_UNROLLED
1015 for( ; x <= width - 4; x += 4 )
1017 dst[x+0] = uchar(( ((b0 * (S0[x+0] >> 4)) >> 16) + ((b1 * (S1[x+0] >> 4)) >> 16) + 2)>>2);
1018 dst[x+1] = uchar(( ((b0 * (S0[x+1] >> 4)) >> 16) + ((b1 * (S1[x+1] >> 4)) >> 16) + 2)>>2);
1019 dst[x+2] = uchar(( ((b0 * (S0[x+2] >> 4)) >> 16) + ((b1 * (S1[x+2] >> 4)) >> 16) + 2)>>2);
1020 dst[x+3] = uchar(( ((b0 * (S0[x+3] >> 4)) >> 16) + ((b1 * (S1[x+3] >> 4)) >> 16) + 2)>>2);
1023 for( ; x < width; x++ )
1024 dst[x] = uchar(( ((b0 * (S0[x] >> 4)) >> 16) + ((b1 * (S1[x] >> 4)) >> 16) + 2)>>2);
1029 template<typename T, typename WT, typename AT>
1032 typedef T value_type;
1033 typedef WT buf_type;
1034 typedef AT alpha_type;
1036 void operator()(const T** src, WT** dst, int count,
1037 const int* xofs, const AT* alpha,
1038 int swidth, int dwidth, int cn, int xmin, int xmax ) const
1040 for( int k = 0; k < count; k++ )
1042 const T *S = src[k];
1044 int dx = 0, limit = xmin;
1047 for( ; dx < limit; dx++, alpha += 4 )
1049 int j, sx = xofs[dx] - cn;
1051 for( j = 0; j < 4; j++ )
1053 int sxj = sx + j*cn;
1054 if( (unsigned)sxj >= (unsigned)swidth )
1058 while( sxj >= swidth )
1061 v += S[sxj]*alpha[j];
1065 if( limit == dwidth )
1067 for( ; dx < xmax; dx++, alpha += 4 )
1070 D[dx] = S[sx-cn]*alpha[0] + S[sx]*alpha[1] +
1071 S[sx+cn]*alpha[2] + S[sx+cn*2]*alpha[3];
1081 template<typename T, typename WT, typename AT, class CastOp, class VecOp>
1084 typedef T value_type;
1085 typedef WT buf_type;
1086 typedef AT alpha_type;
1088 void operator()(const WT** src, T* dst, const AT* beta, int width ) const
1090 WT b0 = beta[0], b1 = beta[1], b2 = beta[2], b3 = beta[3];
1091 const WT *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
1095 int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
1096 for( ; x < width; x++ )
1097 dst[x] = castOp(S0[x]*b0 + S1[x]*b1 + S2[x]*b2 + S3[x]*b3);
1102 template<typename T, typename WT, typename AT>
1103 struct HResizeLanczos4
1105 typedef T value_type;
1106 typedef WT buf_type;
1107 typedef AT alpha_type;
1109 void operator()(const T** src, WT** dst, int count,
1110 const int* xofs, const AT* alpha,
1111 int swidth, int dwidth, int cn, int xmin, int xmax ) const
1113 for( int k = 0; k < count; k++ )
1115 const T *S = src[k];
1117 int dx = 0, limit = xmin;
1120 for( ; dx < limit; dx++, alpha += 8 )
1122 int j, sx = xofs[dx] - cn*3;
1124 for( j = 0; j < 8; j++ )
1126 int sxj = sx + j*cn;
1127 if( (unsigned)sxj >= (unsigned)swidth )
1131 while( sxj >= swidth )
1134 v += S[sxj]*alpha[j];
1138 if( limit == dwidth )
1140 for( ; dx < xmax; dx++, alpha += 8 )
1143 D[dx] = S[sx-cn*3]*alpha[0] + S[sx-cn*2]*alpha[1] +
1144 S[sx-cn]*alpha[2] + S[sx]*alpha[3] +
1145 S[sx+cn]*alpha[4] + S[sx+cn*2]*alpha[5] +
1146 S[sx+cn*3]*alpha[6] + S[sx+cn*4]*alpha[7];
1156 template<typename T, typename WT, typename AT, class CastOp, class VecOp>
1157 struct VResizeLanczos4
1159 typedef T value_type;
1160 typedef WT buf_type;
1161 typedef AT alpha_type;
1163 void operator()(const WT** src, T* dst, const AT* beta, int width ) const
1167 int k, x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
1168 #if CV_ENABLE_UNROLLED
1169 for( ; x <= width - 4; x += 4 )
1172 const WT* S = src[0];
1173 WT s0 = S[x]*b, s1 = S[x+1]*b, s2 = S[x+2]*b, s3 = S[x+3]*b;
1175 for( k = 1; k < 8; k++ )
1177 b = beta[k]; S = src[k];
1178 s0 += S[x]*b; s1 += S[x+1]*b;
1179 s2 += S[x+2]*b; s3 += S[x+3]*b;
1182 dst[x] = castOp(s0); dst[x+1] = castOp(s1);
1183 dst[x+2] = castOp(s2); dst[x+3] = castOp(s3);
1186 for( ; x < width; x++ )
1188 dst[x] = castOp(src[0][x]*beta[0] + src[1][x]*beta[1] +
1189 src[2][x]*beta[2] + src[3][x]*beta[3] + src[4][x]*beta[4] +
1190 src[5][x]*beta[5] + src[6][x]*beta[6] + src[7][x]*beta[7]);
1196 static inline int clip(int x, int a, int b)
1198 return x >= a ? (x < b ? x : b-1) : a;
1201 static const int MAX_ESIZE=16;
1203 template <typename HResize, typename VResize>
1204 class resizeGeneric_Invoker :
1205 public ParallelLoopBody
1208 typedef typename HResize::value_type T;
1209 typedef typename HResize::buf_type WT;
1210 typedef typename HResize::alpha_type AT;
1212 resizeGeneric_Invoker(const Mat& _src, Mat &_dst, const int *_xofs, const int *_yofs,
1213 const AT* _alpha, const AT* __beta, const Size& _ssize, const Size &_dsize,
1214 int _ksize, int _xmin, int _xmax) :
1215 ParallelLoopBody(), src(_src), dst(_dst), xofs(_xofs), yofs(_yofs),
1216 alpha(_alpha), _beta(__beta), ssize(_ssize), dsize(_dsize),
1217 ksize(_ksize), xmin(_xmin), xmax(_xmax)
1221 virtual void operator() (const Range& range) const
1223 int dy, cn = src.channels();
1227 int bufstep = (int)alignSize(dsize.width, 16);
1228 AutoBuffer<WT> _buffer(bufstep*ksize);
1229 const T* srows[MAX_ESIZE]={0};
1230 WT* rows[MAX_ESIZE]={0};
1231 int prev_sy[MAX_ESIZE];
1233 for(int k = 0; k < ksize; k++ )
1236 rows[k] = (WT*)_buffer + bufstep*k;
1239 const AT* beta = _beta + ksize * range.start;
1241 for( dy = range.start; dy < range.end; dy++, beta += ksize )
1243 int sy0 = yofs[dy], k0=ksize, k1=0, ksize2 = ksize/2;
1245 for(int k = 0; k < ksize; k++ )
1247 int sy = clip(sy0 - ksize2 + 1 + k, 0, ssize.height);
1248 for( k1 = std::max(k1, k); k1 < ksize; k1++ )
1250 if( sy == prev_sy[k1] ) // if the sy-th row has been computed already, reuse it.
1253 memcpy( rows[k], rows[k1], bufstep*sizeof(rows[0][0]) );
1258 k0 = std::min(k0, k); // remember the first row that needs to be computed
1259 srows[k] = (T*)(src.data + src.step*sy);
1264 hresize( (const T**)(srows + k0), (WT**)(rows + k0), ksize - k0, xofs, (const AT*)(alpha),
1265 ssize.width, dsize.width, cn, xmin, xmax );
1266 vresize( (const WT**)rows, (T*)(dst.data + dst.step*dy), beta, dsize.width );
1273 const int* xofs, *yofs;
1274 const AT* alpha, *_beta;
1276 int ksize, xmin, xmax;
1279 template<class HResize, class VResize>
1280 static void resizeGeneric_( const Mat& src, Mat& dst,
1281 const int* xofs, const void* _alpha,
1282 const int* yofs, const void* _beta,
1283 int xmin, int xmax, int ksize )
1285 typedef typename HResize::alpha_type AT;
1287 const AT* beta = (const AT*)_beta;
1288 Size ssize = src.size(), dsize = dst.size();
1289 int cn = src.channels();
1294 // image resize is a separable operation. In case of not too strong
1296 Range range(0, dsize.height);
1297 resizeGeneric_Invoker<HResize, VResize> invoker(src, dst, xofs, yofs, (const AT*)_alpha, beta,
1298 ssize, dsize, ksize, xmin, xmax);
1299 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
1302 template <typename T, typename WT>
1303 struct ResizeAreaFastNoVec
1305 ResizeAreaFastNoVec(int, int) { }
1306 ResizeAreaFastNoVec(int, int, int, int) { }
1307 int operator() (const T*, T*, int) const
1312 class ResizeAreaFastVec_SIMD_8u
1315 ResizeAreaFastVec_SIMD_8u(int _cn, int _step) :
1316 cn(_cn), step(_step)
1318 use_simd = checkHardwareSupport(CV_CPU_SSE2);
1321 int operator() (const uchar* S, uchar* D, int w) const
1327 const uchar* S0 = S;
1328 const uchar* S1 = S0 + step;
1329 __m128i zero = _mm_setzero_si128();
1330 __m128i delta2 = _mm_set1_epi16(2);
1334 __m128i masklow = _mm_set1_epi16(0x00ff);
1335 for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
1337 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1338 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1340 __m128i s0 = _mm_add_epi16(_mm_srli_epi16(r0, 8), _mm_and_si128(r0, masklow));
1341 __m128i s1 = _mm_add_epi16(_mm_srli_epi16(r1, 8), _mm_and_si128(r1, masklow));
1342 s0 = _mm_add_epi16(_mm_add_epi16(s0, s1), delta2);
1343 s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
1345 _mm_storel_epi64((__m128i*)D, s0);
1349 for ( ; dx <= w - 11; dx += 6, S0 += 12, S1 += 12, D += 6)
1351 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1352 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1354 __m128i r0_16l = _mm_unpacklo_epi8(r0, zero);
1355 __m128i r0_16h = _mm_unpacklo_epi8(_mm_srli_si128(r0, 6), zero);
1356 __m128i r1_16l = _mm_unpacklo_epi8(r1, zero);
1357 __m128i r1_16h = _mm_unpacklo_epi8(_mm_srli_si128(r1, 6), zero);
1359 __m128i s0 = _mm_add_epi16(r0_16l, _mm_srli_si128(r0_16l, 6));
1360 __m128i s1 = _mm_add_epi16(r1_16l, _mm_srli_si128(r1_16l, 6));
1361 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1362 s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
1363 _mm_storel_epi64((__m128i*)D, s0);
1365 s0 = _mm_add_epi16(r0_16h, _mm_srli_si128(r0_16h, 6));
1366 s1 = _mm_add_epi16(r1_16h, _mm_srli_si128(r1_16h, 6));
1367 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1368 s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
1369 _mm_storel_epi64((__m128i*)(D+3), s0);
1374 int v[] = { 0, 0, -1, -1 };
1375 __m128i mask = _mm_loadu_si128((const __m128i*)v);
1377 for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
1379 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1380 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1382 __m128i r0_16l = _mm_unpacklo_epi8(r0, zero);
1383 __m128i r0_16h = _mm_unpackhi_epi8(r0, zero);
1384 __m128i r1_16l = _mm_unpacklo_epi8(r1, zero);
1385 __m128i r1_16h = _mm_unpackhi_epi8(r1, zero);
1387 __m128i s0 = _mm_add_epi16(r0_16l, _mm_srli_si128(r0_16l, 8));
1388 __m128i s1 = _mm_add_epi16(r1_16l, _mm_srli_si128(r1_16l, 8));
1389 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1390 __m128i res0 = _mm_srli_epi16(s0, 2);
1392 s0 = _mm_add_epi16(r0_16h, _mm_srli_si128(r0_16h, 8));
1393 s1 = _mm_add_epi16(r1_16h, _mm_srli_si128(r1_16h, 8));
1394 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1395 __m128i res1 = _mm_srli_epi16(s0, 2);
1396 s0 = _mm_packus_epi16(_mm_or_si128(_mm_andnot_si128(mask, res0),
1397 _mm_and_si128(mask, _mm_slli_si128(res1, 8))), zero);
1398 _mm_storel_epi64((__m128i*)(D), s0);
1411 class ResizeAreaFastVec_SIMD_16u
1414 ResizeAreaFastVec_SIMD_16u(int _cn, int _step) :
1415 cn(_cn), step(_step)
1417 use_simd = checkHardwareSupport(CV_CPU_SSE2);
1420 int operator() (const ushort* S, ushort* D, int w) const
1426 const ushort* S0 = (const ushort*)S;
1427 const ushort* S1 = (const ushort*)((const uchar*)(S) + step);
1428 __m128i masklow = _mm_set1_epi32(0x0000ffff);
1429 __m128i zero = _mm_setzero_si128();
1430 __m128i delta2 = _mm_set1_epi32(2);
1432 #define _mm_packus_epi32(a, zero) _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(a, 16), 16), zero)
1436 for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4)
1438 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1439 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1441 __m128i s0 = _mm_add_epi32(_mm_srli_epi32(r0, 16), _mm_and_si128(r0, masklow));
1442 __m128i s1 = _mm_add_epi32(_mm_srli_epi32(r1, 16), _mm_and_si128(r1, masklow));
1443 s0 = _mm_add_epi32(_mm_add_epi32(s0, s1), delta2);
1444 s0 = _mm_srli_epi32(s0, 2);
1445 s0 = _mm_packus_epi32(s0, zero);
1447 _mm_storel_epi64((__m128i*)D, s0);
1451 for ( ; dx <= w - 4; dx += 3, S0 += 6, S1 += 6, D += 3)
1453 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1454 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1456 __m128i r0_16l = _mm_unpacklo_epi16(r0, zero);
1457 __m128i r0_16h = _mm_unpacklo_epi16(_mm_srli_si128(r0, 6), zero);
1458 __m128i r1_16l = _mm_unpacklo_epi16(r1, zero);
1459 __m128i r1_16h = _mm_unpacklo_epi16(_mm_srli_si128(r1, 6), zero);
1461 __m128i s0 = _mm_add_epi32(r0_16l, r0_16h);
1462 __m128i s1 = _mm_add_epi32(r1_16l, r1_16h);
1463 s0 = _mm_add_epi32(delta2, _mm_add_epi32(s0, s1));
1464 s0 = _mm_packus_epi32(_mm_srli_epi32(s0, 2), zero);
1465 _mm_storel_epi64((__m128i*)D, s0);
1470 for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4)
1472 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1473 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1475 __m128i r0_32l = _mm_unpacklo_epi16(r0, zero);
1476 __m128i r0_32h = _mm_unpackhi_epi16(r0, zero);
1477 __m128i r1_32l = _mm_unpacklo_epi16(r1, zero);
1478 __m128i r1_32h = _mm_unpackhi_epi16(r1, zero);
1480 __m128i s0 = _mm_add_epi32(r0_32l, r0_32h);
1481 __m128i s1 = _mm_add_epi32(r1_32l, r1_32h);
1482 s0 = _mm_add_epi32(s1, _mm_add_epi32(s0, delta2));
1483 s0 = _mm_packus_epi32(_mm_srli_epi32(s0, 2), zero);
1484 _mm_storel_epi64((__m128i*)D, s0);
1488 #undef _mm_packus_epi32
1500 typedef ResizeAreaFastNoVec<uchar, uchar> ResizeAreaFastVec_SIMD_8u;
1501 typedef ResizeAreaFastNoVec<ushort, ushort> ResizeAreaFastVec_SIMD_16u;
1504 template<typename T, typename SIMDVecOp>
1505 struct ResizeAreaFastVec
1507 ResizeAreaFastVec(int _scale_x, int _scale_y, int _cn, int _step) :
1508 scale_x(_scale_x), scale_y(_scale_y), cn(_cn), step(_step), vecOp(_cn, _step)
1510 fast_mode = scale_x == 2 && scale_y == 2 && (cn == 1 || cn == 3 || cn == 4);
1513 int operator() (const T* S, T* D, int w) const
1518 const T* nextS = (const T*)((const uchar*)S + step);
1519 int dx = vecOp(S, D, w);
1522 for( ; dx < w; ++dx )
1525 D[dx] = (T)((S[index] + S[index+1] + nextS[index] + nextS[index+1] + 2) >> 2);
1528 for( ; dx < w; dx += 3 )
1531 D[dx] = (T)((S[index] + S[index+3] + nextS[index] + nextS[index+3] + 2) >> 2);
1532 D[dx+1] = (T)((S[index+1] + S[index+4] + nextS[index+1] + nextS[index+4] + 2) >> 2);
1533 D[dx+2] = (T)((S[index+2] + S[index+5] + nextS[index+2] + nextS[index+5] + 2) >> 2);
1538 for( ; dx < w; dx += 4 )
1541 D[dx] = (T)((S[index] + S[index+4] + nextS[index] + nextS[index+4] + 2) >> 2);
1542 D[dx+1] = (T)((S[index+1] + S[index+5] + nextS[index+1] + nextS[index+5] + 2) >> 2);
1543 D[dx+2] = (T)((S[index+2] + S[index+6] + nextS[index+2] + nextS[index+6] + 2) >> 2);
1544 D[dx+3] = (T)((S[index+3] + S[index+7] + nextS[index+3] + nextS[index+7] + 2) >> 2);
1552 int scale_x, scale_y;
1559 template <typename T, typename WT, typename VecOp>
1560 class resizeAreaFast_Invoker :
1561 public ParallelLoopBody
1564 resizeAreaFast_Invoker(const Mat &_src, Mat &_dst,
1565 int _scale_x, int _scale_y, const int* _ofs, const int* _xofs) :
1566 ParallelLoopBody(), src(_src), dst(_dst), scale_x(_scale_x),
1567 scale_y(_scale_y), ofs(_ofs), xofs(_xofs)
1571 virtual void operator() (const Range& range) const
1573 Size ssize = src.size(), dsize = dst.size();
1574 int cn = src.channels();
1575 int area = scale_x*scale_y;
1576 float scale = 1.f/(area);
1577 int dwidth1 = (ssize.width/scale_x)*cn;
1582 VecOp vop(scale_x, scale_y, src.channels(), (int)src.step/*, area_ofs*/);
1584 for( dy = range.start; dy < range.end; dy++ )
1586 T* D = (T*)(dst.data + dst.step*dy);
1587 int sy0 = dy*scale_y;
1588 int w = sy0 + scale_y <= ssize.height ? dwidth1 : 0;
1590 if( sy0 >= ssize.height )
1592 for( dx = 0; dx < dsize.width; dx++ )
1597 dx = vop((const T*)(src.data + src.step * sy0), D, w);
1598 for( ; dx < w; dx++ )
1600 const T* S = (const T*)(src.data + src.step * sy0) + xofs[dx];
1603 #if CV_ENABLE_UNROLLED
1604 for( ; k <= area - 4; k += 4 )
1605 sum += S[ofs[k]] + S[ofs[k+1]] + S[ofs[k+2]] + S[ofs[k+3]];
1607 for( ; k < area; k++ )
1610 D[dx] = saturate_cast<T>(sum * scale);
1613 for( ; dx < dsize.width; dx++ )
1616 int count = 0, sx0 = xofs[dx];
1617 if( sx0 >= ssize.width )
1620 for( int sy = 0; sy < scale_y; sy++ )
1622 if( sy0 + sy >= ssize.height )
1624 const T* S = (const T*)(src.data + src.step*(sy0 + sy)) + sx0;
1625 for( int sx = 0; sx < scale_x*cn; sx += cn )
1627 if( sx0 + sx >= ssize.width )
1634 D[dx] = saturate_cast<T>((float)sum/count);
1642 int scale_x, scale_y;
1643 const int *ofs, *xofs;
1646 template<typename T, typename WT, typename VecOp>
1647 static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int* xofs,
1648 int scale_x, int scale_y )
1650 Range range(0, dst.rows);
1651 resizeAreaFast_Invoker<T, WT, VecOp> invoker(src, dst, scale_x,
1652 scale_y, ofs, xofs);
1653 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
1656 struct DecimateAlpha
1663 template<typename T, typename WT> class ResizeArea_Invoker :
1664 public ParallelLoopBody
1667 ResizeArea_Invoker( const Mat& _src, Mat& _dst,
1668 const DecimateAlpha* _xtab, int _xtab_size,
1669 const DecimateAlpha* _ytab, int _ytab_size,
1670 const int* _tabofs )
1675 xtab_size0 = _xtab_size;
1677 ytab_size = _ytab_size;
1681 virtual void operator() (const Range& range) const
1683 Size dsize = dst->size();
1684 int cn = dst->channels();
1686 AutoBuffer<WT> _buffer(dsize.width*2);
1687 const DecimateAlpha* xtab = xtab0;
1688 int xtab_size = xtab_size0;
1689 WT *buf = _buffer, *sum = buf + dsize.width;
1690 int j_start = tabofs[range.start], j_end = tabofs[range.end], j, k, dx, prev_dy = ytab[j_start].di;
1692 for( dx = 0; dx < dsize.width; dx++ )
1695 for( j = j_start; j < j_end; j++ )
1697 WT beta = ytab[j].alpha;
1698 int dy = ytab[j].di;
1699 int sy = ytab[j].si;
1702 const T* S = (const T*)(src->data + src->step*sy);
1703 for( dx = 0; dx < dsize.width; dx++ )
1707 for( k = 0; k < xtab_size; k++ )
1709 int dxn = xtab[k].di;
1710 WT alpha = xtab[k].alpha;
1711 buf[dxn] += S[xtab[k].si]*alpha;
1714 for( k = 0; k < xtab_size; k++ )
1716 int sxn = xtab[k].si;
1717 int dxn = xtab[k].di;
1718 WT alpha = xtab[k].alpha;
1719 WT t0 = buf[dxn] + S[sxn]*alpha;
1720 WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
1721 buf[dxn] = t0; buf[dxn+1] = t1;
1724 for( k = 0; k < xtab_size; k++ )
1726 int sxn = xtab[k].si;
1727 int dxn = xtab[k].di;
1728 WT alpha = xtab[k].alpha;
1729 WT t0 = buf[dxn] + S[sxn]*alpha;
1730 WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
1731 WT t2 = buf[dxn+2] + S[sxn+2]*alpha;
1732 buf[dxn] = t0; buf[dxn+1] = t1; buf[dxn+2] = t2;
1736 for( k = 0; k < xtab_size; k++ )
1738 int sxn = xtab[k].si;
1739 int dxn = xtab[k].di;
1740 WT alpha = xtab[k].alpha;
1741 WT t0 = buf[dxn] + S[sxn]*alpha;
1742 WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
1743 buf[dxn] = t0; buf[dxn+1] = t1;
1744 t0 = buf[dxn+2] + S[sxn+2]*alpha;
1745 t1 = buf[dxn+3] + S[sxn+3]*alpha;
1746 buf[dxn+2] = t0; buf[dxn+3] = t1;
1751 for( k = 0; k < xtab_size; k++ )
1753 int sxn = xtab[k].si;
1754 int dxn = xtab[k].di;
1755 WT alpha = xtab[k].alpha;
1756 for( int c = 0; c < cn; c++ )
1757 buf[dxn + c] += S[sxn + c]*alpha;
1764 T* D = (T*)(dst->data + dst->step*prev_dy);
1766 for( dx = 0; dx < dsize.width; dx++ )
1768 D[dx] = saturate_cast<T>(sum[dx]);
1769 sum[dx] = beta*buf[dx];
1775 for( dx = 0; dx < dsize.width; dx++ )
1776 sum[dx] += beta*buf[dx];
1781 T* D = (T*)(dst->data + dst->step*prev_dy);
1782 for( dx = 0; dx < dsize.width; dx++ )
1783 D[dx] = saturate_cast<T>(sum[dx]);
1790 const DecimateAlpha* xtab0;
1791 const DecimateAlpha* ytab;
1792 int xtab_size0, ytab_size;
1797 template <typename T, typename WT>
1798 static void resizeArea_( const Mat& src, Mat& dst,
1799 const DecimateAlpha* xtab, int xtab_size,
1800 const DecimateAlpha* ytab, int ytab_size,
1803 parallel_for_(Range(0, dst.rows),
1804 ResizeArea_Invoker<T, WT>(src, dst, xtab, xtab_size, ytab, ytab_size, tabofs),
1805 dst.total()/((double)(1 << 16)));
1809 typedef void (*ResizeFunc)( const Mat& src, Mat& dst,
1810 const int* xofs, const void* alpha,
1811 const int* yofs, const void* beta,
1812 int xmin, int xmax, int ksize );
1814 typedef void (*ResizeAreaFastFunc)( const Mat& src, Mat& dst,
1815 const int* ofs, const int *xofs,
1816 int scale_x, int scale_y );
1818 typedef void (*ResizeAreaFunc)( const Mat& src, Mat& dst,
1819 const DecimateAlpha* xtab, int xtab_size,
1820 const DecimateAlpha* ytab, int ytab_size,
1824 static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, DecimateAlpha* tab )
1827 for(int dx = 0; dx < dsize; dx++ )
1829 double fsx1 = dx * scale;
1830 double fsx2 = fsx1 + scale;
1831 double cellWidth = std::min(scale, ssize - fsx1);
1833 int sx1 = cvCeil(fsx1), sx2 = cvFloor(fsx2);
1835 sx2 = std::min(sx2, ssize - 1);
1836 sx1 = std::min(sx1, sx2);
1838 if( sx1 - fsx1 > 1e-3 )
1840 assert( k < ssize*2 );
1841 tab[k].di = dx * cn;
1842 tab[k].si = (sx1 - 1) * cn;
1843 tab[k++].alpha = (float)((sx1 - fsx1) / cellWidth);
1846 for(int sx = sx1; sx < sx2; sx++ )
1848 assert( k < ssize*2 );
1849 tab[k].di = dx * cn;
1850 tab[k].si = sx * cn;
1851 tab[k++].alpha = float(1.0 / cellWidth);
1854 if( fsx2 - sx2 > 1e-3 )
1856 assert( k < ssize*2 );
1857 tab[k].di = dx * cn;
1858 tab[k].si = sx2 * cn;
1859 tab[k++].alpha = (float)(std::min(std::min(fsx2 - sx2, 1.), cellWidth) / cellWidth);
1865 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
1866 class IPPresizeInvoker :
1867 public ParallelLoopBody
1870 IPPresizeInvoker(Mat &_src, Mat &_dst, double &_inv_scale_x, double &_inv_scale_y, int _mode, ippiResizeSqrPixelFunc _func, bool *_ok) :
1871 ParallelLoopBody(), src(_src), dst(_dst), inv_scale_x(_inv_scale_x), inv_scale_y(_inv_scale_y), mode(_mode), func(_func), ok(_ok)
1876 virtual void operator() (const Range& range) const
1878 int cn = src.channels();
1879 IppiRect srcroi = { 0, range.start, src.cols, range.end - range.start };
1880 int dsty = CV_IMIN(cvRound(range.start * inv_scale_y), dst.rows);
1881 int dstwidth = CV_IMIN(cvRound(src.cols * inv_scale_x), dst.cols);
1882 int dstheight = CV_IMIN(cvRound(range.end * inv_scale_y), dst.rows);
1883 IppiRect dstroi = { 0, dsty, dstwidth, dstheight - dsty };
1885 ippiResizeGetBufSize( srcroi, dstroi, cn, mode, &bufsize );
1886 AutoBuffer<uchar> buf(bufsize + 64);
1887 uchar* bufptr = alignPtr((uchar*)buf, 32);
1888 if( func( src.data, ippiSize(src.cols, src.rows), (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, inv_scale_x, inv_scale_y, 0, 0, mode, bufptr ) < 0 )
1897 ippiResizeSqrPixelFunc func;
1899 const IPPresizeInvoker& operator= (const IPPresizeInvoker&);
1905 static void ocl_computeResizeAreaTabs(int ssize, int dsize, double scale, int * const map_tab,
1906 float * const alpha_tab, int * const ofs_tab)
1909 for ( ; dx < dsize; dx++)
1913 double fsx1 = dx * scale;
1914 double fsx2 = fsx1 + scale;
1915 double cellWidth = std::min(scale, ssize - fsx1);
1917 int sx1 = cvCeil(fsx1), sx2 = cvFloor(fsx2);
1919 sx2 = std::min(sx2, ssize - 1);
1920 sx1 = std::min(sx1, sx2);
1922 if (sx1 - fsx1 > 1e-3)
1924 map_tab[k] = sx1 - 1;
1925 alpha_tab[k++] = (float)((sx1 - fsx1) / cellWidth);
1928 for (int sx = sx1; sx < sx2; sx++)
1931 alpha_tab[k++] = float(1.0 / cellWidth);
1934 if (fsx2 - sx2 > 1e-3)
1937 alpha_tab[k++] = (float)(std::min(std::min(fsx2 - sx2, 1.), cellWidth) / cellWidth);
1943 static void ocl_computeResizeAreaFastTabs(int * dmap_tab, int * smap_tab, int scale, int dcols, int scol)
1945 for (int i = 0; i < dcols; ++i)
1946 dmap_tab[i] = scale * i;
1948 for (int i = 0, size = dcols * scale; i < size; ++i)
1949 smap_tab[i] = std::min(scol - 1, i);
1952 static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
1953 double fx, double fy, int interpolation)
1955 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
1957 double inv_fx = 1. / fx, inv_fy = 1. / fy;
1958 float inv_fxf = (float)inv_fx, inv_fyf = (float)inv_fy;
1961 (interpolation == INTER_NEAREST || interpolation == INTER_LINEAR ||
1962 (interpolation == INTER_AREA && inv_fx >= 1 && inv_fy >= 1) )) )
1965 UMat src = _src.getUMat();
1966 _dst.create(dsize, type);
1967 UMat dst = _dst.getUMat();
1970 size_t globalsize[] = { dst.cols, dst.rows };
1972 if (interpolation == INTER_LINEAR)
1974 int wdepth = std::max(depth, CV_32S);
1975 int wtype = CV_MAKETYPE(wdepth, cn);
1977 k.create("resizeLN", ocl::imgproc::resize_oclsrc,
1978 format("-D INTER_LINEAR -D depth=%d -D PIXTYPE=%s -D PIXTYPE1=%s "
1979 "-D WORKTYPE=%s -D convertToWT=%s -D convertToDT=%s -D cn=%d",
1980 depth, ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(wtype),
1981 ocl::convertTypeStr(depth, wdepth, cn, buf[0]),
1982 ocl::convertTypeStr(wdepth, depth, cn, buf[1]),
1985 else if (interpolation == INTER_NEAREST)
1987 k.create("resizeNN", ocl::imgproc::resize_oclsrc,
1988 format("-D INTER_NEAREST -D PIXTYPE=%s -D PIXTYPE1=%s -D cn=%d",
1989 ocl::memopTypeToStr(type), ocl::memopTypeToStr(depth), cn));
1991 else if (interpolation == INTER_AREA)
1993 int iscale_x = saturate_cast<int>(inv_fx);
1994 int iscale_y = saturate_cast<int>(inv_fy);
1995 bool is_area_fast = std::abs(inv_fx - iscale_x) < DBL_EPSILON &&
1996 std::abs(inv_fy - iscale_y) < DBL_EPSILON;
1997 int wdepth = std::max(depth, is_area_fast ? CV_32S : CV_32F);
1998 int wtype = CV_MAKE_TYPE(wdepth, cn);
2001 String buildOption = format("-D INTER_AREA -D PIXTYPE=%s -D PIXTYPE1=%s -D WTV=%s -D convertToWTV=%s -D cn=%d",
2002 ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(wtype),
2003 ocl::convertTypeStr(depth, wdepth, cn, cvt[0]), cn);
2005 UMat alphaOcl, tabofsOcl, mapOcl;
2010 int wdepth2 = std::max(CV_32F, depth), wtype2 = CV_MAKE_TYPE(wdepth2, cn);
2011 buildOption = buildOption + format(" -D convertToPIXTYPE=%s -D WT2V=%s -D convertToWT2V=%s -D INTER_AREA_FAST"
2012 " -D XSCALE=%d -D YSCALE=%d -D SCALE=%ff",
2013 ocl::convertTypeStr(wdepth2, depth, cn, cvt[0]),
2014 ocl::typeToStr(wtype2), ocl::convertTypeStr(wdepth, wdepth2, cn, cvt[1]),
2015 iscale_x, iscale_y, 1.0f / (iscale_x * iscale_y));
2017 k.create("resizeAREA_FAST", ocl::imgproc::resize_oclsrc, buildOption);
2021 int smap_tab_size = dst.cols * iscale_x + dst.rows * iscale_y;
2022 AutoBuffer<int> dmap_tab(dst.cols + dst.rows), smap_tab(smap_tab_size);
2023 int * dxmap_tab = dmap_tab, * dymap_tab = dxmap_tab + dst.cols;
2024 int * sxmap_tab = smap_tab, * symap_tab = smap_tab + dst.cols * iscale_y;
2026 ocl_computeResizeAreaFastTabs(dxmap_tab, sxmap_tab, iscale_x, dst.cols, src.cols);
2027 ocl_computeResizeAreaFastTabs(dymap_tab, symap_tab, iscale_y, dst.rows, src.rows);
2029 Mat(1, dst.cols + dst.rows, CV_32SC1, (void *)dmap_tab).copyTo(dmap);
2030 Mat(1, smap_tab_size, CV_32SC1, (void *)smap_tab).copyTo(smap);
2034 buildOption = buildOption + format(" -D convertToPIXTYPE=%s", ocl::convertTypeStr(wdepth, depth, cn, cvt[0]));
2035 k.create("resizeAREA", ocl::imgproc::resize_oclsrc, buildOption);
2039 Size ssize = src.size();
2040 int xytab_size = (ssize.width + ssize.height) << 1;
2041 int tabofs_size = dsize.height + dsize.width + 2;
2043 AutoBuffer<int> _xymap_tab(xytab_size), _xyofs_tab(tabofs_size);
2044 AutoBuffer<float> _xyalpha_tab(xytab_size);
2045 int * xmap_tab = _xymap_tab, * ymap_tab = _xymap_tab + (ssize.width << 1);
2046 float * xalpha_tab = _xyalpha_tab, * yalpha_tab = _xyalpha_tab + (ssize.width << 1);
2047 int * xofs_tab = _xyofs_tab, * yofs_tab = _xyofs_tab + dsize.width + 1;
2049 ocl_computeResizeAreaTabs(ssize.width, dsize.width, inv_fx, xmap_tab, xalpha_tab, xofs_tab);
2050 ocl_computeResizeAreaTabs(ssize.height, dsize.height, inv_fy, ymap_tab, yalpha_tab, yofs_tab);
2052 // loading precomputed arrays to GPU
2053 Mat(1, xytab_size, CV_32FC1, (void *)_xyalpha_tab).copyTo(alphaOcl);
2054 Mat(1, xytab_size, CV_32SC1, (void *)_xymap_tab).copyTo(mapOcl);
2055 Mat(1, tabofs_size, CV_32SC1, (void *)_xyofs_tab).copyTo(tabofsOcl);
2058 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), dstarg = ocl::KernelArg::WriteOnly(dst);
2061 k.args(srcarg, dstarg, ocl::KernelArg::PtrReadOnly(dmap), ocl::KernelArg::PtrReadOnly(smap));
2063 k.args(srcarg, dstarg, inv_fxf, inv_fyf, ocl::KernelArg::PtrReadOnly(tabofsOcl),
2064 ocl::KernelArg::PtrReadOnly(mapOcl), ocl::KernelArg::PtrReadOnly(alphaOcl));
2066 return k.run(2, globalsize, NULL, false);
2071 k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
2072 (float)inv_fx, (float)inv_fy);
2074 return k.run(2, globalsize, 0, false);
2081 //////////////////////////////////////////////////////////////////////////////////////////
2083 void cv::resize( InputArray _src, OutputArray _dst, Size dsize,
2084 double inv_scale_x, double inv_scale_y, int interpolation )
2086 static ResizeFunc linear_tab[] =
2089 HResizeLinear<uchar, int, short,
2090 INTER_RESIZE_COEF_SCALE,
2091 HResizeLinearVec_8u32s>,
2092 VResizeLinear<uchar, int, short,
2093 FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
2094 VResizeLinearVec_32s8u> >,
2097 HResizeLinear<ushort, float, float, 1,
2098 HResizeLinearVec_16u32f>,
2099 VResizeLinear<ushort, float, float, Cast<float, ushort>,
2100 VResizeLinearVec_32f16u> >,
2102 HResizeLinear<short, float, float, 1,
2103 HResizeLinearVec_16s32f>,
2104 VResizeLinear<short, float, float, Cast<float, short>,
2105 VResizeLinearVec_32f16s> >,
2108 HResizeLinear<float, float, float, 1,
2109 HResizeLinearVec_32f>,
2110 VResizeLinear<float, float, float, Cast<float, float>,
2111 VResizeLinearVec_32f> >,
2113 HResizeLinear<double, double, float, 1,
2115 VResizeLinear<double, double, float, Cast<double, double>,
2120 static ResizeFunc cubic_tab[] =
2123 HResizeCubic<uchar, int, short>,
2124 VResizeCubic<uchar, int, short,
2125 FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
2126 VResizeCubicVec_32s8u> >,
2129 HResizeCubic<ushort, float, float>,
2130 VResizeCubic<ushort, float, float, Cast<float, ushort>,
2131 VResizeCubicVec_32f16u> >,
2133 HResizeCubic<short, float, float>,
2134 VResizeCubic<short, float, float, Cast<float, short>,
2135 VResizeCubicVec_32f16s> >,
2138 HResizeCubic<float, float, float>,
2139 VResizeCubic<float, float, float, Cast<float, float>,
2140 VResizeCubicVec_32f> >,
2142 HResizeCubic<double, double, float>,
2143 VResizeCubic<double, double, float, Cast<double, double>,
2148 static ResizeFunc lanczos4_tab[] =
2150 resizeGeneric_<HResizeLanczos4<uchar, int, short>,
2151 VResizeLanczos4<uchar, int, short,
2152 FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
2155 resizeGeneric_<HResizeLanczos4<ushort, float, float>,
2156 VResizeLanczos4<ushort, float, float, Cast<float, ushort>,
2158 resizeGeneric_<HResizeLanczos4<short, float, float>,
2159 VResizeLanczos4<short, float, float, Cast<float, short>,
2162 resizeGeneric_<HResizeLanczos4<float, float, float>,
2163 VResizeLanczos4<float, float, float, Cast<float, float>,
2165 resizeGeneric_<HResizeLanczos4<double, double, float>,
2166 VResizeLanczos4<double, double, float, Cast<double, double>,
2171 static ResizeAreaFastFunc areafast_tab[] =
2173 resizeAreaFast_<uchar, int, ResizeAreaFastVec<uchar, ResizeAreaFastVec_SIMD_8u> >,
2175 resizeAreaFast_<ushort, float, ResizeAreaFastVec<ushort, ResizeAreaFastVec_SIMD_16u> >,
2176 resizeAreaFast_<short, float, ResizeAreaFastVec<short, ResizeAreaFastNoVec<short, float> > >,
2178 resizeAreaFast_<float, float, ResizeAreaFastNoVec<float, float> >,
2179 resizeAreaFast_<double, double, ResizeAreaFastNoVec<double, double> >,
2183 static ResizeAreaFunc area_tab[] =
2185 resizeArea_<uchar, float>, 0, resizeArea_<ushort, float>,
2186 resizeArea_<short, float>, 0, resizeArea_<float, float>,
2187 resizeArea_<double, double>, 0
2190 Size ssize = _src.size();
2192 CV_Assert( ssize.area() > 0 );
2193 CV_Assert( dsize.area() > 0 || (inv_scale_x > 0 && inv_scale_y > 0) );
2194 if( dsize.area() == 0 )
2196 dsize = Size(saturate_cast<int>(ssize.width*inv_scale_x),
2197 saturate_cast<int>(ssize.height*inv_scale_y));
2198 CV_Assert( dsize.area() > 0 );
2202 inv_scale_x = (double)dsize.width/ssize.width;
2203 inv_scale_y = (double)dsize.height/ssize.height;
2206 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
2207 ocl_resize(_src, _dst, dsize, inv_scale_x, inv_scale_y, interpolation))
2209 Mat src = _src.getMat();
2210 _dst.create(dsize, src.type());
2211 Mat dst = _dst.getMat();
2213 #ifdef HAVE_TEGRA_OPTIMIZATION
2214 if (tegra::resize(src, dst, (float)inv_scale_x, (float)inv_scale_y, interpolation))
2218 int depth = src.depth(), cn = src.channels();
2219 double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y;
2220 int k, sx, sy, dx, dy;
2222 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
2223 int mode = interpolation == INTER_LINEAR ? IPPI_INTER_LINEAR : 0;
2224 int type = src.type();
2225 ippiResizeSqrPixelFunc ippFunc =
2226 type == CV_8UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C1R :
2227 type == CV_8UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C3R :
2228 type == CV_8UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C4R :
2229 type == CV_16UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C1R :
2230 type == CV_16UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C3R :
2231 type == CV_16UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C4R :
2232 type == CV_16SC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C1R :
2233 type == CV_16SC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C3R :
2234 type == CV_16SC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C4R :
2235 type == CV_32FC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C1R :
2236 type == CV_32FC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C3R :
2237 type == CV_32FC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C4R :
2239 if( ippFunc && mode != 0 )
2242 Range range(0, src.rows);
2243 IPPresizeInvoker invoker(src, dst, inv_scale_x, inv_scale_y, mode, ippFunc, &ok);
2244 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
2250 if( interpolation == INTER_NEAREST )
2252 resizeNN( src, dst, inv_scale_x, inv_scale_y );
2257 int iscale_x = saturate_cast<int>(scale_x);
2258 int iscale_y = saturate_cast<int>(scale_y);
2260 bool is_area_fast = std::abs(scale_x - iscale_x) < DBL_EPSILON &&
2261 std::abs(scale_y - iscale_y) < DBL_EPSILON;
2263 // in case of scale_x && scale_y is equal to 2
2264 // INTER_AREA (fast) also is equal to INTER_LINEAR
2265 if( interpolation == INTER_LINEAR && is_area_fast && iscale_x == 2 && iscale_y == 2 )
2266 interpolation = INTER_AREA;
2268 // true "area" interpolation is only implemented for the case (scale_x <= 1 && scale_y <= 1).
2269 // In other cases it is emulated using some variant of bilinear interpolation
2270 if( interpolation == INTER_AREA && scale_x >= 1 && scale_y >= 1 )
2274 int area = iscale_x*iscale_y;
2275 size_t srcstep = src.step / src.elemSize1();
2276 AutoBuffer<int> _ofs(area + dsize.width*cn);
2278 int* xofs = ofs + area;
2279 ResizeAreaFastFunc func = areafast_tab[depth];
2280 CV_Assert( func != 0 );
2282 for( sy = 0, k = 0; sy < iscale_y; sy++ )
2283 for( sx = 0; sx < iscale_x; sx++ )
2284 ofs[k++] = (int)(sy*srcstep + sx*cn);
2286 for( dx = 0; dx < dsize.width; dx++ )
2290 for( k = 0; k < cn; k++ )
2291 xofs[j + k] = sx + k;
2294 func( src, dst, ofs, xofs, iscale_x, iscale_y );
2298 ResizeAreaFunc func = area_tab[depth];
2299 CV_Assert( func != 0 && cn <= 4 );
2301 AutoBuffer<DecimateAlpha> _xytab((ssize.width + ssize.height)*2);
2302 DecimateAlpha* xtab = _xytab, *ytab = xtab + ssize.width*2;
2304 int xtab_size = computeResizeAreaTab(ssize.width, dsize.width, cn, scale_x, xtab);
2305 int ytab_size = computeResizeAreaTab(ssize.height, dsize.height, 1, scale_y, ytab);
2307 AutoBuffer<int> _tabofs(dsize.height + 1);
2308 int* tabofs = _tabofs;
2309 for( k = 0, dy = 0; k < ytab_size; k++ )
2311 if( k == 0 || ytab[k].di != ytab[k-1].di )
2313 assert( ytab[k].di == dy );
2317 tabofs[dy] = ytab_size;
2319 func( src, dst, xtab, xtab_size, ytab, ytab_size, tabofs );
2324 int xmin = 0, xmax = dsize.width, width = dsize.width*cn;
2325 bool area_mode = interpolation == INTER_AREA;
2326 bool fixpt = depth == CV_8U;
2329 int ksize=0, ksize2;
2330 if( interpolation == INTER_CUBIC )
2331 ksize = 4, func = cubic_tab[depth];
2332 else if( interpolation == INTER_LANCZOS4 )
2333 ksize = 8, func = lanczos4_tab[depth];
2334 else if( interpolation == INTER_LINEAR || interpolation == INTER_AREA )
2335 ksize = 2, func = linear_tab[depth];
2337 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
2340 CV_Assert( func != 0 );
2342 AutoBuffer<uchar> _buffer((width + dsize.height)*(sizeof(int) + sizeof(float)*ksize));
2343 int* xofs = (int*)(uchar*)_buffer;
2344 int* yofs = xofs + width;
2345 float* alpha = (float*)(yofs + dsize.height);
2346 short* ialpha = (short*)alpha;
2347 float* beta = alpha + width*ksize;
2348 short* ibeta = ialpha + width*ksize;
2349 float cbuf[MAX_ESIZE];
2351 for( dx = 0; dx < dsize.width; dx++ )
2355 fx = (float)((dx+0.5)*scale_x - 0.5);
2361 sx = cvFloor(dx*scale_x);
2362 fx = (float)((dx+1) - (sx+1)*inv_scale_x);
2363 fx = fx <= 0 ? 0.f : fx - cvFloor(fx);
2373 if( sx + ksize2 >= ssize.width )
2375 xmax = std::min( xmax, dx );
2376 if( sx >= ssize.width-1 )
2377 fx = 0, sx = ssize.width-1;
2380 for( k = 0, sx *= cn; k < cn; k++ )
2381 xofs[dx*cn + k] = sx + k;
2383 if( interpolation == INTER_CUBIC )
2384 interpolateCubic( fx, cbuf );
2385 else if( interpolation == INTER_LANCZOS4 )
2386 interpolateLanczos4( fx, cbuf );
2394 for( k = 0; k < ksize; k++ )
2395 ialpha[dx*cn*ksize + k] = saturate_cast<short>(cbuf[k]*INTER_RESIZE_COEF_SCALE);
2396 for( ; k < cn*ksize; k++ )
2397 ialpha[dx*cn*ksize + k] = ialpha[dx*cn*ksize + k - ksize];
2401 for( k = 0; k < ksize; k++ )
2402 alpha[dx*cn*ksize + k] = cbuf[k];
2403 for( ; k < cn*ksize; k++ )
2404 alpha[dx*cn*ksize + k] = alpha[dx*cn*ksize + k - ksize];
2408 for( dy = 0; dy < dsize.height; dy++ )
2412 fy = (float)((dy+0.5)*scale_y - 0.5);
2418 sy = cvFloor(dy*scale_y);
2419 fy = (float)((dy+1) - (sy+1)*inv_scale_y);
2420 fy = fy <= 0 ? 0.f : fy - cvFloor(fy);
2424 if( interpolation == INTER_CUBIC )
2425 interpolateCubic( fy, cbuf );
2426 else if( interpolation == INTER_LANCZOS4 )
2427 interpolateLanczos4( fy, cbuf );
2436 for( k = 0; k < ksize; k++ )
2437 ibeta[dy*ksize + k] = saturate_cast<short>(cbuf[k]*INTER_RESIZE_COEF_SCALE);
2441 for( k = 0; k < ksize; k++ )
2442 beta[dy*ksize + k] = cbuf[k];
2446 func( src, dst, xofs, fixpt ? (void*)ialpha : (void*)alpha, yofs,
2447 fixpt ? (void*)ibeta : (void*)beta, xmin, xmax, ksize );
2451 /****************************************************************************************\
2452 * General warping (affine, perspective, remap) *
2453 \****************************************************************************************/
2458 template<typename T>
2459 static void remapNearest( const Mat& _src, Mat& _dst, const Mat& _xy,
2460 int borderType, const Scalar& _borderValue )
2462 Size ssize = _src.size(), dsize = _dst.size();
2463 int cn = _src.channels();
2464 const T* S0 = (const T*)_src.data;
2465 size_t sstep = _src.step/sizeof(S0[0]);
2466 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2467 saturate_cast<T>(_borderValue[1]),
2468 saturate_cast<T>(_borderValue[2]),
2469 saturate_cast<T>(_borderValue[3]));
2472 unsigned width1 = ssize.width, height1 = ssize.height;
2474 if( _dst.isContinuous() && _xy.isContinuous() )
2476 dsize.width *= dsize.height;
2480 for( dy = 0; dy < dsize.height; dy++ )
2482 T* D = (T*)(_dst.data + _dst.step*dy);
2483 const short* XY = (const short*)(_xy.data + _xy.step*dy);
2487 for( dx = 0; dx < dsize.width; dx++ )
2489 int sx = XY[dx*2], sy = XY[dx*2+1];
2490 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
2491 D[dx] = S0[sy*sstep + sx];
2494 if( borderType == BORDER_REPLICATE )
2496 sx = clip(sx, 0, ssize.width);
2497 sy = clip(sy, 0, ssize.height);
2498 D[dx] = S0[sy*sstep + sx];
2500 else if( borderType == BORDER_CONSTANT )
2502 else if( borderType != BORDER_TRANSPARENT )
2504 sx = borderInterpolate(sx, ssize.width, borderType);
2505 sy = borderInterpolate(sy, ssize.height, borderType);
2506 D[dx] = S0[sy*sstep + sx];
2513 for( dx = 0; dx < dsize.width; dx++, D += cn )
2515 int sx = XY[dx*2], sy = XY[dx*2+1], k;
2517 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
2521 S = S0 + sy*sstep + sx*3;
2522 D[0] = S[0], D[1] = S[1], D[2] = S[2];
2526 S = S0 + sy*sstep + sx*4;
2527 D[0] = S[0], D[1] = S[1], D[2] = S[2], D[3] = S[3];
2531 S = S0 + sy*sstep + sx*cn;
2532 for( k = 0; k < cn; k++ )
2536 else if( borderType != BORDER_TRANSPARENT )
2538 if( borderType == BORDER_REPLICATE )
2540 sx = clip(sx, 0, ssize.width);
2541 sy = clip(sy, 0, ssize.height);
2542 S = S0 + sy*sstep + sx*cn;
2544 else if( borderType == BORDER_CONSTANT )
2548 sx = borderInterpolate(sx, ssize.width, borderType);
2549 sy = borderInterpolate(sy, ssize.height, borderType);
2550 S = S0 + sy*sstep + sx*cn;
2552 for( k = 0; k < cn; k++ )
2563 int operator()( const Mat&, void*, const short*, const ushort*,
2564 const void*, int ) const { return 0; }
2571 int operator()( const Mat& _src, void* _dst, const short* XY,
2572 const ushort* FXY, const void* _wtab, int width ) const
2574 int cn = _src.channels(), x = 0, sstep = (int)_src.step;
2576 if( (cn != 1 && cn != 3 && cn != 4) || !checkHardwareSupport(CV_CPU_SSE2) ||
2580 const uchar *S0 = _src.data, *S1 = _src.data + _src.step;
2581 const short* wtab = cn == 1 ? (const short*)_wtab : &BilinearTab_iC4[0][0][0];
2582 uchar* D = (uchar*)_dst;
2583 __m128i delta = _mm_set1_epi32(INTER_REMAP_COEF_SCALE/2);
2584 __m128i xy2ofs = _mm_set1_epi32(cn + (sstep << 16));
2585 __m128i z = _mm_setzero_si128();
2586 int CV_DECL_ALIGNED(16) iofs0[4], iofs1[4];
2590 for( ; x <= width - 8; x += 8 )
2592 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2593 __m128i xy1 = _mm_loadu_si128( (const __m128i*)(XY + x*2 + 8));
2594 __m128i v0, v1, v2, v3, a0, a1, b0, b1;
2597 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2598 xy1 = _mm_madd_epi16( xy1, xy2ofs );
2599 _mm_store_si128( (__m128i*)iofs0, xy0 );
2600 _mm_store_si128( (__m128i*)iofs1, xy1 );
2602 i0 = *(ushort*)(S0 + iofs0[0]) + (*(ushort*)(S0 + iofs0[1]) << 16);
2603 i1 = *(ushort*)(S0 + iofs0[2]) + (*(ushort*)(S0 + iofs0[3]) << 16);
2604 v0 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2605 i0 = *(ushort*)(S1 + iofs0[0]) + (*(ushort*)(S1 + iofs0[1]) << 16);
2606 i1 = *(ushort*)(S1 + iofs0[2]) + (*(ushort*)(S1 + iofs0[3]) << 16);
2607 v1 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2608 v0 = _mm_unpacklo_epi8(v0, z);
2609 v1 = _mm_unpacklo_epi8(v1, z);
2611 a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x]*4)),
2612 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+1]*4)));
2613 a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+2]*4)),
2614 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+3]*4)));
2615 b0 = _mm_unpacklo_epi64(a0, a1);
2616 b1 = _mm_unpackhi_epi64(a0, a1);
2617 v0 = _mm_madd_epi16(v0, b0);
2618 v1 = _mm_madd_epi16(v1, b1);
2619 v0 = _mm_add_epi32(_mm_add_epi32(v0, v1), delta);
2621 i0 = *(ushort*)(S0 + iofs1[0]) + (*(ushort*)(S0 + iofs1[1]) << 16);
2622 i1 = *(ushort*)(S0 + iofs1[2]) + (*(ushort*)(S0 + iofs1[3]) << 16);
2623 v2 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2624 i0 = *(ushort*)(S1 + iofs1[0]) + (*(ushort*)(S1 + iofs1[1]) << 16);
2625 i1 = *(ushort*)(S1 + iofs1[2]) + (*(ushort*)(S1 + iofs1[3]) << 16);
2626 v3 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2627 v2 = _mm_unpacklo_epi8(v2, z);
2628 v3 = _mm_unpacklo_epi8(v3, z);
2630 a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+4]*4)),
2631 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+5]*4)));
2632 a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+6]*4)),
2633 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+7]*4)));
2634 b0 = _mm_unpacklo_epi64(a0, a1);
2635 b1 = _mm_unpackhi_epi64(a0, a1);
2636 v2 = _mm_madd_epi16(v2, b0);
2637 v3 = _mm_madd_epi16(v3, b1);
2638 v2 = _mm_add_epi32(_mm_add_epi32(v2, v3), delta);
2640 v0 = _mm_srai_epi32(v0, INTER_REMAP_COEF_BITS);
2641 v2 = _mm_srai_epi32(v2, INTER_REMAP_COEF_BITS);
2642 v0 = _mm_packus_epi16(_mm_packs_epi32(v0, v2), z);
2643 _mm_storel_epi64( (__m128i*)(D + x), v0 );
2648 for( ; x <= width - 5; x += 4, D += 12 )
2650 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2651 __m128i u0, v0, u1, v1;
2653 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2654 _mm_store_si128( (__m128i*)iofs0, xy0 );
2655 const __m128i *w0, *w1;
2656 w0 = (const __m128i*)(wtab + FXY[x]*16);
2657 w1 = (const __m128i*)(wtab + FXY[x+1]*16);
2659 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])),
2660 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 3)));
2661 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])),
2662 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 3)));
2663 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])),
2664 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 3)));
2665 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])),
2666 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 3)));
2667 u0 = _mm_unpacklo_epi8(u0, z);
2668 v0 = _mm_unpacklo_epi8(v0, z);
2669 u1 = _mm_unpacklo_epi8(u1, z);
2670 v1 = _mm_unpacklo_epi8(v1, z);
2671 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2672 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2673 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2674 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2675 u0 = _mm_slli_si128(u0, 4);
2676 u0 = _mm_packs_epi32(u0, u1);
2677 u0 = _mm_packus_epi16(u0, u0);
2678 _mm_storel_epi64((__m128i*)D, _mm_srli_si128(u0,1));
2680 w0 = (const __m128i*)(wtab + FXY[x+2]*16);
2681 w1 = (const __m128i*)(wtab + FXY[x+3]*16);
2683 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])),
2684 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 3)));
2685 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])),
2686 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 3)));
2687 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])),
2688 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 3)));
2689 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])),
2690 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 3)));
2691 u0 = _mm_unpacklo_epi8(u0, z);
2692 v0 = _mm_unpacklo_epi8(v0, z);
2693 u1 = _mm_unpacklo_epi8(u1, z);
2694 v1 = _mm_unpacklo_epi8(v1, z);
2695 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2696 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2697 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2698 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2699 u0 = _mm_slli_si128(u0, 4);
2700 u0 = _mm_packs_epi32(u0, u1);
2701 u0 = _mm_packus_epi16(u0, u0);
2702 _mm_storel_epi64((__m128i*)(D + 6), _mm_srli_si128(u0,1));
2707 for( ; x <= width - 4; x += 4, D += 16 )
2709 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2710 __m128i u0, v0, u1, v1;
2712 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2713 _mm_store_si128( (__m128i*)iofs0, xy0 );
2714 const __m128i *w0, *w1;
2715 w0 = (const __m128i*)(wtab + FXY[x]*16);
2716 w1 = (const __m128i*)(wtab + FXY[x+1]*16);
2718 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])),
2719 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 4)));
2720 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])),
2721 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 4)));
2722 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])),
2723 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 4)));
2724 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])),
2725 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 4)));
2726 u0 = _mm_unpacklo_epi8(u0, z);
2727 v0 = _mm_unpacklo_epi8(v0, z);
2728 u1 = _mm_unpacklo_epi8(u1, z);
2729 v1 = _mm_unpacklo_epi8(v1, z);
2730 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2731 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2732 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2733 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2734 u0 = _mm_packs_epi32(u0, u1);
2735 u0 = _mm_packus_epi16(u0, u0);
2736 _mm_storel_epi64((__m128i*)D, u0);
2738 w0 = (const __m128i*)(wtab + FXY[x+2]*16);
2739 w1 = (const __m128i*)(wtab + FXY[x+3]*16);
2741 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])),
2742 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 4)));
2743 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])),
2744 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 4)));
2745 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])),
2746 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 4)));
2747 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])),
2748 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 4)));
2749 u0 = _mm_unpacklo_epi8(u0, z);
2750 v0 = _mm_unpacklo_epi8(v0, z);
2751 u1 = _mm_unpacklo_epi8(u1, z);
2752 v1 = _mm_unpacklo_epi8(v1, z);
2753 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2754 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2755 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2756 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2757 u0 = _mm_packs_epi32(u0, u1);
2758 u0 = _mm_packus_epi16(u0, u0);
2759 _mm_storel_epi64((__m128i*)(D + 8), u0);
2769 typedef RemapNoVec RemapVec_8u;
2774 template<class CastOp, class VecOp, typename AT>
2775 static void remapBilinear( const Mat& _src, Mat& _dst, const Mat& _xy,
2776 const Mat& _fxy, const void* _wtab,
2777 int borderType, const Scalar& _borderValue )
2779 typedef typename CastOp::rtype T;
2780 typedef typename CastOp::type1 WT;
2781 Size ssize = _src.size(), dsize = _dst.size();
2782 int cn = _src.channels();
2783 const AT* wtab = (const AT*)_wtab;
2784 const T* S0 = (const T*)_src.data;
2785 size_t sstep = _src.step/sizeof(S0[0]);
2786 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2787 saturate_cast<T>(_borderValue[1]),
2788 saturate_cast<T>(_borderValue[2]),
2789 saturate_cast<T>(_borderValue[3]));
2794 unsigned width1 = std::max(ssize.width-1, 0), height1 = std::max(ssize.height-1, 0);
2795 CV_Assert( cn <= 4 && ssize.area() > 0 );
2797 if( _src.type() == CV_8UC3 )
2798 width1 = std::max(ssize.width-2, 0);
2801 for( dy = 0; dy < dsize.height; dy++ )
2803 T* D = (T*)(_dst.data + _dst.step*dy);
2804 const short* XY = (const short*)(_xy.data + _xy.step*dy);
2805 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
2807 bool prevInlier = false;
2809 for( dx = 0; dx <= dsize.width; dx++ )
2811 bool curInlier = dx < dsize.width ?
2812 (unsigned)XY[dx*2] < width1 &&
2813 (unsigned)XY[dx*2+1] < height1 : !prevInlier;
2814 if( curInlier == prevInlier )
2820 prevInlier = curInlier;
2824 int len = vecOp( _src, D, XY + dx*2, FXY + dx, wtab, X1 - dx );
2830 for( ; dx < X1; dx++, D++ )
2832 int sx = XY[dx*2], sy = XY[dx*2+1];
2833 const AT* w = wtab + FXY[dx]*4;
2834 const T* S = S0 + sy*sstep + sx;
2835 *D = castOp(WT(S[0]*w[0] + S[1]*w[1] + S[sstep]*w[2] + S[sstep+1]*w[3]));
2839 for( ; dx < X1; dx++, D += 2 )
2841 int sx = XY[dx*2], sy = XY[dx*2+1];
2842 const AT* w = wtab + FXY[dx]*4;
2843 const T* S = S0 + sy*sstep + sx*2;
2844 WT t0 = S[0]*w[0] + S[2]*w[1] + S[sstep]*w[2] + S[sstep+2]*w[3];
2845 WT t1 = S[1]*w[0] + S[3]*w[1] + S[sstep+1]*w[2] + S[sstep+3]*w[3];
2846 D[0] = castOp(t0); D[1] = castOp(t1);
2849 for( ; dx < X1; dx++, D += 3 )
2851 int sx = XY[dx*2], sy = XY[dx*2+1];
2852 const AT* w = wtab + FXY[dx]*4;
2853 const T* S = S0 + sy*sstep + sx*3;
2854 WT t0 = S[0]*w[0] + S[3]*w[1] + S[sstep]*w[2] + S[sstep+3]*w[3];
2855 WT t1 = S[1]*w[0] + S[4]*w[1] + S[sstep+1]*w[2] + S[sstep+4]*w[3];
2856 WT t2 = S[2]*w[0] + S[5]*w[1] + S[sstep+2]*w[2] + S[sstep+5]*w[3];
2857 D[0] = castOp(t0); D[1] = castOp(t1); D[2] = castOp(t2);
2860 for( ; dx < X1; dx++, D += 4 )
2862 int sx = XY[dx*2], sy = XY[dx*2+1];
2863 const AT* w = wtab + FXY[dx]*4;
2864 const T* S = S0 + sy*sstep + sx*4;
2865 WT t0 = S[0]*w[0] + S[4]*w[1] + S[sstep]*w[2] + S[sstep+4]*w[3];
2866 WT t1 = S[1]*w[0] + S[5]*w[1] + S[sstep+1]*w[2] + S[sstep+5]*w[3];
2867 D[0] = castOp(t0); D[1] = castOp(t1);
2868 t0 = S[2]*w[0] + S[6]*w[1] + S[sstep+2]*w[2] + S[sstep+6]*w[3];
2869 t1 = S[3]*w[0] + S[7]*w[1] + S[sstep+3]*w[2] + S[sstep+7]*w[3];
2870 D[2] = castOp(t0); D[3] = castOp(t1);
2875 if( borderType == BORDER_TRANSPARENT && cn != 3 )
2883 for( ; dx < X1; dx++, D++ )
2885 int sx = XY[dx*2], sy = XY[dx*2+1];
2886 if( borderType == BORDER_CONSTANT &&
2887 (sx >= ssize.width || sx+1 < 0 ||
2888 sy >= ssize.height || sy+1 < 0) )
2894 int sx0, sx1, sy0, sy1;
2896 const AT* w = wtab + FXY[dx]*4;
2897 if( borderType == BORDER_REPLICATE )
2899 sx0 = clip(sx, 0, ssize.width);
2900 sx1 = clip(sx+1, 0, ssize.width);
2901 sy0 = clip(sy, 0, ssize.height);
2902 sy1 = clip(sy+1, 0, ssize.height);
2903 v0 = S0[sy0*sstep + sx0];
2904 v1 = S0[sy0*sstep + sx1];
2905 v2 = S0[sy1*sstep + sx0];
2906 v3 = S0[sy1*sstep + sx1];
2910 sx0 = borderInterpolate(sx, ssize.width, borderType);
2911 sx1 = borderInterpolate(sx+1, ssize.width, borderType);
2912 sy0 = borderInterpolate(sy, ssize.height, borderType);
2913 sy1 = borderInterpolate(sy+1, ssize.height, borderType);
2914 v0 = sx0 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx0] : cval[0];
2915 v1 = sx1 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx1] : cval[0];
2916 v2 = sx0 >= 0 && sy1 >= 0 ? S0[sy1*sstep + sx0] : cval[0];
2917 v3 = sx1 >= 0 && sy1 >= 0 ? S0[sy1*sstep + sx1] : cval[0];
2919 D[0] = castOp(WT(v0*w[0] + v1*w[1] + v2*w[2] + v3*w[3]));
2923 for( ; dx < X1; dx++, D += cn )
2925 int sx = XY[dx*2], sy = XY[dx*2+1], k;
2926 if( borderType == BORDER_CONSTANT &&
2927 (sx >= ssize.width || sx+1 < 0 ||
2928 sy >= ssize.height || sy+1 < 0) )
2930 for( k = 0; k < cn; k++ )
2935 int sx0, sx1, sy0, sy1;
2936 const T *v0, *v1, *v2, *v3;
2937 const AT* w = wtab + FXY[dx]*4;
2938 if( borderType == BORDER_REPLICATE )
2940 sx0 = clip(sx, 0, ssize.width);
2941 sx1 = clip(sx+1, 0, ssize.width);
2942 sy0 = clip(sy, 0, ssize.height);
2943 sy1 = clip(sy+1, 0, ssize.height);
2944 v0 = S0 + sy0*sstep + sx0*cn;
2945 v1 = S0 + sy0*sstep + sx1*cn;
2946 v2 = S0 + sy1*sstep + sx0*cn;
2947 v3 = S0 + sy1*sstep + sx1*cn;
2949 else if( borderType == BORDER_TRANSPARENT &&
2950 ((unsigned)sx >= (unsigned)(ssize.width-1) ||
2951 (unsigned)sy >= (unsigned)(ssize.height-1)))
2955 sx0 = borderInterpolate(sx, ssize.width, borderType);
2956 sx1 = borderInterpolate(sx+1, ssize.width, borderType);
2957 sy0 = borderInterpolate(sy, ssize.height, borderType);
2958 sy1 = borderInterpolate(sy+1, ssize.height, borderType);
2959 v0 = sx0 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx0*cn : &cval[0];
2960 v1 = sx1 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx1*cn : &cval[0];
2961 v2 = sx0 >= 0 && sy1 >= 0 ? S0 + sy1*sstep + sx0*cn : &cval[0];
2962 v3 = sx1 >= 0 && sy1 >= 0 ? S0 + sy1*sstep + sx1*cn : &cval[0];
2964 for( k = 0; k < cn; k++ )
2965 D[k] = castOp(WT(v0[k]*w[0] + v1[k]*w[1] + v2[k]*w[2] + v3[k]*w[3]));
2974 template<class CastOp, typename AT, int ONE>
2975 static void remapBicubic( const Mat& _src, Mat& _dst, const Mat& _xy,
2976 const Mat& _fxy, const void* _wtab,
2977 int borderType, const Scalar& _borderValue )
2979 typedef typename CastOp::rtype T;
2980 typedef typename CastOp::type1 WT;
2981 Size ssize = _src.size(), dsize = _dst.size();
2982 int cn = _src.channels();
2983 const AT* wtab = (const AT*)_wtab;
2984 const T* S0 = (const T*)_src.data;
2985 size_t sstep = _src.step/sizeof(S0[0]);
2986 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2987 saturate_cast<T>(_borderValue[1]),
2988 saturate_cast<T>(_borderValue[2]),
2989 saturate_cast<T>(_borderValue[3]));
2992 int borderType1 = borderType != BORDER_TRANSPARENT ? borderType : BORDER_REFLECT_101;
2994 unsigned width1 = std::max(ssize.width-3, 0), height1 = std::max(ssize.height-3, 0);
2996 if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() )
2998 dsize.width *= dsize.height;
3002 for( dy = 0; dy < dsize.height; dy++ )
3004 T* D = (T*)(_dst.data + _dst.step*dy);
3005 const short* XY = (const short*)(_xy.data + _xy.step*dy);
3006 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
3008 for( dx = 0; dx < dsize.width; dx++, D += cn )
3010 int sx = XY[dx*2]-1, sy = XY[dx*2+1]-1;
3011 const AT* w = wtab + FXY[dx]*16;
3013 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
3015 const T* S = S0 + sy*sstep + sx*cn;
3016 for( k = 0; k < cn; k++ )
3018 WT sum = S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3];
3020 sum += S[0]*w[4] + S[cn]*w[5] + S[cn*2]*w[6] + S[cn*3]*w[7];
3022 sum += S[0]*w[8] + S[cn]*w[9] + S[cn*2]*w[10] + S[cn*3]*w[11];
3024 sum += S[0]*w[12] + S[cn]*w[13] + S[cn*2]*w[14] + S[cn*3]*w[15];
3032 if( borderType == BORDER_TRANSPARENT &&
3033 ((unsigned)(sx+1) >= (unsigned)ssize.width ||
3034 (unsigned)(sy+1) >= (unsigned)ssize.height) )
3037 if( borderType1 == BORDER_CONSTANT &&
3038 (sx >= ssize.width || sx+4 <= 0 ||
3039 sy >= ssize.height || sy+4 <= 0))
3041 for( k = 0; k < cn; k++ )
3046 for( i = 0; i < 4; i++ )
3048 x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn;
3049 y[i] = borderInterpolate(sy + i, ssize.height, borderType1);
3052 for( k = 0; k < cn; k++, S0++, w -= 16 )
3054 WT cv = cval[k], sum = cv*ONE;
3055 for( i = 0; i < 4; i++, w += 4 )
3058 const T* S = S0 + yi*sstep;
3062 sum += (S[x[0]] - cv)*w[0];
3064 sum += (S[x[1]] - cv)*w[1];
3066 sum += (S[x[2]] - cv)*w[2];
3068 sum += (S[x[3]] - cv)*w[3];
3079 template<class CastOp, typename AT, int ONE>
3080 static void remapLanczos4( const Mat& _src, Mat& _dst, const Mat& _xy,
3081 const Mat& _fxy, const void* _wtab,
3082 int borderType, const Scalar& _borderValue )
3084 typedef typename CastOp::rtype T;
3085 typedef typename CastOp::type1 WT;
3086 Size ssize = _src.size(), dsize = _dst.size();
3087 int cn = _src.channels();
3088 const AT* wtab = (const AT*)_wtab;
3089 const T* S0 = (const T*)_src.data;
3090 size_t sstep = _src.step/sizeof(S0[0]);
3091 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
3092 saturate_cast<T>(_borderValue[1]),
3093 saturate_cast<T>(_borderValue[2]),
3094 saturate_cast<T>(_borderValue[3]));
3097 int borderType1 = borderType != BORDER_TRANSPARENT ? borderType : BORDER_REFLECT_101;
3099 unsigned width1 = std::max(ssize.width-7, 0), height1 = std::max(ssize.height-7, 0);
3101 if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() )
3103 dsize.width *= dsize.height;
3107 for( dy = 0; dy < dsize.height; dy++ )
3109 T* D = (T*)(_dst.data + _dst.step*dy);
3110 const short* XY = (const short*)(_xy.data + _xy.step*dy);
3111 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
3113 for( dx = 0; dx < dsize.width; dx++, D += cn )
3115 int sx = XY[dx*2]-3, sy = XY[dx*2+1]-3;
3116 const AT* w = wtab + FXY[dx]*64;
3117 const T* S = S0 + sy*sstep + sx*cn;
3119 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
3121 for( k = 0; k < cn; k++ )
3124 for( int r = 0; r < 8; r++, S += sstep, w += 8 )
3125 sum += S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3] +
3126 S[cn*4]*w[4] + S[cn*5]*w[5] + S[cn*6]*w[6] + S[cn*7]*w[7];
3135 if( borderType == BORDER_TRANSPARENT &&
3136 ((unsigned)(sx+3) >= (unsigned)ssize.width ||
3137 (unsigned)(sy+3) >= (unsigned)ssize.height) )
3140 if( borderType1 == BORDER_CONSTANT &&
3141 (sx >= ssize.width || sx+8 <= 0 ||
3142 sy >= ssize.height || sy+8 <= 0))
3144 for( k = 0; k < cn; k++ )
3149 for( i = 0; i < 8; i++ )
3151 x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn;
3152 y[i] = borderInterpolate(sy + i, ssize.height, borderType1);
3155 for( k = 0; k < cn; k++, S0++, w -= 64 )
3157 WT cv = cval[k], sum = cv*ONE;
3158 for( i = 0; i < 8; i++, w += 8 )
3161 const T* S1 = S0 + yi*sstep;
3165 sum += (S1[x[0]] - cv)*w[0];
3167 sum += (S1[x[1]] - cv)*w[1];
3169 sum += (S1[x[2]] - cv)*w[2];
3171 sum += (S1[x[3]] - cv)*w[3];
3173 sum += (S1[x[4]] - cv)*w[4];
3175 sum += (S1[x[5]] - cv)*w[5];
3177 sum += (S1[x[6]] - cv)*w[6];
3179 sum += (S1[x[7]] - cv)*w[7];
3190 typedef void (*RemapNNFunc)(const Mat& _src, Mat& _dst, const Mat& _xy,
3191 int borderType, const Scalar& _borderValue );
3193 typedef void (*RemapFunc)(const Mat& _src, Mat& _dst, const Mat& _xy,
3194 const Mat& _fxy, const void* _wtab,
3195 int borderType, const Scalar& _borderValue);
3197 class RemapInvoker :
3198 public ParallelLoopBody
3201 RemapInvoker(const Mat& _src, Mat& _dst, const Mat *_m1,
3202 const Mat *_m2, int _borderType, const Scalar &_borderValue,
3203 int _planar_input, RemapNNFunc _nnfunc, RemapFunc _ifunc, const void *_ctab) :
3204 ParallelLoopBody(), src(&_src), dst(&_dst), m1(_m1), m2(_m2),
3205 borderType(_borderType), borderValue(_borderValue),
3206 planar_input(_planar_input), nnfunc(_nnfunc), ifunc(_ifunc), ctab(_ctab)
3210 virtual void operator() (const Range& range) const
3213 const int buf_size = 1 << 14;
3214 int brows0 = std::min(128, dst->rows), map_depth = m1->depth();
3215 int bcols0 = std::min(buf_size/brows0, dst->cols);
3216 brows0 = std::min(buf_size/bcols0, dst->rows);
3218 bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
3221 Mat _bufxy(brows0, bcols0, CV_16SC2), _bufa;
3223 _bufa.create(brows0, bcols0, CV_16UC1);
3225 for( y = range.start; y < range.end; y += brows0 )
3227 for( x = 0; x < dst->cols; x += bcols0 )
3229 int brows = std::min(brows0, range.end - y);
3230 int bcols = std::min(bcols0, dst->cols - x);
3231 Mat dpart(*dst, Rect(x, y, bcols, brows));
3232 Mat bufxy(_bufxy, Rect(0, 0, bcols, brows));
3236 if( m1->type() == CV_16SC2 && !m2->data ) // the data is already in the right format
3237 bufxy = (*m1)(Rect(x, y, bcols, brows));
3238 else if( map_depth != CV_32F )
3240 for( y1 = 0; y1 < brows; y1++ )
3242 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3243 const short* sXY = (const short*)(m1->data + m1->step*(y+y1)) + x*2;
3244 const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
3246 for( x1 = 0; x1 < bcols; x1++ )
3248 int a = sA[x1] & (INTER_TAB_SIZE2-1);
3249 XY[x1*2] = sXY[x1*2] + NNDeltaTab_i[a][0];
3250 XY[x1*2+1] = sXY[x1*2+1] + NNDeltaTab_i[a][1];
3254 else if( !planar_input )
3255 (*m1)(Rect(x, y, bcols, brows)).convertTo(bufxy, bufxy.depth());
3258 for( y1 = 0; y1 < brows; y1++ )
3260 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3261 const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
3262 const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
3268 for( ; x1 <= bcols - 8; x1 += 8 )
3270 __m128 fx0 = _mm_loadu_ps(sX + x1);
3271 __m128 fx1 = _mm_loadu_ps(sX + x1 + 4);
3272 __m128 fy0 = _mm_loadu_ps(sY + x1);
3273 __m128 fy1 = _mm_loadu_ps(sY + x1 + 4);
3274 __m128i ix0 = _mm_cvtps_epi32(fx0);
3275 __m128i ix1 = _mm_cvtps_epi32(fx1);
3276 __m128i iy0 = _mm_cvtps_epi32(fy0);
3277 __m128i iy1 = _mm_cvtps_epi32(fy1);
3278 ix0 = _mm_packs_epi32(ix0, ix1);
3279 iy0 = _mm_packs_epi32(iy0, iy1);
3280 ix1 = _mm_unpacklo_epi16(ix0, iy0);
3281 iy1 = _mm_unpackhi_epi16(ix0, iy0);
3282 _mm_storeu_si128((__m128i*)(XY + x1*2), ix1);
3283 _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1);
3288 for( ; x1 < bcols; x1++ )
3290 XY[x1*2] = saturate_cast<short>(sX[x1]);
3291 XY[x1*2+1] = saturate_cast<short>(sY[x1]);
3295 nnfunc( *src, dpart, bufxy, borderType, borderValue );
3299 Mat bufa(_bufa, Rect(0, 0, bcols, brows));
3300 for( y1 = 0; y1 < brows; y1++ )
3302 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3303 ushort* A = (ushort*)(bufa.data + bufa.step*y1);
3305 if( m1->type() == CV_16SC2 && (m2->type() == CV_16UC1 || m2->type() == CV_16SC1) )
3307 bufxy = (*m1)(Rect(x, y, bcols, brows));
3309 const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
3310 for( x1 = 0; x1 < bcols; x1++ )
3311 A[x1] = (ushort)(sA[x1] & (INTER_TAB_SIZE2-1));
3313 else if( planar_input )
3315 const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
3316 const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
3322 __m128 scale = _mm_set1_ps((float)INTER_TAB_SIZE);
3323 __m128i mask = _mm_set1_epi32(INTER_TAB_SIZE-1);
3324 for( ; x1 <= bcols - 8; x1 += 8 )
3326 __m128 fx0 = _mm_loadu_ps(sX + x1);
3327 __m128 fx1 = _mm_loadu_ps(sX + x1 + 4);
3328 __m128 fy0 = _mm_loadu_ps(sY + x1);
3329 __m128 fy1 = _mm_loadu_ps(sY + x1 + 4);
3330 __m128i ix0 = _mm_cvtps_epi32(_mm_mul_ps(fx0, scale));
3331 __m128i ix1 = _mm_cvtps_epi32(_mm_mul_ps(fx1, scale));
3332 __m128i iy0 = _mm_cvtps_epi32(_mm_mul_ps(fy0, scale));
3333 __m128i iy1 = _mm_cvtps_epi32(_mm_mul_ps(fy1, scale));
3334 __m128i mx0 = _mm_and_si128(ix0, mask);
3335 __m128i mx1 = _mm_and_si128(ix1, mask);
3336 __m128i my0 = _mm_and_si128(iy0, mask);
3337 __m128i my1 = _mm_and_si128(iy1, mask);
3338 mx0 = _mm_packs_epi32(mx0, mx1);
3339 my0 = _mm_packs_epi32(my0, my1);
3340 my0 = _mm_slli_epi16(my0, INTER_BITS);
3341 mx0 = _mm_or_si128(mx0, my0);
3342 _mm_storeu_si128((__m128i*)(A + x1), mx0);
3343 ix0 = _mm_srai_epi32(ix0, INTER_BITS);
3344 ix1 = _mm_srai_epi32(ix1, INTER_BITS);
3345 iy0 = _mm_srai_epi32(iy0, INTER_BITS);
3346 iy1 = _mm_srai_epi32(iy1, INTER_BITS);
3347 ix0 = _mm_packs_epi32(ix0, ix1);
3348 iy0 = _mm_packs_epi32(iy0, iy1);
3349 ix1 = _mm_unpacklo_epi16(ix0, iy0);
3350 iy1 = _mm_unpackhi_epi16(ix0, iy0);
3351 _mm_storeu_si128((__m128i*)(XY + x1*2), ix1);
3352 _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1);
3357 for( ; x1 < bcols; x1++ )
3359 int sx = cvRound(sX[x1]*INTER_TAB_SIZE);
3360 int sy = cvRound(sY[x1]*INTER_TAB_SIZE);
3361 int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1));
3362 XY[x1*2] = saturate_cast<short>(sx >> INTER_BITS);
3363 XY[x1*2+1] = saturate_cast<short>(sy >> INTER_BITS);
3369 const float* sXY = (const float*)(m1->data + m1->step*(y+y1)) + x*2;
3371 for( x1 = 0; x1 < bcols; x1++ )
3373 int sx = cvRound(sXY[x1*2]*INTER_TAB_SIZE);
3374 int sy = cvRound(sXY[x1*2+1]*INTER_TAB_SIZE);
3375 int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1));
3376 XY[x1*2] = saturate_cast<short>(sx >> INTER_BITS);
3377 XY[x1*2+1] = saturate_cast<short>(sy >> INTER_BITS);
3382 ifunc(*src, dpart, bufxy, bufa, ctab, borderType, borderValue);
3401 static bool ocl_remap(InputArray _src, OutputArray _dst, InputArray _map1, InputArray _map2,
3402 int interpolation, int borderType, const Scalar& borderValue)
3404 int cn = _src.channels(), type = _src.type(), depth = _src.depth();
3406 if (borderType == BORDER_TRANSPARENT || cn == 3 || !(interpolation == INTER_LINEAR || interpolation == INTER_NEAREST)
3407 || _map1.type() == CV_16SC1 || _map2.type() == CV_16SC1)
3410 UMat src = _src.getUMat(), map1 = _map1.getUMat(), map2 = _map2.getUMat();
3412 if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.empty())) ||
3413 (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.empty())) )
3415 if (map1.type() != CV_16SC2)
3416 std::swap(map1, map2);
3419 CV_Assert( map1.type() == CV_32FC2 || (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) );
3421 _dst.create(map1.size(), type);
3422 UMat dst = _dst.getUMat();
3424 String kernelName = "remap";
3425 if (map1.type() == CV_32FC2 && map2.empty())
3426 kernelName += "_32FC2";
3427 else if (map1.type() == CV_16SC2)
3429 kernelName += "_16SC2";
3431 kernelName += "_16UC1";
3433 else if (map1.type() == CV_32FC1 && map2.type() == CV_32FC1)
3434 kernelName += "_2_32FC1";
3436 CV_Error(Error::StsBadArg, "Unsupported map types");
3438 static const char * const interMap[] = { "INTER_NEAREST", "INTER_LINEAR", "INTER_CUBIC", "INTER_LINEAR", "INTER_LANCZOS" };
3439 static const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
3440 "BORDER_REFLECT_101", "BORDER_TRANSPARENT" };
3441 String buildOptions = format("-D %s -D %s -D T=%s", interMap[interpolation], borderMap[borderType], ocl::typeToStr(type));
3443 if (interpolation != INTER_NEAREST)
3446 int wdepth = std::max(CV_32F, dst.depth());
3447 buildOptions = buildOptions
3448 + format(" -D WT=%s -D convertToT=%s -D convertToWT=%s"
3449 " -D convertToWT2=%s -D WT2=%s",
3450 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
3451 ocl::convertTypeStr(wdepth, depth, cn, cvt[0]),
3452 ocl::convertTypeStr(depth, wdepth, cn, cvt[1]),
3453 ocl::convertTypeStr(CV_32S, wdepth, 2, cvt[2]),
3454 ocl::typeToStr(CV_MAKE_TYPE(wdepth, 2)));
3457 ocl::Kernel k(kernelName.c_str(), ocl::imgproc::remap_oclsrc, buildOptions);
3459 Mat scalar(1, 1, type, borderValue);
3460 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), dstarg = ocl::KernelArg::WriteOnly(dst),
3461 map1arg = ocl::KernelArg::ReadOnlyNoSize(map1),
3462 scalararg = ocl::KernelArg::Constant((void*)scalar.data, scalar.elemSize());
3465 k.args(srcarg, dstarg, map1arg, scalararg);
3467 k.args(srcarg, dstarg, map1arg, ocl::KernelArg::ReadOnlyNoSize(map2), scalararg);
3469 size_t globalThreads[2] = { dst.cols, dst.rows };
3470 return k.run(2, globalThreads, NULL, false);
3477 void cv::remap( InputArray _src, OutputArray _dst,
3478 InputArray _map1, InputArray _map2,
3479 int interpolation, int borderType, const Scalar& borderValue )
3481 static RemapNNFunc nn_tab[] =
3483 remapNearest<uchar>, remapNearest<schar>, remapNearest<ushort>, remapNearest<short>,
3484 remapNearest<int>, remapNearest<float>, remapNearest<double>, 0
3487 static RemapFunc linear_tab[] =
3489 remapBilinear<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, RemapVec_8u, short>, 0,
3490 remapBilinear<Cast<float, ushort>, RemapNoVec, float>,
3491 remapBilinear<Cast<float, short>, RemapNoVec, float>, 0,
3492 remapBilinear<Cast<float, float>, RemapNoVec, float>,
3493 remapBilinear<Cast<double, double>, RemapNoVec, float>, 0
3496 static RemapFunc cubic_tab[] =
3498 remapBicubic<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, short, INTER_REMAP_COEF_SCALE>, 0,
3499 remapBicubic<Cast<float, ushort>, float, 1>,
3500 remapBicubic<Cast<float, short>, float, 1>, 0,
3501 remapBicubic<Cast<float, float>, float, 1>,
3502 remapBicubic<Cast<double, double>, float, 1>, 0
3505 static RemapFunc lanczos4_tab[] =
3507 remapLanczos4<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, short, INTER_REMAP_COEF_SCALE>, 0,
3508 remapLanczos4<Cast<float, ushort>, float, 1>,
3509 remapLanczos4<Cast<float, short>, float, 1>, 0,
3510 remapLanczos4<Cast<float, float>, float, 1>,
3511 remapLanczos4<Cast<double, double>, float, 1>, 0
3514 CV_Assert( _map1.size().area() > 0 );
3515 CV_Assert( _map2.empty() || (_map2.size() == _map1.size()));
3517 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
3518 ocl_remap(_src, _dst, _map1, _map2, interpolation, borderType, borderValue))
3520 Mat src = _src.getMat(), map1 = _map1.getMat(), map2 = _map2.getMat();
3521 _dst.create( map1.size(), src.type() );
3522 Mat dst = _dst.getMat();
3523 if( dst.data == src.data )
3526 int depth = src.depth();
3527 RemapNNFunc nnfunc = 0;
3528 RemapFunc ifunc = 0;
3529 const void* ctab = 0;
3530 bool fixpt = depth == CV_8U;
3531 bool planar_input = false;
3533 if( interpolation == INTER_NEAREST )
3535 nnfunc = nn_tab[depth];
3536 CV_Assert( nnfunc != 0 );
3540 if( interpolation == INTER_AREA )
3541 interpolation = INTER_LINEAR;
3543 if( interpolation == INTER_LINEAR )
3544 ifunc = linear_tab[depth];
3545 else if( interpolation == INTER_CUBIC )
3546 ifunc = cubic_tab[depth];
3547 else if( interpolation == INTER_LANCZOS4 )
3548 ifunc = lanczos4_tab[depth];
3550 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
3551 CV_Assert( ifunc != 0 );
3552 ctab = initInterTab2D( interpolation, fixpt );
3555 const Mat *m1 = &map1, *m2 = &map2;
3557 if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.type() == CV_16SC1 || !map2.data)) ||
3558 (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.type() == CV_16SC1 || !map1.data)) )
3560 if( map1.type() != CV_16SC2 )
3565 CV_Assert( ((map1.type() == CV_32FC2 || map1.type() == CV_16SC2) && !map2.data) ||
3566 (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) );
3567 planar_input = map1.channels() == 1;
3570 RemapInvoker invoker(src, dst, m1, m2,
3571 borderType, borderValue, planar_input, nnfunc, ifunc,
3573 parallel_for_(Range(0, dst.rows), invoker, dst.total()/(double)(1<<16));
3577 void cv::convertMaps( InputArray _map1, InputArray _map2,
3578 OutputArray _dstmap1, OutputArray _dstmap2,
3579 int dstm1type, bool nninterpolate )
3581 Mat map1 = _map1.getMat(), map2 = _map2.getMat(), dstmap1, dstmap2;
3582 Size size = map1.size();
3583 const Mat *m1 = &map1, *m2 = &map2;
3584 int m1type = m1->type(), m2type = m2->type();
3586 CV_Assert( (m1type == CV_16SC2 && (nninterpolate || m2type == CV_16UC1 || m2type == CV_16SC1)) ||
3587 (m2type == CV_16SC2 && (nninterpolate || m1type == CV_16UC1 || m1type == CV_16SC1)) ||
3588 (m1type == CV_32FC1 && m2type == CV_32FC1) ||
3589 (m1type == CV_32FC2 && !m2->data) );
3591 if( m2type == CV_16SC2 )
3593 std::swap( m1, m2 );
3594 std::swap( m1type, m2type );
3597 if( dstm1type <= 0 )
3598 dstm1type = m1type == CV_16SC2 ? CV_32FC2 : CV_16SC2;
3599 CV_Assert( dstm1type == CV_16SC2 || dstm1type == CV_32FC1 || dstm1type == CV_32FC2 );
3600 _dstmap1.create( size, dstm1type );
3601 dstmap1 = _dstmap1.getMat();
3603 if( !nninterpolate && dstm1type != CV_32FC2 )
3605 _dstmap2.create( size, dstm1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
3606 dstmap2 = _dstmap2.getMat();
3611 if( m1type == dstm1type || (nninterpolate &&
3612 ((m1type == CV_16SC2 && dstm1type == CV_32FC2) ||
3613 (m1type == CV_32FC2 && dstm1type == CV_16SC2))) )
3615 m1->convertTo( dstmap1, dstmap1.type() );
3616 if( dstmap2.data && dstmap2.type() == m2->type() )
3617 m2->copyTo( dstmap2 );
3621 if( m1type == CV_32FC1 && dstm1type == CV_32FC2 )
3623 Mat vdata[] = { *m1, *m2 };
3624 merge( vdata, 2, dstmap1 );
3628 if( m1type == CV_32FC2 && dstm1type == CV_32FC1 )
3630 Mat mv[] = { dstmap1, dstmap2 };
3635 if( m1->isContinuous() && (!m2->data || m2->isContinuous()) &&
3636 dstmap1.isContinuous() && (!dstmap2.data || dstmap2.isContinuous()) )
3638 size.width *= size.height;
3642 const float scale = 1.f/INTER_TAB_SIZE;
3644 for( y = 0; y < size.height; y++ )
3646 const float* src1f = (const float*)(m1->data + m1->step*y);
3647 const float* src2f = (const float*)(m2->data + m2->step*y);
3648 const short* src1 = (const short*)src1f;
3649 const ushort* src2 = (const ushort*)src2f;
3651 float* dst1f = (float*)(dstmap1.data + dstmap1.step*y);
3652 float* dst2f = (float*)(dstmap2.data + dstmap2.step*y);
3653 short* dst1 = (short*)dst1f;
3654 ushort* dst2 = (ushort*)dst2f;
3656 if( m1type == CV_32FC1 && dstm1type == CV_16SC2 )
3659 for( x = 0; x < size.width; x++ )
3661 dst1[x*2] = saturate_cast<short>(src1f[x]);
3662 dst1[x*2+1] = saturate_cast<short>(src2f[x]);
3665 for( x = 0; x < size.width; x++ )
3667 int ix = saturate_cast<int>(src1f[x]*INTER_TAB_SIZE);
3668 int iy = saturate_cast<int>(src2f[x]*INTER_TAB_SIZE);
3669 dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
3670 dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
3671 dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
3674 else if( m1type == CV_32FC2 && dstm1type == CV_16SC2 )
3677 for( x = 0; x < size.width; x++ )
3679 dst1[x*2] = saturate_cast<short>(src1f[x*2]);
3680 dst1[x*2+1] = saturate_cast<short>(src1f[x*2+1]);
3683 for( x = 0; x < size.width; x++ )
3685 int ix = saturate_cast<int>(src1f[x*2]*INTER_TAB_SIZE);
3686 int iy = saturate_cast<int>(src1f[x*2+1]*INTER_TAB_SIZE);
3687 dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
3688 dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
3689 dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
3692 else if( m1type == CV_16SC2 && dstm1type == CV_32FC1 )
3694 for( x = 0; x < size.width; x++ )
3696 int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1) : 0;
3697 dst1f[x] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
3698 dst2f[x] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
3701 else if( m1type == CV_16SC2 && dstm1type == CV_32FC2 )
3703 for( x = 0; x < size.width; x++ )
3705 int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1): 0;
3706 dst1f[x*2] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
3707 dst1f[x*2+1] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
3711 CV_Error( CV_StsNotImplemented, "Unsupported combination of input/output matrices" );
3719 class warpAffineInvoker :
3720 public ParallelLoopBody
3723 warpAffineInvoker(const Mat &_src, Mat &_dst, int _interpolation, int _borderType,
3724 const Scalar &_borderValue, int *_adelta, int *_bdelta, double *_M) :
3725 ParallelLoopBody(), src(_src), dst(_dst), interpolation(_interpolation),
3726 borderType(_borderType), borderValue(_borderValue), adelta(_adelta), bdelta(_bdelta),
3731 virtual void operator() (const Range& range) const
3733 const int BLOCK_SZ = 64;
3734 short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
3735 const int AB_BITS = MAX(10, (int)INTER_BITS);
3736 const int AB_SCALE = 1 << AB_BITS;
3737 int round_delta = interpolation == INTER_NEAREST ? AB_SCALE/2 : AB_SCALE/INTER_TAB_SIZE/2, x, y, x1, y1;
3739 bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
3742 int bh0 = std::min(BLOCK_SZ/2, dst.rows);
3743 int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, dst.cols);
3744 bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, dst.rows);
3746 for( y = range.start; y < range.end; y += bh0 )
3748 for( x = 0; x < dst.cols; x += bw0 )
3750 int bw = std::min( bw0, dst.cols - x);
3751 int bh = std::min( bh0, range.end - y);
3753 Mat _XY(bh, bw, CV_16SC2, XY), matA;
3754 Mat dpart(dst, Rect(x, y, bw, bh));
3756 for( y1 = 0; y1 < bh; y1++ )
3758 short* xy = XY + y1*bw*2;
3759 int X0 = saturate_cast<int>((M[1]*(y + y1) + M[2])*AB_SCALE) + round_delta;
3760 int Y0 = saturate_cast<int>((M[4]*(y + y1) + M[5])*AB_SCALE) + round_delta;
3762 if( interpolation == INTER_NEAREST )
3763 for( x1 = 0; x1 < bw; x1++ )
3765 int X = (X0 + adelta[x+x1]) >> AB_BITS;
3766 int Y = (Y0 + bdelta[x+x1]) >> AB_BITS;
3767 xy[x1*2] = saturate_cast<short>(X);
3768 xy[x1*2+1] = saturate_cast<short>(Y);
3772 short* alpha = A + y1*bw;
3777 __m128i fxy_mask = _mm_set1_epi32(INTER_TAB_SIZE - 1);
3778 __m128i XX = _mm_set1_epi32(X0), YY = _mm_set1_epi32(Y0);
3779 for( ; x1 <= bw - 8; x1 += 8 )
3781 __m128i tx0, tx1, ty0, ty1;
3782 tx0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1)), XX);
3783 ty0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1)), YY);
3784 tx1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1 + 4)), XX);
3785 ty1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1 + 4)), YY);
3787 tx0 = _mm_srai_epi32(tx0, AB_BITS - INTER_BITS);
3788 ty0 = _mm_srai_epi32(ty0, AB_BITS - INTER_BITS);
3789 tx1 = _mm_srai_epi32(tx1, AB_BITS - INTER_BITS);
3790 ty1 = _mm_srai_epi32(ty1, AB_BITS - INTER_BITS);
3792 __m128i fx_ = _mm_packs_epi32(_mm_and_si128(tx0, fxy_mask),
3793 _mm_and_si128(tx1, fxy_mask));
3794 __m128i fy_ = _mm_packs_epi32(_mm_and_si128(ty0, fxy_mask),
3795 _mm_and_si128(ty1, fxy_mask));
3796 tx0 = _mm_packs_epi32(_mm_srai_epi32(tx0, INTER_BITS),
3797 _mm_srai_epi32(tx1, INTER_BITS));
3798 ty0 = _mm_packs_epi32(_mm_srai_epi32(ty0, INTER_BITS),
3799 _mm_srai_epi32(ty1, INTER_BITS));
3800 fx_ = _mm_adds_epi16(fx_, _mm_slli_epi16(fy_, INTER_BITS));
3802 _mm_storeu_si128((__m128i*)(xy + x1*2), _mm_unpacklo_epi16(tx0, ty0));
3803 _mm_storeu_si128((__m128i*)(xy + x1*2 + 8), _mm_unpackhi_epi16(tx0, ty0));
3804 _mm_storeu_si128((__m128i*)(alpha + x1), fx_);
3808 for( ; x1 < bw; x1++ )
3810 int X = (X0 + adelta[x+x1]) >> (AB_BITS - INTER_BITS);
3811 int Y = (Y0 + bdelta[x+x1]) >> (AB_BITS - INTER_BITS);
3812 xy[x1*2] = saturate_cast<short>(X >> INTER_BITS);
3813 xy[x1*2+1] = saturate_cast<short>(Y >> INTER_BITS);
3814 alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
3815 (X & (INTER_TAB_SIZE-1)));
3820 if( interpolation == INTER_NEAREST )
3821 remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
3824 Mat _matA(bh, bw, CV_16U, A);
3825 remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
3834 int interpolation, borderType;
3836 int *adelta, *bdelta;
3840 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
3841 class IPPwarpAffineInvoker :
3842 public ParallelLoopBody
3845 IPPwarpAffineInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[2][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpAffineBackFunc _func, bool *_ok) :
3846 ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
3851 virtual void operator() (const Range& range) const
3853 IppiSize srcsize = { src.cols, src.rows };
3854 IppiRect srcroi = { 0, 0, src.cols, src.rows };
3855 IppiRect dstroi = { 0, range.start, dst.cols, range.end - range.start };
3856 int cnn = src.channels();
3857 if( borderType == BORDER_CONSTANT )
3859 IppiSize setSize = { dst.cols, range.end - range.start };
3860 void *dataPointer = dst.data + dst.step[0] * range.start;
3861 if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
3867 if( func( src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode ) < 0) ////Aug 2013: problem in IPP 7.1, 8.0 : sometimes function return ippStsCoeffErr
3873 double (&coeffs)[2][3];
3877 ippiWarpAffineBackFunc func;
3879 const IPPwarpAffineInvoker& operator= (const IPPwarpAffineInvoker&);
3885 enum { OCL_OP_PERSPECTIVE = 1, OCL_OP_AFFINE = 0 };
3887 static bool ocl_warpTransform(InputArray _src, OutputArray _dst, InputArray _M0,
3888 Size dsize, int flags, int borderType, const Scalar& borderValue,
3891 CV_Assert(op_type == OCL_OP_AFFINE || op_type == OCL_OP_PERSPECTIVE);
3893 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
3894 double doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
3896 int interpolation = flags & INTER_MAX;
3897 if( interpolation == INTER_AREA )
3898 interpolation = INTER_LINEAR;
3900 if ( !(borderType == cv::BORDER_CONSTANT &&
3901 (interpolation == cv::INTER_NEAREST || interpolation == cv::INTER_LINEAR || interpolation == cv::INTER_CUBIC)) ||
3902 (!doubleSupport && depth == CV_64F) || cn > 4)
3905 const char * const interpolationMap[3] = { "NEAREST", "LINEAR", "CUBIC" };
3906 ocl::ProgramSource program = op_type == OCL_OP_AFFINE ?
3907 ocl::imgproc::warp_affine_oclsrc : ocl::imgproc::warp_perspective_oclsrc;
3908 const char * const kernelName = op_type == OCL_OP_AFFINE ? "warpAffine" : "warpPerspective";
3910 int scalarcn = cn == 3 ? 4 : cn;
3911 int wdepth = interpolation == INTER_NEAREST ? depth : std::max(CV_32S, depth);
3912 int sctype = CV_MAKETYPE(wdepth, scalarcn);
3916 if (interpolation == INTER_NEAREST)
3918 opts = format("-D INTER_NEAREST -D T=%s%s -D T1=%s -D ST=%s -D cn=%d", ocl::typeToStr(type),
3919 doubleSupport ? " -D DOUBLE_SUPPORT" : "",
3920 ocl::typeToStr(CV_MAT_DEPTH(type)),
3921 ocl::typeToStr(sctype),
3927 opts = format("-D INTER_%s -D T=%s -D T1=%s -D ST=%s -D WT=%s -D depth=%d -D convertToWT=%s -D convertToT=%s%s -D cn=%d",
3928 interpolationMap[interpolation], ocl::typeToStr(type),
3929 ocl::typeToStr(CV_MAT_DEPTH(type)),
3930 ocl::typeToStr(sctype),
3931 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)), depth,
3932 ocl::convertTypeStr(depth, wdepth, cn, cvt[0]),
3933 ocl::convertTypeStr(wdepth, depth, cn, cvt[1]),
3934 doubleSupport ? " -D DOUBLE_SUPPORT" : "", cn);
3937 k.create(kernelName, program, opts);
3941 double borderBuf[] = {0, 0, 0, 0};
3942 scalarToRawData(borderValue, borderBuf, sctype);
3944 UMat src = _src.getUMat(), M0;
3945 _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
3946 UMat dst = _dst.getUMat();
3949 int matRows = (op_type == OCL_OP_AFFINE ? 2 : 3);
3950 Mat matM(matRows, 3, CV_64F, M), M1 = _M0.getMat();
3951 CV_Assert( (M1.type() == CV_32F || M1.type() == CV_64F) &&
3952 M1.rows == matRows && M1.cols == 3 );
3953 M1.convertTo(matM, matM.type());
3955 if( !(flags & WARP_INVERSE_MAP) )
3957 if (op_type == OCL_OP_PERSPECTIVE)
3961 double D = M[0]*M[4] - M[1]*M[3];
3962 D = D != 0 ? 1./D : 0;
3963 double A11 = M[4]*D, A22=M[0]*D;
3964 M[0] = A11; M[1] *= -D;
3965 M[3] *= -D; M[4] = A22;
3966 double b1 = -M[0]*M[2] - M[1]*M[5];
3967 double b2 = -M[3]*M[2] - M[4]*M[5];
3968 M[2] = b1; M[5] = b2;
3971 matM.convertTo(M0, doubleSupport ? CV_64F : CV_32F);
3973 k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst), ocl::KernelArg::PtrReadOnly(M0),
3974 ocl::KernelArg(0, 0, 0, borderBuf, CV_ELEM_SIZE(sctype)));
3976 size_t globalThreads[2] = { dst.cols, dst.rows };
3977 return k.run(2, globalThreads, NULL, false);
3985 void cv::warpAffine( InputArray _src, OutputArray _dst,
3986 InputArray _M0, Size dsize,
3987 int flags, int borderType, const Scalar& borderValue )
3989 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
3990 ocl_warpTransform(_src, _dst, _M0, dsize, flags, borderType,
3991 borderValue, OCL_OP_AFFINE))
3993 Mat src = _src.getMat(), M0 = _M0.getMat();
3994 _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
3995 Mat dst = _dst.getMat();
3996 CV_Assert( src.cols > 0 && src.rows > 0 );
3997 if( dst.data == src.data )
4001 Mat matM(2, 3, CV_64F, M);
4002 int interpolation = flags & INTER_MAX;
4003 if( interpolation == INTER_AREA )
4004 interpolation = INTER_LINEAR;
4006 CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 );
4007 M0.convertTo(matM, matM.type());
4009 #ifdef HAVE_TEGRA_OPTIMIZATION
4010 if( tegra::warpAffine(src, dst, M, flags, borderType, borderValue) )
4014 if( !(flags & WARP_INVERSE_MAP) )
4016 double D = M[0]*M[4] - M[1]*M[3];
4017 D = D != 0 ? 1./D : 0;
4018 double A11 = M[4]*D, A22=M[0]*D;
4019 M[0] = A11; M[1] *= -D;
4020 M[3] *= -D; M[4] = A22;
4021 double b1 = -M[0]*M[2] - M[1]*M[5];
4022 double b2 = -M[3]*M[2] - M[4]*M[5];
4023 M[2] = b1; M[5] = b2;
4027 AutoBuffer<int> _abdelta(dst.cols*2);
4028 int* adelta = &_abdelta[0], *bdelta = adelta + dst.cols;
4029 const int AB_BITS = MAX(10, (int)INTER_BITS);
4030 const int AB_SCALE = 1 << AB_BITS;
4032 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
4033 int depth = src.depth();
4034 int channels = src.channels();
4035 if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
4036 ( channels == 1 || channels == 3 || channels == 4 ) &&
4037 ( borderType == cv::BORDER_TRANSPARENT || ( borderType == cv::BORDER_CONSTANT ) ) )
4039 int type = src.type();
4040 ippiWarpAffineBackFunc ippFunc =
4041 type == CV_8UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C1R :
4042 type == CV_8UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C3R :
4043 type == CV_8UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C4R :
4044 type == CV_16UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C1R :
4045 type == CV_16UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C3R :
4046 type == CV_16UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C4R :
4047 type == CV_32FC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C1R :
4048 type == CV_32FC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C3R :
4049 type == CV_32FC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C4R :
4052 flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
4053 flags == INTER_NEAREST ? IPPI_INTER_NN :
4054 flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
4056 if( mode && ippFunc )
4058 double coeffs[2][3];
4059 for( int i = 0; i < 2; i++ )
4061 for( int j = 0; j < 3; j++ )
4063 coeffs[i][j] = matM.at<double>(i, j);
4067 Range range(0, dst.rows);
4068 IPPwarpAffineInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
4069 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
4076 for( x = 0; x < dst.cols; x++ )
4078 adelta[x] = saturate_cast<int>(M[0]*x*AB_SCALE);
4079 bdelta[x] = saturate_cast<int>(M[3]*x*AB_SCALE);
4082 Range range(0, dst.rows);
4083 warpAffineInvoker invoker(src, dst, interpolation, borderType,
4084 borderValue, adelta, bdelta, M);
4085 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
4092 class warpPerspectiveInvoker :
4093 public ParallelLoopBody
4097 warpPerspectiveInvoker(const Mat &_src, Mat &_dst, double *_M, int _interpolation,
4098 int _borderType, const Scalar &_borderValue) :
4099 ParallelLoopBody(), src(_src), dst(_dst), M(_M), interpolation(_interpolation),
4100 borderType(_borderType), borderValue(_borderValue)
4104 virtual void operator() (const Range& range) const
4106 const int BLOCK_SZ = 32;
4107 short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
4108 int x, y, x1, y1, width = dst.cols, height = dst.rows;
4110 int bh0 = std::min(BLOCK_SZ/2, height);
4111 int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
4112 bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);
4114 for( y = range.start; y < range.end; y += bh0 )
4116 for( x = 0; x < width; x += bw0 )
4118 int bw = std::min( bw0, width - x);
4119 int bh = std::min( bh0, range.end - y); // height
4121 Mat _XY(bh, bw, CV_16SC2, XY), matA;
4122 Mat dpart(dst, Rect(x, y, bw, bh));
4124 for( y1 = 0; y1 < bh; y1++ )
4126 short* xy = XY + y1*bw*2;
4127 double X0 = M[0]*x + M[1]*(y + y1) + M[2];
4128 double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
4129 double W0 = M[6]*x + M[7]*(y + y1) + M[8];
4131 if( interpolation == INTER_NEAREST )
4132 for( x1 = 0; x1 < bw; x1++ )
4134 double W = W0 + M[6]*x1;
4136 double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W));
4137 double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W));
4138 int X = saturate_cast<int>(fX);
4139 int Y = saturate_cast<int>(fY);
4141 xy[x1*2] = saturate_cast<short>(X);
4142 xy[x1*2+1] = saturate_cast<short>(Y);
4146 short* alpha = A + y1*bw;
4147 for( x1 = 0; x1 < bw; x1++ )
4149 double W = W0 + M[6]*x1;
4150 W = W ? INTER_TAB_SIZE/W : 0;
4151 double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W));
4152 double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W));
4153 int X = saturate_cast<int>(fX);
4154 int Y = saturate_cast<int>(fY);
4156 xy[x1*2] = saturate_cast<short>(X >> INTER_BITS);
4157 xy[x1*2+1] = saturate_cast<short>(Y >> INTER_BITS);
4158 alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
4159 (X & (INTER_TAB_SIZE-1)));
4164 if( interpolation == INTER_NEAREST )
4165 remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
4168 Mat _matA(bh, bw, CV_16U, A);
4169 remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
4179 int interpolation, borderType;
4183 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
4184 class IPPwarpPerspectiveInvoker :
4185 public ParallelLoopBody
4188 IPPwarpPerspectiveInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[3][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpPerspectiveBackFunc _func, bool *_ok) :
4189 ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
4194 virtual void operator() (const Range& range) const
4196 IppiSize srcsize = {src.cols, src.rows};
4197 IppiRect srcroi = {0, 0, src.cols, src.rows};
4198 IppiRect dstroi = {0, range.start, dst.cols, range.end - range.start};
4199 int cnn = src.channels();
4201 if( borderType == BORDER_CONSTANT )
4203 IppiSize setSize = {dst.cols, range.end - range.start};
4204 void *dataPointer = dst.data + dst.step[0] * range.start;
4205 if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
4211 if( func(src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode) < 0)
4217 double (&coeffs)[3][3];
4220 const Scalar borderValue;
4221 ippiWarpPerspectiveBackFunc func;
4223 const IPPwarpPerspectiveInvoker& operator= (const IPPwarpPerspectiveInvoker&);
4229 void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
4230 Size dsize, int flags, int borderType, const Scalar& borderValue )
4232 CV_Assert( _src.total() > 0 );
4234 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
4235 ocl_warpTransform(_src, _dst, _M0, dsize, flags, borderType, borderValue,
4236 OCL_OP_PERSPECTIVE))
4238 Mat src = _src.getMat(), M0 = _M0.getMat();
4239 _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
4240 Mat dst = _dst.getMat();
4242 if( dst.data == src.data )
4246 Mat matM(3, 3, CV_64F, M);
4247 int interpolation = flags & INTER_MAX;
4248 if( interpolation == INTER_AREA )
4249 interpolation = INTER_LINEAR;
4251 CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
4252 M0.convertTo(matM, matM.type());
4254 #ifdef HAVE_TEGRA_OPTIMIZATION
4255 if( tegra::warpPerspective(src, dst, M, flags, borderType, borderValue) )
4259 if( !(flags & WARP_INVERSE_MAP) )
4262 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
4263 int depth = src.depth();
4264 int channels = src.channels();
4265 if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
4266 ( channels == 1 || channels == 3 || channels == 4 ) &&
4267 ( borderType == cv::BORDER_TRANSPARENT || borderType == cv::BORDER_CONSTANT ) )
4269 int type = src.type();
4270 ippiWarpPerspectiveBackFunc ippFunc =
4271 type == CV_8UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C1R :
4272 type == CV_8UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C3R :
4273 type == CV_8UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C4R :
4274 type == CV_16UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C1R :
4275 type == CV_16UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C3R :
4276 type == CV_16UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C4R :
4277 type == CV_32FC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C1R :
4278 type == CV_32FC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C3R :
4279 type == CV_32FC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C4R :
4282 flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
4283 flags == INTER_NEAREST ? IPPI_INTER_NN :
4284 flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
4286 if( mode && ippFunc )
4288 double coeffs[3][3];
4289 for( int i = 0; i < 3; i++ )
4291 for( int j = 0; j < 3; j++ )
4293 coeffs[i][j] = matM.at<double>(i, j);
4297 Range range(0, dst.rows);
4298 IPPwarpPerspectiveInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
4299 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
4306 Range range(0, dst.rows);
4307 warpPerspectiveInvoker invoker(src, dst, M, interpolation, borderType, borderValue);
4308 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
4312 cv::Mat cv::getRotationMatrix2D( Point2f center, double angle, double scale )
4315 double alpha = cos(angle)*scale;
4316 double beta = sin(angle)*scale;
4318 Mat M(2, 3, CV_64F);
4319 double* m = (double*)M.data;
4323 m[2] = (1-alpha)*center.x - beta*center.y;
4326 m[5] = beta*center.x + (1-alpha)*center.y;
4331 /* Calculates coefficients of perspective transformation
4332 * which maps (xi,yi) to (ui,vi), (i=1,2,3,4):
4334 * c00*xi + c01*yi + c02
4335 * ui = ---------------------
4336 * c20*xi + c21*yi + c22
4338 * c10*xi + c11*yi + c12
4339 * vi = ---------------------
4340 * c20*xi + c21*yi + c22
4342 * Coefficients are calculated by solving linear system:
4343 * / x0 y0 1 0 0 0 -x0*u0 -y0*u0 \ /c00\ /u0\
4344 * | x1 y1 1 0 0 0 -x1*u1 -y1*u1 | |c01| |u1|
4345 * | x2 y2 1 0 0 0 -x2*u2 -y2*u2 | |c02| |u2|
4346 * | x3 y3 1 0 0 0 -x3*u3 -y3*u3 |.|c10|=|u3|,
4347 * | 0 0 0 x0 y0 1 -x0*v0 -y0*v0 | |c11| |v0|
4348 * | 0 0 0 x1 y1 1 -x1*v1 -y1*v1 | |c12| |v1|
4349 * | 0 0 0 x2 y2 1 -x2*v2 -y2*v2 | |c20| |v2|
4350 * \ 0 0 0 x3 y3 1 -x3*v3 -y3*v3 / \c21/ \v3/
4353 * cij - matrix coefficients, c22 = 1
4355 cv::Mat cv::getPerspectiveTransform( const Point2f src[], const Point2f dst[] )
4357 Mat M(3, 3, CV_64F), X(8, 1, CV_64F, M.data);
4358 double a[8][8], b[8];
4359 Mat A(8, 8, CV_64F, a), B(8, 1, CV_64F, b);
4361 for( int i = 0; i < 4; ++i )
4363 a[i][0] = a[i+4][3] = src[i].x;
4364 a[i][1] = a[i+4][4] = src[i].y;
4365 a[i][2] = a[i+4][5] = 1;
4366 a[i][3] = a[i][4] = a[i][5] =
4367 a[i+4][0] = a[i+4][1] = a[i+4][2] = 0;
4368 a[i][6] = -src[i].x*dst[i].x;
4369 a[i][7] = -src[i].y*dst[i].x;
4370 a[i+4][6] = -src[i].x*dst[i].y;
4371 a[i+4][7] = -src[i].y*dst[i].y;
4376 solve( A, B, X, DECOMP_SVD );
4377 ((double*)M.data)[8] = 1.;
4382 /* Calculates coefficients of affine transformation
4383 * which maps (xi,yi) to (ui,vi), (i=1,2,3):
4385 * ui = c00*xi + c01*yi + c02
4387 * vi = c10*xi + c11*yi + c12
4389 * Coefficients are calculated by solving linear system:
4390 * / x0 y0 1 0 0 0 \ /c00\ /u0\
4391 * | x1 y1 1 0 0 0 | |c01| |u1|
4392 * | x2 y2 1 0 0 0 | |c02| |u2|
4393 * | 0 0 0 x0 y0 1 | |c10| |v0|
4394 * | 0 0 0 x1 y1 1 | |c11| |v1|
4395 * \ 0 0 0 x2 y2 1 / |c12| |v2|
4398 * cij - matrix coefficients
4401 cv::Mat cv::getAffineTransform( const Point2f src[], const Point2f dst[] )
4403 Mat M(2, 3, CV_64F), X(6, 1, CV_64F, M.data);
4404 double a[6*6], b[6];
4405 Mat A(6, 6, CV_64F, a), B(6, 1, CV_64F, b);
4407 for( int i = 0; i < 3; i++ )
4411 a[j] = a[k+3] = src[i].x;
4412 a[j+1] = a[k+4] = src[i].y;
4413 a[j+2] = a[k+5] = 1;
4414 a[j+3] = a[j+4] = a[j+5] = 0;
4415 a[k] = a[k+1] = a[k+2] = 0;
4417 b[i*2+1] = dst[i].y;
4424 void cv::invertAffineTransform(InputArray _matM, OutputArray __iM)
4426 Mat matM = _matM.getMat();
4427 CV_Assert(matM.rows == 2 && matM.cols == 3);
4428 __iM.create(2, 3, matM.type());
4429 Mat _iM = __iM.getMat();
4431 if( matM.type() == CV_32F )
4433 const float* M = (const float*)matM.data;
4434 float* iM = (float*)_iM.data;
4435 int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));
4437 double D = M[0]*M[step+1] - M[1]*M[step];
4438 D = D != 0 ? 1./D : 0;
4439 double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
4440 double b1 = -A11*M[2] - A12*M[step+2];
4441 double b2 = -A21*M[2] - A22*M[step+2];
4443 iM[0] = (float)A11; iM[1] = (float)A12; iM[2] = (float)b1;
4444 iM[istep] = (float)A21; iM[istep+1] = (float)A22; iM[istep+2] = (float)b2;
4446 else if( matM.type() == CV_64F )
4448 const double* M = (const double*)matM.data;
4449 double* iM = (double*)_iM.data;
4450 int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));
4452 double D = M[0]*M[step+1] - M[1]*M[step];
4453 D = D != 0 ? 1./D : 0;
4454 double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
4455 double b1 = -A11*M[2] - A12*M[step+2];
4456 double b2 = -A21*M[2] - A22*M[step+2];
4458 iM[0] = A11; iM[1] = A12; iM[2] = b1;
4459 iM[istep] = A21; iM[istep+1] = A22; iM[istep+2] = b2;
4462 CV_Error( CV_StsUnsupportedFormat, "" );
4465 cv::Mat cv::getPerspectiveTransform(InputArray _src, InputArray _dst)
4467 Mat src = _src.getMat(), dst = _dst.getMat();
4468 CV_Assert(src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4);
4469 return getPerspectiveTransform((const Point2f*)src.data, (const Point2f*)dst.data);
4472 cv::Mat cv::getAffineTransform(InputArray _src, InputArray _dst)
4474 Mat src = _src.getMat(), dst = _dst.getMat();
4475 CV_Assert(src.checkVector(2, CV_32F) == 3 && dst.checkVector(2, CV_32F) == 3);
4476 return getAffineTransform((const Point2f*)src.data, (const Point2f*)dst.data);
4480 cvResize( const CvArr* srcarr, CvArr* dstarr, int method )
4482 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
4483 CV_Assert( src.type() == dst.type() );
4484 cv::resize( src, dst, dst.size(), (double)dst.cols/src.cols,
4485 (double)dst.rows/src.rows, method );
4490 cvWarpAffine( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr,
4491 int flags, CvScalar fillval )
4493 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
4494 cv::Mat matrix = cv::cvarrToMat(marr);
4495 CV_Assert( src.type() == dst.type() );
4496 cv::warpAffine( src, dst, matrix, dst.size(), flags,
4497 (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
4502 cvWarpPerspective( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr,
4503 int flags, CvScalar fillval )
4505 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
4506 cv::Mat matrix = cv::cvarrToMat(marr);
4507 CV_Assert( src.type() == dst.type() );
4508 cv::warpPerspective( src, dst, matrix, dst.size(), flags,
4509 (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
4514 cvRemap( const CvArr* srcarr, CvArr* dstarr,
4515 const CvArr* _mapx, const CvArr* _mapy,
4516 int flags, CvScalar fillval )
4518 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
4519 cv::Mat mapx = cv::cvarrToMat(_mapx), mapy = cv::cvarrToMat(_mapy);
4520 CV_Assert( src.type() == dst.type() && dst.size() == mapx.size() );
4521 cv::remap( src, dst, mapx, mapy, flags & cv::INTER_MAX,
4522 (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
4524 CV_Assert( dst0.data == dst.data );
4529 cv2DRotationMatrix( CvPoint2D32f center, double angle,
4530 double scale, CvMat* matrix )
4532 cv::Mat M0 = cv::cvarrToMat(matrix), M = cv::getRotationMatrix2D(center, angle, scale);
4533 CV_Assert( M.size() == M0.size() );
4534 M.convertTo(M0, M0.type());
4540 cvGetPerspectiveTransform( const CvPoint2D32f* src,
4541 const CvPoint2D32f* dst,
4544 cv::Mat M0 = cv::cvarrToMat(matrix),
4545 M = cv::getPerspectiveTransform((const cv::Point2f*)src, (const cv::Point2f*)dst);
4546 CV_Assert( M.size() == M0.size() );
4547 M.convertTo(M0, M0.type());
4553 cvGetAffineTransform( const CvPoint2D32f* src,
4554 const CvPoint2D32f* dst,
4557 cv::Mat M0 = cv::cvarrToMat(matrix),
4558 M = cv::getAffineTransform((const cv::Point2f*)src, (const cv::Point2f*)dst);
4559 CV_Assert( M.size() == M0.size() );
4560 M.convertTo(M0, M0.type());
4566 cvConvertMaps( const CvArr* arr1, const CvArr* arr2, CvArr* dstarr1, CvArr* dstarr2 )
4568 cv::Mat map1 = cv::cvarrToMat(arr1), map2;
4569 cv::Mat dstmap1 = cv::cvarrToMat(dstarr1), dstmap2;
4572 map2 = cv::cvarrToMat(arr2);
4575 dstmap2 = cv::cvarrToMat(dstarr2);
4576 if( dstmap2.type() == CV_16SC1 )
4577 dstmap2 = cv::Mat(dstmap2.size(), CV_16UC1, dstmap2.data, dstmap2.step);
4580 cv::convertMaps( map1, map2, dstmap1, dstmap2, dstmap1.type(), false );
4583 /****************************************************************************************\
4584 * Log-Polar Transform *
4585 \****************************************************************************************/
4587 /* now it is done via Remap; more correct implementation should use
4588 some super-sampling technique outside of the "fovea" circle */
4590 cvLogPolar( const CvArr* srcarr, CvArr* dstarr,
4591 CvPoint2D32f center, double M, int flags )
4593 cv::Ptr<CvMat> mapx, mapy;
4595 CvMat srcstub, *src = cvGetMat(srcarr, &srcstub);
4596 CvMat dststub, *dst = cvGetMat(dstarr, &dststub);
4597 CvSize ssize, dsize;
4599 if( !CV_ARE_TYPES_EQ( src, dst ))
4600 CV_Error( CV_StsUnmatchedFormats, "" );
4603 CV_Error( CV_StsOutOfRange, "M should be >0" );
4605 ssize = cvGetMatSize(src);
4606 dsize = cvGetMatSize(dst);
4608 mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
4609 mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
4611 if( !(flags & CV_WARP_INVERSE_MAP) )
4614 cv::AutoBuffer<double> _exp_tab(dsize.width);
4615 double* exp_tab = _exp_tab;
4617 for( rho = 0; rho < dst->width; rho++ )
4618 exp_tab[rho] = std::exp(rho/M);
4620 for( phi = 0; phi < dsize.height; phi++ )
4622 double cp = cos(phi*2*CV_PI/dsize.height);
4623 double sp = sin(phi*2*CV_PI/dsize.height);
4624 float* mx = (float*)(mapx->data.ptr + phi*mapx->step);
4625 float* my = (float*)(mapy->data.ptr + phi*mapy->step);
4627 for( rho = 0; rho < dsize.width; rho++ )
4629 double r = exp_tab[rho];
4630 double x = r*cp + center.x;
4631 double y = r*sp + center.y;
4641 CvMat bufx, bufy, bufp, bufa;
4642 double ascale = ssize.height/(2*CV_PI);
4643 cv::AutoBuffer<float> _buf(4*dsize.width);
4646 bufx = cvMat( 1, dsize.width, CV_32F, buf );
4647 bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width );
4648 bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 );
4649 bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 );
4651 for( x = 0; x < dsize.width; x++ )
4652 bufx.data.fl[x] = (float)x - center.x;
4654 for( y = 0; y < dsize.height; y++ )
4656 float* mx = (float*)(mapx->data.ptr + y*mapx->step);
4657 float* my = (float*)(mapy->data.ptr + y*mapy->step);
4659 for( x = 0; x < dsize.width; x++ )
4660 bufy.data.fl[x] = (float)y - center.y;
4663 cvCartToPolar( &bufx, &bufy, &bufp, &bufa );
4665 for( x = 0; x < dsize.width; x++ )
4666 bufp.data.fl[x] += 1.f;
4668 cvLog( &bufp, &bufp );
4670 for( x = 0; x < dsize.width; x++ )
4672 double rho = bufp.data.fl[x]*M;
4673 double phi = bufa.data.fl[x]*ascale;
4679 for( x = 0; x < dsize.width; x++ )
4681 double xx = bufx.data.fl[x];
4682 double yy = bufy.data.fl[x];
4684 double p = log(std::sqrt(xx*xx + yy*yy) + 1.)*M;
4685 double a = atan2(yy,xx);
4697 cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) );
4700 void cv::logPolar( InputArray _src, OutputArray _dst,
4701 Point2f center, double M, int flags )
4703 Mat src = _src.getMat();
4704 _dst.create( src.size(), src.type() );
4705 CvMat c_src = src, c_dst = _dst.getMat();
4706 cvLogPolar( &c_src, &c_dst, center, M, flags );
4709 /****************************************************************************************
4710 Linear-Polar Transform
4711 J.L. Blanco, Apr 2009
4712 ****************************************************************************************/
4714 void cvLinearPolar( const CvArr* srcarr, CvArr* dstarr,
4715 CvPoint2D32f center, double maxRadius, int flags )
4717 cv::Ptr<CvMat> mapx, mapy;
4719 CvMat srcstub, *src = (CvMat*)srcarr;
4720 CvMat dststub, *dst = (CvMat*)dstarr;
4721 CvSize ssize, dsize;
4723 src = cvGetMat( srcarr, &srcstub,0,0 );
4724 dst = cvGetMat( dstarr, &dststub,0,0 );
4726 if( !CV_ARE_TYPES_EQ( src, dst ))
4727 CV_Error( CV_StsUnmatchedFormats, "" );
4729 ssize.width = src->cols;
4730 ssize.height = src->rows;
4731 dsize.width = dst->cols;
4732 dsize.height = dst->rows;
4734 mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
4735 mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
4737 if( !(flags & CV_WARP_INVERSE_MAP) )
4741 for( phi = 0; phi < dsize.height; phi++ )
4743 double cp = cos(phi*2*CV_PI/dsize.height);
4744 double sp = sin(phi*2*CV_PI/dsize.height);
4745 float* mx = (float*)(mapx->data.ptr + phi*mapx->step);
4746 float* my = (float*)(mapy->data.ptr + phi*mapy->step);
4748 for( rho = 0; rho < dsize.width; rho++ )
4750 double r = maxRadius*(rho+1)/dsize.width;
4751 double x = r*cp + center.x;
4752 double y = r*sp + center.y;
4762 CvMat bufx, bufy, bufp, bufa;
4763 const double ascale = ssize.height/(2*CV_PI);
4764 const double pscale = ssize.width/maxRadius;
4766 cv::AutoBuffer<float> _buf(4*dsize.width);
4769 bufx = cvMat( 1, dsize.width, CV_32F, buf );
4770 bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width );
4771 bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 );
4772 bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 );
4774 for( x = 0; x < dsize.width; x++ )
4775 bufx.data.fl[x] = (float)x - center.x;
4777 for( y = 0; y < dsize.height; y++ )
4779 float* mx = (float*)(mapx->data.ptr + y*mapx->step);
4780 float* my = (float*)(mapy->data.ptr + y*mapy->step);
4782 for( x = 0; x < dsize.width; x++ )
4783 bufy.data.fl[x] = (float)y - center.y;
4785 cvCartToPolar( &bufx, &bufy, &bufp, &bufa, 0 );
4787 for( x = 0; x < dsize.width; x++ )
4788 bufp.data.fl[x] += 1.f;
4790 for( x = 0; x < dsize.width; x++ )
4792 double rho = bufp.data.fl[x]*pscale;
4793 double phi = bufa.data.fl[x]*ascale;
4800 cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) );
4803 void cv::linearPolar( InputArray _src, OutputArray _dst,
4804 Point2f center, double maxRadius, int flags )
4806 Mat src = _src.getMat();
4807 _dst.create( src.size(), src.type() );
4808 CvMat c_src = src, c_dst = _dst.getMat();
4809 cvLinearPolar( &c_src, &c_dst, center, maxRadius, flags );