/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/* ////////////////////////////////////////////////////////////////////
//
//  Geometrical transforms on images and matrices: rotation, zoom etc.
//
// */

#include "precomp.hpp"
#include "opencl_kernels.hpp"
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
static IppStatus sts = ippInit();
#endif

namespace cv
{
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
typedef IppStatus (CV_STDCALL* ippiSetFunc)(const void*, void *, int, IppiSize);
typedef IppStatus (CV_STDCALL* ippiWarpPerspectiveBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [3][3], int);
typedef IppStatus (CV_STDCALL* ippiWarpAffineBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [2][3], int);
typedef IppStatus (CV_STDCALL* ippiResizeSqrPixelFunc)(const void*, IppiSize, int, IppiRect, void*, int, IppiRect, double, double, double, double, int, Ipp8u *);
template <int channels, typename Type>
bool IPPSetSimple(cv::Scalar value, void *dataPointer, int step, IppiSize &size, ippiSetFunc func)
{
    Type values[channels];
    for( int i = 0; i < channels; i++ )
        values[i] = (Type)value[i];
    return func(values, dataPointer, step, size) >= 0;
}
bool IPPSet(const cv::Scalar &value, void *dataPointer, int step, IppiSize &size, int channels, int depth)
{
    if( channels == 1 )
    {
        switch( depth )
        {
        case CV_8U:
            return ippiSet_8u_C1R((Ipp8u)value[0], (Ipp8u *)dataPointer, step, size) >= 0;
        case CV_16U:
            return ippiSet_16u_C1R((Ipp16u)value[0], (Ipp16u *)dataPointer, step, size) >= 0;
        case CV_32F:
            return ippiSet_32f_C1R((Ipp32f)value[0], (Ipp32f *)dataPointer, step, size) >= 0;
        }
    }
    else
    {
        if( channels == 3 )
        {
            switch( depth )
            {
            case CV_8U:
                return IPPSetSimple<3, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C3R);
            case CV_16U:
                return IPPSetSimple<3, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C3R);
            case CV_32F:
                return IPPSetSimple<3, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C3R);
            }
        }
        else if( channels == 4 )
        {
            switch( depth )
            {
            case CV_8U:
                return IPPSetSimple<4, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C4R);
            case CV_16U:
                return IPPSetSimple<4, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C4R);
            case CV_32F:
                return IPPSetSimple<4, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C4R);
            }
        }
    }
    return false;
}
#endif
/************** interpolation formulas and tables ***************/

const int INTER_RESIZE_COEF_BITS=11;
const int INTER_RESIZE_COEF_SCALE=1 << INTER_RESIZE_COEF_BITS;

const int INTER_REMAP_COEF_BITS=15;
const int INTER_REMAP_COEF_SCALE=1 << INTER_REMAP_COEF_BITS;
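
#if 0 // Editor's sketch (not part of the library): the resize weights are
      // stored as 11-bit fixed-point numbers, i.e. a weight w in [0,1]
      // becomes w*INTER_RESIZE_COEF_SCALE. A bilinear pair (0.25, 0.75)
      // is stored as (512, 1536), and the integer weights still sum to
      // exactly one in fixed-point: 512 + 1536 == 2048 == 1 << 11.
static void fixedPointWeightSketch()
{
    float w0 = 0.25f, w1 = 1.f - w0;
    int i0 = cvRound(w0*INTER_RESIZE_COEF_SCALE);   // 512
    int i1 = cvRound(w1*INTER_RESIZE_COEF_SCALE);   // 1536
    CV_Assert( i0 + i1 == INTER_RESIZE_COEF_SCALE );
}
#endif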
static uchar NNDeltaTab_i[INTER_TAB_SIZE2][2];

static float BilinearTab_f[INTER_TAB_SIZE2][2][2];
static short BilinearTab_i[INTER_TAB_SIZE2][2][2];

#if CV_SSE2
static short BilinearTab_iC4_buf[INTER_TAB_SIZE2+2][2][8];
static short (*BilinearTab_iC4)[2][8] = (short (*)[2][8])alignPtr(BilinearTab_iC4_buf, 16);
#endif

static float BicubicTab_f[INTER_TAB_SIZE2][4][4];
static short BicubicTab_i[INTER_TAB_SIZE2][4][4];

static float Lanczos4Tab_f[INTER_TAB_SIZE2][8][8];
static short Lanczos4Tab_i[INTER_TAB_SIZE2][8][8];
static inline void interpolateLinear( float x, float* coeffs )
{
    coeffs[0] = 1.f - x;
    coeffs[1] = x;
}
static inline void interpolateCubic( float x, float* coeffs )
{
    const float A = -0.75f;

    coeffs[0] = ((A*(x + 1) - 5*A)*(x + 1) + 8*A)*(x + 1) - 4*A;
    coeffs[1] = ((A + 2)*x - (A + 3))*x*x + 1;
    coeffs[2] = ((A + 2)*(1 - x) - (A + 3))*(1 - x)*(1 - x) + 1;
    coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];
}
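
#if 0 // Editor's sketch (not part of the library): with A = -0.75 this is
      // the standard Keys bicubic kernel. At x = 0 the weights are
      // (0, 1, 0, 0) (the sample itself); at x = 0.5 they are
      // (-0.09375, 0.59375, 0.59375, -0.09375). coeffs[3] is defined as
      // the residual, so the four weights always sum to (nearly exactly) 1.
static void cubicWeightSketch()
{
    float c[4];
    interpolateCubic(0.5f, c);
    CV_Assert( std::abs(c[0] + c[1] + c[2] + c[3] - 1.f) < 1e-6f );
}
#endif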
static inline void interpolateLanczos4( float x, float* coeffs )
{
    static const double s45 = 0.70710678118654752440084436210485;
    static const double cs[][2]=
    {{1, 0}, {-s45, -s45}, {0, 1}, {s45, -s45}, {-1, 0}, {s45, s45}, {0, -1}, {-s45, s45}};

    if( x < FLT_EPSILON )
    {
        for( int i = 0; i < 8; i++ )
            coeffs[i] = 0;
        coeffs[3] = 1;
        return;
    }

    float sum = 0;
    double y0=-(x+3)*CV_PI*0.25, s0 = sin(y0), c0=cos(y0);
    for(int i = 0; i < 8; i++ )
    {
        double y = -(x+3-i)*CV_PI*0.25;
        coeffs[i] = (float)((cs[i][0]*s0 + cs[i][1]*c0)/(y*y));
        sum += coeffs[i];
    }

    sum = 1.f/sum;
    for(int i = 0; i < 8; i++ )
        coeffs[i] *= sum;
}
static void initInterTab1D(int method, float* tab, int tabsz)
{
    float scale = 1.f/tabsz;
    if( method == INTER_LINEAR )
    {
        for( int i = 0; i < tabsz; i++, tab += 2 )
            interpolateLinear( i*scale, tab );
    }
    else if( method == INTER_CUBIC )
    {
        for( int i = 0; i < tabsz; i++, tab += 4 )
            interpolateCubic( i*scale, tab );
    }
    else if( method == INTER_LANCZOS4 )
    {
        for( int i = 0; i < tabsz; i++, tab += 8 )
            interpolateLanczos4( i*scale, tab );
    }
    else
        CV_Error( CV_StsBadArg, "Unknown interpolation method" );
}
static const void* initInterTab2D( int method, bool fixpt )
{
    static bool inittab[INTER_MAX+1] = {false};
    float* tab = 0;
    short* itab = 0;
    int ksize = 0;
    if( method == INTER_LINEAR )
        tab = BilinearTab_f[0][0], itab = BilinearTab_i[0][0], ksize=2;
    else if( method == INTER_CUBIC )
        tab = BicubicTab_f[0][0], itab = BicubicTab_i[0][0], ksize=4;
    else if( method == INTER_LANCZOS4 )
        tab = Lanczos4Tab_f[0][0], itab = Lanczos4Tab_i[0][0], ksize=8;
    else
        CV_Error( CV_StsBadArg, "Unknown/unsupported interpolation type" );

    if( !inittab[method] )
    {
        AutoBuffer<float> _tab(8*INTER_TAB_SIZE);
        int i, j, k1, k2;
        initInterTab1D(method, _tab, INTER_TAB_SIZE);
        for( i = 0; i < INTER_TAB_SIZE; i++ )
            for( j = 0; j < INTER_TAB_SIZE; j++, tab += ksize*ksize, itab += ksize*ksize )
            {
                int isum = 0;
                NNDeltaTab_i[i*INTER_TAB_SIZE+j][0] = j < INTER_TAB_SIZE/2;
                NNDeltaTab_i[i*INTER_TAB_SIZE+j][1] = i < INTER_TAB_SIZE/2;

                for( k1 = 0; k1 < ksize; k1++ )
                {
                    float vy = _tab[i*ksize + k1];
                    for( k2 = 0; k2 < ksize; k2++ )
                    {
                        float v = vy*_tab[j*ksize + k2];
                        tab[k1*ksize + k2] = v;
                        isum += itab[k1*ksize + k2] = saturate_cast<short>(v*INTER_REMAP_COEF_SCALE);
                    }
                }

                if( isum != INTER_REMAP_COEF_SCALE )
                {
                    int diff = isum - INTER_REMAP_COEF_SCALE;
                    int ksize2 = ksize/2, Mk1=ksize2, Mk2=ksize2, mk1=ksize2, mk2=ksize2;
                    for( k1 = ksize2; k1 < ksize2+2; k1++ )
                        for( k2 = ksize2; k2 < ksize2+2; k2++ )
                        {
                            if( itab[k1*ksize+k2] < itab[mk1*ksize+mk2] )
                                mk1 = k1, mk2 = k2;
                            else if( itab[k1*ksize+k2] > itab[Mk1*ksize+Mk2] )
                                Mk1 = k1, Mk2 = k2;
                        }
                    if( diff < 0 )
                        itab[Mk1*ksize + Mk2] = (short)(itab[Mk1*ksize + Mk2] - diff);
                    else
                        itab[mk1*ksize + mk2] = (short)(itab[mk1*ksize + mk2] - diff);
                }
            }
        tab -= INTER_TAB_SIZE2*ksize*ksize;
        itab -= INTER_TAB_SIZE2*ksize*ksize;
#if CV_SSE2
        if( method == INTER_LINEAR )
        {
            for( i = 0; i < INTER_TAB_SIZE2; i++ )
                for( j = 0; j < 4; j++ )
                {
                    BilinearTab_iC4[i][0][j*2] = BilinearTab_i[i][0][0];
                    BilinearTab_iC4[i][0][j*2+1] = BilinearTab_i[i][0][1];
                    BilinearTab_iC4[i][1][j*2] = BilinearTab_i[i][1][0];
                    BilinearTab_iC4[i][1][j*2+1] = BilinearTab_i[i][1][1];
                }
        }
#endif
        inittab[method] = true;
    }
    return fixpt ? (const void*)itab : (const void*)tab;
}
static bool initAllInterTab2D()
{
    return  initInterTab2D( INTER_LINEAR, false ) &&
            initInterTab2D( INTER_LINEAR, true ) &&
            initInterTab2D( INTER_CUBIC, false ) &&
            initInterTab2D( INTER_CUBIC, true ) &&
            initInterTab2D( INTER_LANCZOS4, false ) &&
            initInterTab2D( INTER_LANCZOS4, true );
}

static volatile bool doInitAllInterTab2D = initAllInterTab2D();
template<typename ST, typename DT> struct Cast
{
    typedef ST type1;
    typedef DT rtype;

    DT operator()(ST val) const { return saturate_cast<DT>(val); }
};

template<typename ST, typename DT, int bits> struct FixedPtCast
{
    typedef ST type1;
    typedef DT rtype;
    enum { SHIFT = bits, DELTA = 1 << (bits-1) };

    DT operator()(ST val) const { return saturate_cast<DT>((val + DELTA)>>SHIFT); }
};
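
#if 0 // Editor's sketch (not part of the library): FixedPtCast removes the
      // fixed-point scaling with round-to-nearest, computing (v + DELTA) >> SHIFT.
      // The 8-bit resize path uses bits = INTER_RESIZE_COEF_BITS*2 = 22 because
      // two 11-bit weights have been multiplied in. A toy 2-bit instance:
static void fixedPtCastSketch()
{
    FixedPtCast<int, uchar, 2> cast;        // SHIFT = 2, DELTA = 2
    CV_Assert( cast(5) == 1 );              // round(5/4)  = 1
    CV_Assert( cast(6) == 2 );              // round(6/4)  = 2 (ties round up)
    CV_Assert( cast(10) == 3 );             // round(10/4) = 3
}
#endif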
/****************************************************************************************\
*                                         Resize                                        *
\****************************************************************************************/
class resizeNNInvoker :
    public ParallelLoopBody
{
public:
    resizeNNInvoker(const Mat& _src, Mat &_dst, int *_x_ofs, int _pix_size4, double _ify) :
        ParallelLoopBody(), src(_src), dst(_dst), x_ofs(_x_ofs), pix_size4(_pix_size4),
        ify(_ify)
    {
    }

    virtual void operator() (const Range& range) const
    {
        Size ssize = src.size(), dsize = dst.size();
        int y, x, pix_size = (int)src.elemSize();

        for( y = range.start; y < range.end; y++ )
        {
            uchar* D = dst.data + dst.step*y;
            int sy = std::min(cvFloor(y*ify), ssize.height-1);
            const uchar* S = src.data + src.step*sy;

            switch( pix_size )
            {
            case 1:
                for( x = 0; x <= dsize.width - 2; x += 2 )
                {
                    uchar t0 = S[x_ofs[x]];
                    uchar t1 = S[x_ofs[x+1]];
                    D[x] = t0;
                    D[x+1] = t1;
                }

                for( ; x < dsize.width; x++ )
                    D[x] = S[x_ofs[x]];
                break;
            case 2:
                for( x = 0; x < dsize.width; x++ )
                    *(ushort*)(D + x*2) = *(ushort*)(S + x_ofs[x]);
                break;
            case 3:
                for( x = 0; x < dsize.width; x++, D += 3 )
                {
                    const uchar* _tS = S + x_ofs[x];
                    D[0] = _tS[0]; D[1] = _tS[1]; D[2] = _tS[2];
                }
                break;
            case 4:
                for( x = 0; x < dsize.width; x++ )
                    *(int*)(D + x*4) = *(int*)(S + x_ofs[x]);
                break;
            case 6:
                for( x = 0; x < dsize.width; x++, D += 6 )
                {
                    const ushort* _tS = (const ushort*)(S + x_ofs[x]);
                    ushort* _tD = (ushort*)D;
                    _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
                }
                break;
            case 8:
                for( x = 0; x < dsize.width; x++, D += 8 )
                {
                    const int* _tS = (const int*)(S + x_ofs[x]);
                    int* _tD = (int*)D;
                    _tD[0] = _tS[0]; _tD[1] = _tS[1];
                }
                break;
            case 12:
                for( x = 0; x < dsize.width; x++, D += 12 )
                {
                    const int* _tS = (const int*)(S + x_ofs[x]);
                    int* _tD = (int*)D;
                    _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
                }
                break;
            default:
                for( x = 0; x < dsize.width; x++, D += pix_size )
                {
                    const int* _tS = (const int*)(S + x_ofs[x]);
                    int* _tD = (int*)D;
                    for( int k = 0; k < pix_size4; k++ )
                        _tD[k] = _tS[k];
                }
            }
        }
    }

private:
    const Mat src;
    Mat dst;
    int* x_ofs, pix_size4;
    double ify;

    resizeNNInvoker(const resizeNNInvoker&);
    resizeNNInvoker& operator=(const resizeNNInvoker&);
};
static void
resizeNN( const Mat& src, Mat& dst, double fx, double fy )
{
    Size ssize = src.size(), dsize = dst.size();
    AutoBuffer<int> _x_ofs(dsize.width);
    int* x_ofs = _x_ofs;
    int pix_size = (int)src.elemSize();
    int pix_size4 = (int)(pix_size / sizeof(int));
    double ifx = 1./fx, ify = 1./fy;
    int x;

    for( x = 0; x < dsize.width; x++ )
    {
        int sx = cvFloor(x*ifx);
        x_ofs[x] = std::min(sx, ssize.width-1)*pix_size;
    }

    Range range(0, dsize.height);
    resizeNNInvoker invoker(src, dst, x_ofs, pix_size4, ify);
    parallel_for_(range, invoker, dst.total()/(double)(1<<16));
}
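
#if 0 // Editor's sketch (not part of the library): resizeNN is the worker
      // that cv::resize dispatches to for INTER_NEAREST. Equivalent user code,
      // doubling both dimensions; each destination pixel (x, y) copies source
      // pixel (min(cvFloor(x/2), w-1), min(cvFloor(y/2), h-1)), cf. x_ofs above.
static void nearestUsageSketch()
{
    cv::Mat src(480, 640, CV_8UC3, cv::Scalar::all(0)), dst;
    cv::resize(src, dst, cv::Size(), 2.0, 2.0, cv::INTER_NEAREST);
    CV_Assert( dst.size() == cv::Size(1280, 960) );
}
#endif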
struct VResizeNoVec
{
    int operator()(const uchar**, uchar*, const uchar*, int ) const { return 0; }
};

struct HResizeNoVec
{
    int operator()(const uchar**, uchar**, int, const int*,
        const uchar*, int, int, int, int, int) const { return 0; }
};

#if CV_SSE2
struct VResizeLinearVec_32s8u
{
    int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        const int** src = (const int**)_src;
        const short* beta = (const short*)_beta;
        const int *S0 = src[0], *S1 = src[1];
        int x = 0;
        __m128i b0 = _mm_set1_epi16(beta[0]), b1 = _mm_set1_epi16(beta[1]);
        __m128i delta = _mm_set1_epi16(2);

        if( (((size_t)S0|(size_t)S1)&15) == 0 )
            for( ; x <= width - 16; x += 16 )
            {
                __m128i x0, x1, x2, y0, y1, y2;
                x0 = _mm_load_si128((const __m128i*)(S0 + x));
                x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
                y0 = _mm_load_si128((const __m128i*)(S1 + x));
                y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));
                x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
                y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));

                x1 = _mm_load_si128((const __m128i*)(S0 + x + 8));
                x2 = _mm_load_si128((const __m128i*)(S0 + x + 12));
                y1 = _mm_load_si128((const __m128i*)(S1 + x + 8));
                y2 = _mm_load_si128((const __m128i*)(S1 + x + 12));
                x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
                y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));

                x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
                x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));

                x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
                x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
                _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
            }
        else
            for( ; x <= width - 16; x += 16 )
            {
                __m128i x0, x1, x2, y0, y1, y2;
                x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
                x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
                y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
                y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));
                x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
                y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));

                x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 8));
                x2 = _mm_loadu_si128((const __m128i*)(S0 + x + 12));
                y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 8));
                y2 = _mm_loadu_si128((const __m128i*)(S1 + x + 12));
                x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
                y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));

                x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
                x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));

                x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
                x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
                _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
            }

        for( ; x < width - 4; x += 4 )
        {
            __m128i x0, y0;
            x0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S0 + x)), 4);
            y0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S1 + x)), 4);
            x0 = _mm_packs_epi32(x0, x0);
            y0 = _mm_packs_epi32(y0, y0);
            x0 = _mm_adds_epi16(_mm_mulhi_epi16(x0, b0), _mm_mulhi_epi16(y0, b1));
            x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
            x0 = _mm_packus_epi16(x0, x0);
            *(int*)(dst + x) = _mm_cvtsi128_si32(x0);
        }

        return x;
    }
};
template<int shiftval> struct VResizeLinearVec_32f16
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1];
        ushort* dst = (ushort*)_dst;
        int x = 0;

        __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
        __m128i preshift = _mm_set1_epi32(shiftval);
        __m128i postshift = _mm_set1_epi16((short)shiftval);

        if( (((size_t)S0|(size_t)S1)&15) == 0 )
            for( ; x <= width - 16; x += 16 )
            {
                __m128 x0, x1, y0, y1;
                __m128i t0, t1, t2;
                x0 = _mm_load_ps(S0 + x);
                x1 = _mm_load_ps(S0 + x + 4);
                y0 = _mm_load_ps(S1 + x);
                y1 = _mm_load_ps(S1 + x + 4);

                x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
                x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
                t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
                t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
                t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);

                x0 = _mm_load_ps(S0 + x + 8);
                x1 = _mm_load_ps(S0 + x + 12);
                y0 = _mm_load_ps(S1 + x + 8);
                y1 = _mm_load_ps(S1 + x + 12);

                x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
                x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
                t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
                t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
                t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);

                _mm_storeu_si128( (__m128i*)(dst + x), t0);
                _mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
            }
        else
            for( ; x <= width - 16; x += 16 )
            {
                __m128 x0, x1, y0, y1;
                __m128i t0, t1, t2;
                x0 = _mm_loadu_ps(S0 + x);
                x1 = _mm_loadu_ps(S0 + x + 4);
                y0 = _mm_loadu_ps(S1 + x);
                y1 = _mm_loadu_ps(S1 + x + 4);

                x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
                x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
                t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
                t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
                t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);

                x0 = _mm_loadu_ps(S0 + x + 8);
                x1 = _mm_loadu_ps(S0 + x + 12);
                y0 = _mm_loadu_ps(S1 + x + 8);
                y1 = _mm_loadu_ps(S1 + x + 12);

                x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
                x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
                t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
                t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
                t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);

                _mm_storeu_si128( (__m128i*)(dst + x), t0);
                _mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
            }

        for( ; x < width - 4; x += 4 )
        {
            __m128 x0, y0;
            __m128i t0;
            x0 = _mm_loadu_ps(S0 + x);
            y0 = _mm_loadu_ps(S1 + x);

            x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
            t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
            t0 = _mm_add_epi16(_mm_packs_epi32(t0, t0), postshift);
            _mm_storel_epi64( (__m128i*)(dst + x), t0);
        }

        return x;
    }
};

typedef VResizeLinearVec_32f16<SHRT_MIN> VResizeLinearVec_32f16u;
typedef VResizeLinearVec_32f16<0> VResizeLinearVec_32f16s;
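
#if 0 // Editor's sketch (not part of the library): why the template parameter
      // is SHRT_MIN for ushort output and 0 for short output. SSE2 only has a
      // *signed* 16-bit pack, so the ushort variant biases each value into
      // signed range before packing and un-biases it afterwards; a scalar
      // model of one lane (assumes v rounds into [0, 65535]):
static ushort packU16LaneSketch(float v)
{
    int t = cvRound(v) + SHRT_MIN;          // "preshift" into signed range
    short s = saturate_cast<short>(t);      // what _mm_packs_epi32 does
    return (ushort)(s - SHRT_MIN);          // "postshift": adds the bias back,
                                            // wrapping mod 2^16 like _mm_add_epi16
}
#endif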
struct VResizeLinearVec_32f
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1];
        float* dst = (float*)_dst;
        int x = 0;

        __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);

        if( (((size_t)S0|(size_t)S1)&15) == 0 )
            for( ; x <= width - 8; x += 8 )
            {
                __m128 x0, x1, y0, y1;
                x0 = _mm_load_ps(S0 + x);
                x1 = _mm_load_ps(S0 + x + 4);
                y0 = _mm_load_ps(S1 + x);
                y1 = _mm_load_ps(S1 + x + 4);

                x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
                x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));

                _mm_storeu_ps( dst + x, x0);
                _mm_storeu_ps( dst + x + 4, x1);
            }
        else
            for( ; x <= width - 8; x += 8 )
            {
                __m128 x0, x1, y0, y1;
                x0 = _mm_loadu_ps(S0 + x);
                x1 = _mm_loadu_ps(S0 + x + 4);
                y0 = _mm_loadu_ps(S1 + x);
                y1 = _mm_loadu_ps(S1 + x + 4);

                x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
                x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));

                _mm_storeu_ps( dst + x, x0);
                _mm_storeu_ps( dst + x + 4, x1);
            }

        return x;
    }
};
struct VResizeCubicVec_32s8u
{
    int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        const int** src = (const int**)_src;
        const short* beta = (const short*)_beta;
        const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
        int x = 0;
        float scale = 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE);
        __m128 b0 = _mm_set1_ps(beta[0]*scale), b1 = _mm_set1_ps(beta[1]*scale),
            b2 = _mm_set1_ps(beta[2]*scale), b3 = _mm_set1_ps(beta[3]*scale);

        if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&15) == 0 )
            for( ; x <= width - 8; x += 8 )
            {
                __m128i x0, x1, y0, y1;
                __m128 s0, s1, f0, f1;
                x0 = _mm_load_si128((const __m128i*)(S0 + x));
                x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
                y0 = _mm_load_si128((const __m128i*)(S1 + x));
                y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));

                s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
                s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
                f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
                f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
                s0 = _mm_add_ps(s0, f0);
                s1 = _mm_add_ps(s1, f1);

                x0 = _mm_load_si128((const __m128i*)(S2 + x));
                x1 = _mm_load_si128((const __m128i*)(S2 + x + 4));
                y0 = _mm_load_si128((const __m128i*)(S3 + x));
                y1 = _mm_load_si128((const __m128i*)(S3 + x + 4));

                f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
                f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
                s0 = _mm_add_ps(s0, f0);
                s1 = _mm_add_ps(s1, f1);
                f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
                f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
                s0 = _mm_add_ps(s0, f0);
                s1 = _mm_add_ps(s1, f1);

                x0 = _mm_cvtps_epi32(s0);
                x1 = _mm_cvtps_epi32(s1);

                x0 = _mm_packs_epi32(x0, x1);
                _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
            }
        else
            for( ; x <= width - 8; x += 8 )
            {
                __m128i x0, x1, y0, y1;
                __m128 s0, s1, f0, f1;
                x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
                x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
                y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
                y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));

                s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
                s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
                f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
                f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
                s0 = _mm_add_ps(s0, f0);
                s1 = _mm_add_ps(s1, f1);

                x0 = _mm_loadu_si128((const __m128i*)(S2 + x));
                x1 = _mm_loadu_si128((const __m128i*)(S2 + x + 4));
                y0 = _mm_loadu_si128((const __m128i*)(S3 + x));
                y1 = _mm_loadu_si128((const __m128i*)(S3 + x + 4));

                f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
                f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
                s0 = _mm_add_ps(s0, f0);
                s1 = _mm_add_ps(s1, f1);
                f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
                f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
                s0 = _mm_add_ps(s0, f0);
                s1 = _mm_add_ps(s1, f1);

                x0 = _mm_cvtps_epi32(s0);
                x1 = _mm_cvtps_epi32(s1);

                x0 = _mm_packs_epi32(x0, x1);
                _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
            }

        return x;
    }
};
template<int shiftval> struct VResizeCubicVec_32f16
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
        ushort* dst = (ushort*)_dst;
        int x = 0;
        __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
            b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);
        __m128i preshift = _mm_set1_epi32(shiftval);
        __m128i postshift = _mm_set1_epi16((short)shiftval);

        for( ; x <= width - 8; x += 8 )
        {
            __m128 x0, x1, y0, y1, s0, s1;
            __m128i t0, t1;
            x0 = _mm_loadu_ps(S0 + x);
            x1 = _mm_loadu_ps(S0 + x + 4);
            y0 = _mm_loadu_ps(S1 + x);
            y1 = _mm_loadu_ps(S1 + x + 4);

            s0 = _mm_mul_ps(x0, b0);
            s1 = _mm_mul_ps(x1, b0);
            y0 = _mm_mul_ps(y0, b1);
            y1 = _mm_mul_ps(y1, b1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            x0 = _mm_loadu_ps(S2 + x);
            x1 = _mm_loadu_ps(S2 + x + 4);
            y0 = _mm_loadu_ps(S3 + x);
            y1 = _mm_loadu_ps(S3 + x + 4);

            x0 = _mm_mul_ps(x0, b2);
            x1 = _mm_mul_ps(x1, b2);
            y0 = _mm_mul_ps(y0, b3);
            y1 = _mm_mul_ps(y1, b3);
            s0 = _mm_add_ps(s0, x0);
            s1 = _mm_add_ps(s1, x1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            t0 = _mm_add_epi32(_mm_cvtps_epi32(s0), preshift);
            t1 = _mm_add_epi32(_mm_cvtps_epi32(s1), preshift);

            t0 = _mm_add_epi16(_mm_packs_epi32(t0, t1), postshift);
            _mm_storeu_si128( (__m128i*)(dst + x), t0);
        }

        return x;
    }
};

typedef VResizeCubicVec_32f16<SHRT_MIN> VResizeCubicVec_32f16u;
typedef VResizeCubicVec_32f16<0> VResizeCubicVec_32f16s;
struct VResizeCubicVec_32f
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        const float** src = (const float**)_src;
        const float* beta = (const float*)_beta;
        const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
        float* dst = (float*)_dst;
        int x = 0;
        __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
            b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);

        for( ; x <= width - 8; x += 8 )
        {
            __m128 x0, x1, y0, y1, s0, s1;
            x0 = _mm_loadu_ps(S0 + x);
            x1 = _mm_loadu_ps(S0 + x + 4);
            y0 = _mm_loadu_ps(S1 + x);
            y1 = _mm_loadu_ps(S1 + x + 4);

            s0 = _mm_mul_ps(x0, b0);
            s1 = _mm_mul_ps(x1, b0);
            y0 = _mm_mul_ps(y0, b1);
            y1 = _mm_mul_ps(y1, b1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            x0 = _mm_loadu_ps(S2 + x);
            x1 = _mm_loadu_ps(S2 + x + 4);
            y0 = _mm_loadu_ps(S3 + x);
            y1 = _mm_loadu_ps(S3 + x + 4);

            x0 = _mm_mul_ps(x0, b2);
            x1 = _mm_mul_ps(x1, b2);
            y0 = _mm_mul_ps(y0, b3);
            y1 = _mm_mul_ps(y1, b3);
            s0 = _mm_add_ps(s0, x0);
            s1 = _mm_add_ps(s1, x1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            _mm_storeu_ps( dst + x, s0);
            _mm_storeu_ps( dst + x + 4, s1);
        }

        return x;
    }
};

#else
typedef VResizeNoVec VResizeLinearVec_32s8u;
typedef VResizeNoVec VResizeLinearVec_32f16u;
typedef VResizeNoVec VResizeLinearVec_32f16s;
typedef VResizeNoVec VResizeLinearVec_32f;

typedef VResizeNoVec VResizeCubicVec_32s8u;
typedef VResizeNoVec VResizeCubicVec_32f16u;
typedef VResizeNoVec VResizeCubicVec_32f16s;
typedef VResizeNoVec VResizeCubicVec_32f;

#endif

typedef HResizeNoVec HResizeLinearVec_8u32s;
typedef HResizeNoVec HResizeLinearVec_16u32f;
typedef HResizeNoVec HResizeLinearVec_16s32f;
typedef HResizeNoVec HResizeLinearVec_32f;
typedef HResizeNoVec HResizeLinearVec_64f;
template<typename T, typename WT, typename AT, int ONE, class VecOp>
struct HResizeLinear
{
    typedef T value_type;
    typedef WT buf_type;
    typedef AT alpha_type;

    void operator()(const T** src, WT** dst, int count,
                    const int* xofs, const AT* alpha,
                    int swidth, int dwidth, int cn, int xmin, int xmax ) const
    {
        int dx, k;
        VecOp vecOp;

        int dx0 = vecOp((const uchar**)src, (uchar**)dst, count,
            xofs, (const uchar*)alpha, swidth, dwidth, cn, xmin, xmax );

        for( k = 0; k <= count - 2; k++ )
        {
            const T *S0 = src[k], *S1 = src[k+1];
            WT *D0 = dst[k], *D1 = dst[k+1];
            for( dx = dx0; dx < xmax; dx++ )
            {
                int sx = xofs[dx];
                WT a0 = alpha[dx*2], a1 = alpha[dx*2+1];
                WT t0 = S0[sx]*a0 + S0[sx + cn]*a1;
                WT t1 = S1[sx]*a0 + S1[sx + cn]*a1;
                D0[dx] = t0; D1[dx] = t1;
            }

            for( ; dx < dwidth; dx++ )
            {
                int sx = xofs[dx];
                D0[dx] = WT(S0[sx]*ONE); D1[dx] = WT(S1[sx]*ONE);
            }
        }

        for( ; k < count; k++ )
        {
            const T *S = src[k];
            WT *D = dst[k];
            for( dx = 0; dx < xmax; dx++ )
            {
                int sx = xofs[dx];
                D[dx] = S[sx]*alpha[dx*2] + S[sx+cn]*alpha[dx*2+1];
            }

            for( ; dx < dwidth; dx++ )
                D[dx] = WT(S[xofs[dx]]*ONE);
        }
    }
};
template<typename T, typename WT, typename AT, class CastOp, class VecOp>
struct VResizeLinear
{
    typedef T value_type;
    typedef WT buf_type;
    typedef AT alpha_type;

    void operator()(const WT** src, T* dst, const AT* beta, int width ) const
    {
        WT b0 = beta[0], b1 = beta[1];
        const WT *S0 = src[0], *S1 = src[1];
        CastOp castOp;
        VecOp vecOp;

        int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
        #if CV_ENABLE_UNROLLED
        for( ; x <= width - 4; x += 4 )
        {
            WT t0, t1;
            t0 = S0[x]*b0 + S1[x]*b1;
            t1 = S0[x+1]*b0 + S1[x+1]*b1;
            dst[x] = castOp(t0); dst[x+1] = castOp(t1);
            t0 = S0[x+2]*b0 + S1[x+2]*b1;
            t1 = S0[x+3]*b0 + S1[x+3]*b1;
            dst[x+2] = castOp(t0); dst[x+3] = castOp(t1);
        }
        #endif
        for( ; x < width; x++ )
            dst[x] = castOp(S0[x]*b0 + S1[x]*b1);
    }
};
template<>
struct VResizeLinear<uchar, int, short, FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>, VResizeLinearVec_32s8u>
{
    typedef uchar value_type;
    typedef int buf_type;
    typedef short alpha_type;

    void operator()(const buf_type** src, value_type* dst, const alpha_type* beta, int width ) const
    {
        alpha_type b0 = beta[0], b1 = beta[1];
        const buf_type *S0 = src[0], *S1 = src[1];
        VResizeLinearVec_32s8u vecOp;

        int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
        #if CV_ENABLE_UNROLLED
        for( ; x <= width - 4; x += 4 )
        {
            dst[x+0] = uchar(( ((b0 * (S0[x+0] >> 4)) >> 16) + ((b1 * (S1[x+0] >> 4)) >> 16) + 2)>>2);
            dst[x+1] = uchar(( ((b0 * (S0[x+1] >> 4)) >> 16) + ((b1 * (S1[x+1] >> 4)) >> 16) + 2)>>2);
            dst[x+2] = uchar(( ((b0 * (S0[x+2] >> 4)) >> 16) + ((b1 * (S1[x+2] >> 4)) >> 16) + 2)>>2);
            dst[x+3] = uchar(( ((b0 * (S0[x+3] >> 4)) >> 16) + ((b1 * (S1[x+3] >> 4)) >> 16) + 2)>>2);
        }
        #endif
        for( ; x < width; x++ )
            dst[x] = uchar(( ((b0 * (S0[x] >> 4)) >> 16) + ((b1 * (S1[x] >> 4)) >> 16) + 2)>>2);
    }
};
template<typename T, typename WT, typename AT>
struct HResizeCubic
{
    typedef T value_type;
    typedef WT buf_type;
    typedef AT alpha_type;

    void operator()(const T** src, WT** dst, int count,
                    const int* xofs, const AT* alpha,
                    int swidth, int dwidth, int cn, int xmin, int xmax ) const
    {
        for( int k = 0; k < count; k++ )
        {
            const T *S = src[k];
            WT *D = dst[k];
            int dx = 0, limit = xmin;
            for(;;)
            {
                for( ; dx < limit; dx++, alpha += 4 )
                {
                    int j, sx = xofs[dx] - cn;
                    WT v = 0;
                    for( j = 0; j < 4; j++ )
                    {
                        int sxj = sx + j*cn;
                        if( (unsigned)sxj >= (unsigned)swidth )
                        {
                            while( sxj < 0 )
                                sxj += cn;
                            while( sxj >= swidth )
                                sxj -= cn;
                        }
                        v += S[sxj]*alpha[j];
                    }
                    D[dx] = v;
                }
                if( limit == dwidth )
                    break;
                for( ; dx < xmax; dx++, alpha += 4 )
                {
                    int sx = xofs[dx];
                    D[dx] = S[sx-cn]*alpha[0] + S[sx]*alpha[1] +
                        S[sx+cn]*alpha[2] + S[sx+cn*2]*alpha[3];
                }
                limit = dwidth;
            }
            alpha -= dwidth*4;
        }
    }
};
template<typename T, typename WT, typename AT, class CastOp, class VecOp>
struct VResizeCubic
{
    typedef T value_type;
    typedef WT buf_type;
    typedef AT alpha_type;

    void operator()(const WT** src, T* dst, const AT* beta, int width ) const
    {
        WT b0 = beta[0], b1 = beta[1], b2 = beta[2], b3 = beta[3];
        const WT *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
        CastOp castOp;
        VecOp vecOp;

        int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
        for( ; x < width; x++ )
            dst[x] = castOp(S0[x]*b0 + S1[x]*b1 + S2[x]*b2 + S3[x]*b3);
    }
};
template<typename T, typename WT, typename AT>
struct HResizeLanczos4
{
    typedef T value_type;
    typedef WT buf_type;
    typedef AT alpha_type;

    void operator()(const T** src, WT** dst, int count,
                    const int* xofs, const AT* alpha,
                    int swidth, int dwidth, int cn, int xmin, int xmax ) const
    {
        for( int k = 0; k < count; k++ )
        {
            const T *S = src[k];
            WT *D = dst[k];
            int dx = 0, limit = xmin;
            for(;;)
            {
                for( ; dx < limit; dx++, alpha += 8 )
                {
                    int j, sx = xofs[dx] - cn*3;
                    WT v = 0;
                    for( j = 0; j < 8; j++ )
                    {
                        int sxj = sx + j*cn;
                        if( (unsigned)sxj >= (unsigned)swidth )
                        {
                            while( sxj < 0 )
                                sxj += cn;
                            while( sxj >= swidth )
                                sxj -= cn;
                        }
                        v += S[sxj]*alpha[j];
                    }
                    D[dx] = v;
                }
                if( limit == dwidth )
                    break;
                for( ; dx < xmax; dx++, alpha += 8 )
                {
                    int sx = xofs[dx];
                    D[dx] = S[sx-cn*3]*alpha[0] + S[sx-cn*2]*alpha[1] +
                        S[sx-cn]*alpha[2] + S[sx]*alpha[3] +
                        S[sx+cn]*alpha[4] + S[sx+cn*2]*alpha[5] +
                        S[sx+cn*3]*alpha[6] + S[sx+cn*4]*alpha[7];
                }
                limit = dwidth;
            }
            alpha -= dwidth*8;
        }
    }
};
template<typename T, typename WT, typename AT, class CastOp, class VecOp>
struct VResizeLanczos4
{
    typedef T value_type;
    typedef WT buf_type;
    typedef AT alpha_type;

    void operator()(const WT** src, T* dst, const AT* beta, int width ) const
    {
        CastOp castOp;
        VecOp vecOp;
        int k, x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
        #if CV_ENABLE_UNROLLED
        for( ; x <= width - 4; x += 4 )
        {
            WT b = beta[0];
            const WT* S = src[0];
            WT s0 = S[x]*b, s1 = S[x+1]*b, s2 = S[x+2]*b, s3 = S[x+3]*b;

            for( k = 1; k < 8; k++ )
            {
                b = beta[k]; S = src[k];
                s0 += S[x]*b; s1 += S[x+1]*b;
                s2 += S[x+2]*b; s3 += S[x+3]*b;
            }

            dst[x] = castOp(s0); dst[x+1] = castOp(s1);
            dst[x+2] = castOp(s2); dst[x+3] = castOp(s3);
        }
        #endif
        for( ; x < width; x++ )
        {
            dst[x] = castOp(src[0][x]*beta[0] + src[1][x]*beta[1] +
                src[2][x]*beta[2] + src[3][x]*beta[3] + src[4][x]*beta[4] +
                src[5][x]*beta[5] + src[6][x]*beta[6] + src[7][x]*beta[7]);
        }
    }
};
static inline int clip(int x, int a, int b)
{
    return x >= a ? (x < b ? x : b-1) : a;
}

static const int MAX_ESIZE=16;
template <typename HResize, typename VResize>
class resizeGeneric_Invoker :
    public ParallelLoopBody
{
public:
    typedef typename HResize::value_type T;
    typedef typename HResize::buf_type WT;
    typedef typename HResize::alpha_type AT;

    resizeGeneric_Invoker(const Mat& _src, Mat &_dst, const int *_xofs, const int *_yofs,
        const AT* _alpha, const AT* __beta, const Size& _ssize, const Size &_dsize,
        int _ksize, int _xmin, int _xmax) :
        ParallelLoopBody(), src(_src), dst(_dst), xofs(_xofs), yofs(_yofs),
        alpha(_alpha), _beta(__beta), ssize(_ssize), dsize(_dsize),
        ksize(_ksize), xmin(_xmin), xmax(_xmax)
    {
    }

    virtual void operator() (const Range& range) const
    {
        int dy, cn = src.channels();
        HResize hresize;
        VResize vresize;

        int bufstep = (int)alignSize(dsize.width, 16);
        AutoBuffer<WT> _buffer(bufstep*ksize);
        const T* srows[MAX_ESIZE]={0};
        WT* rows[MAX_ESIZE]={0};
        int prev_sy[MAX_ESIZE];

        for(int k = 0; k < ksize; k++ )
        {
            prev_sy[k] = -1;
            rows[k] = (WT*)_buffer + bufstep*k;
        }

        const AT* beta = _beta + ksize * range.start;

        for( dy = range.start; dy < range.end; dy++, beta += ksize )
        {
            int sy0 = yofs[dy], k0=ksize, k1=0, ksize2 = ksize/2;

            for(int k = 0; k < ksize; k++ )
            {
                int sy = clip(sy0 - ksize2 + 1 + k, 0, ssize.height);
                for( k1 = std::max(k1, k); k1 < ksize; k1++ )
                {
                    if( sy == prev_sy[k1] ) // if the sy-th row has been computed already, reuse it.
                    {
                        if( k1 > k )
                            memcpy( rows[k], rows[k1], bufstep*sizeof(rows[0][0]) );
                        break;
                    }
                }
                if( k1 == ksize )
                    k0 = std::min(k0, k); // remember the first row that needs to be computed
                srows[k] = (T*)(src.data + src.step*sy);
                prev_sy[k] = sy;
            }

            if( k0 < ksize )
                hresize( (const T**)(srows + k0), (WT**)(rows + k0), ksize - k0, xofs, (const AT*)(alpha),
                         ssize.width, dsize.width, cn, xmin, xmax );
            vresize( (const WT**)rows, (T*)(dst.data + dst.step*dy), beta, dsize.width );
        }
    }

private:
    Mat src;
    Mat dst;
    const int* xofs, *yofs;
    const AT* alpha, *_beta;
    Size ssize, dsize;
    int ksize, xmin, xmax;
};
template<class HResize, class VResize>
static void resizeGeneric_( const Mat& src, Mat& dst,
                            const int* xofs, const void* _alpha,
                            const int* yofs, const void* _beta,
                            int xmin, int xmax, int ksize )
{
    typedef typename HResize::alpha_type AT;

    const AT* beta = (const AT*)_beta;
    Size ssize = src.size(), dsize = dst.size();
    int cn = src.channels();
    ssize.width *= cn;
    dsize.width *= cn;
    xmin *= cn;
    xmax *= cn;
    // image resize is a separable operation. In case of not too strong
    // shrinking it is performed as two 1D interpolation passes (horizontal, then vertical).
    Range range(0, dsize.height);
    resizeGeneric_Invoker<HResize, VResize> invoker(src, dst, xofs, yofs, (const AT*)_alpha, beta,
        ssize, dsize, ksize, xmin, xmax);
    parallel_for_(range, invoker, dst.total()/(double)(1<<16));
}
template <typename T, typename WT>
struct ResizeAreaFastNoVec
{
    ResizeAreaFastNoVec(int, int) { }
    ResizeAreaFastNoVec(int, int, int, int) { }
    int operator() (const T*, T*, int) const
    { return 0; }
};

#if CV_SSE2
class ResizeAreaFastVec_SIMD_8u
{
public:
    ResizeAreaFastVec_SIMD_8u(int _cn, int _step) :
        cn(_cn), step(_step)
    {
        use_simd = checkHardwareSupport(CV_CPU_SSE2);
    }

    int operator() (const uchar* S, uchar* D, int w) const
    {
        if( !use_simd )
            return 0;

        int dx = 0;
        const uchar* S0 = S;
        const uchar* S1 = S0 + step;
        __m128i zero = _mm_setzero_si128();
        __m128i delta2 = _mm_set1_epi16(2);

        if (cn == 1)
        {
            __m128i masklow = _mm_set1_epi16(0x00ff);
            for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
                __m128i r1 = _mm_loadu_si128((const __m128i*)S1);

                __m128i s0 = _mm_add_epi16(_mm_srli_epi16(r0, 8), _mm_and_si128(r0, masklow));
                __m128i s1 = _mm_add_epi16(_mm_srli_epi16(r1, 8), _mm_and_si128(r1, masklow));
                s0 = _mm_add_epi16(_mm_add_epi16(s0, s1), delta2);
                s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);

                _mm_storel_epi64((__m128i*)D, s0);
            }
        }
        else if (cn == 3)
            for ( ; dx <= w - 11; dx += 6, S0 += 12, S1 += 12, D += 6)
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
                __m128i r1 = _mm_loadu_si128((const __m128i*)S1);

                __m128i r0_16l = _mm_unpacklo_epi8(r0, zero);
                __m128i r0_16h = _mm_unpacklo_epi8(_mm_srli_si128(r0, 6), zero);
                __m128i r1_16l = _mm_unpacklo_epi8(r1, zero);
                __m128i r1_16h = _mm_unpacklo_epi8(_mm_srli_si128(r1, 6), zero);

                __m128i s0 = _mm_add_epi16(r0_16l, _mm_srli_si128(r0_16l, 6));
                __m128i s1 = _mm_add_epi16(r1_16l, _mm_srli_si128(r1_16l, 6));
                s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
                s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
                _mm_storel_epi64((__m128i*)D, s0);

                s0 = _mm_add_epi16(r0_16h, _mm_srli_si128(r0_16h, 6));
                s1 = _mm_add_epi16(r1_16h, _mm_srli_si128(r1_16h, 6));
                s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
                s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
                _mm_storel_epi64((__m128i*)(D+3), s0);
            }
        else
        {
            CV_Assert(cn == 4);
            int v[] = { 0, 0, -1, -1 };
            __m128i mask = _mm_loadu_si128((const __m128i*)v);

            for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
                __m128i r1 = _mm_loadu_si128((const __m128i*)S1);

                __m128i r0_16l = _mm_unpacklo_epi8(r0, zero);
                __m128i r0_16h = _mm_unpackhi_epi8(r0, zero);
                __m128i r1_16l = _mm_unpacklo_epi8(r1, zero);
                __m128i r1_16h = _mm_unpackhi_epi8(r1, zero);

                __m128i s0 = _mm_add_epi16(r0_16l, _mm_srli_si128(r0_16l, 8));
                __m128i s1 = _mm_add_epi16(r1_16l, _mm_srli_si128(r1_16l, 8));
                s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
                __m128i res0 = _mm_srli_epi16(s0, 2);

                s0 = _mm_add_epi16(r0_16h, _mm_srli_si128(r0_16h, 8));
                s1 = _mm_add_epi16(r1_16h, _mm_srli_si128(r1_16h, 8));
                s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
                __m128i res1 = _mm_srli_epi16(s0, 2);
                s0 = _mm_packus_epi16(_mm_or_si128(_mm_andnot_si128(mask, res0),
                    _mm_and_si128(mask, _mm_slli_si128(res1, 8))), zero);
                _mm_storel_epi64((__m128i*)(D), s0);
            }
        }

        return dx;
    }

private:
    int cn;
    int step;
    bool use_simd;
};
class ResizeAreaFastVec_SIMD_16u
{
public:
    ResizeAreaFastVec_SIMD_16u(int _cn, int _step) :
        cn(_cn), step(_step)
    {
        use_simd = checkHardwareSupport(CV_CPU_SSE2);
    }

    int operator() (const ushort* S, ushort* D, int w) const
    {
        if( !use_simd )
            return 0;

        int dx = 0;
        const ushort* S0 = (const ushort*)S;
        const ushort* S1 = (const ushort*)((const uchar*)(S) + step);
        __m128i masklow = _mm_set1_epi32(0x0000ffff);
        __m128i zero = _mm_setzero_si128();
        __m128i delta2 = _mm_set1_epi32(2);

#define _mm_packus_epi32(a, zero) _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(a, 16), 16), zero)

        if (cn == 1)
        {
            for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4)
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
                __m128i r1 = _mm_loadu_si128((const __m128i*)S1);

                __m128i s0 = _mm_add_epi32(_mm_srli_epi32(r0, 16), _mm_and_si128(r0, masklow));
                __m128i s1 = _mm_add_epi32(_mm_srli_epi32(r1, 16), _mm_and_si128(r1, masklow));
                s0 = _mm_add_epi32(_mm_add_epi32(s0, s1), delta2);
                s0 = _mm_srli_epi32(s0, 2);
                s0 = _mm_packus_epi32(s0, zero);

                _mm_storel_epi64((__m128i*)D, s0);
            }
        }
        else if (cn == 3)
            for ( ; dx <= w - 4; dx += 3, S0 += 6, S1 += 6, D += 3)
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
                __m128i r1 = _mm_loadu_si128((const __m128i*)S1);

                __m128i r0_16l = _mm_unpacklo_epi16(r0, zero);
                __m128i r0_16h = _mm_unpacklo_epi16(_mm_srli_si128(r0, 6), zero);
                __m128i r1_16l = _mm_unpacklo_epi16(r1, zero);
                __m128i r1_16h = _mm_unpacklo_epi16(_mm_srli_si128(r1, 6), zero);

                __m128i s0 = _mm_add_epi32(r0_16l, r0_16h);
                __m128i s1 = _mm_add_epi32(r1_16l, r1_16h);
                s0 = _mm_add_epi32(delta2, _mm_add_epi32(s0, s1));
                s0 = _mm_packus_epi32(_mm_srli_epi32(s0, 2), zero);
                _mm_storel_epi64((__m128i*)D, s0);
            }
        else
        {
            CV_Assert(cn == 4);
            for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4)
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
                __m128i r1 = _mm_loadu_si128((const __m128i*)S1);

                __m128i r0_32l = _mm_unpacklo_epi16(r0, zero);
                __m128i r0_32h = _mm_unpackhi_epi16(r0, zero);
                __m128i r1_32l = _mm_unpacklo_epi16(r1, zero);
                __m128i r1_32h = _mm_unpackhi_epi16(r1, zero);

                __m128i s0 = _mm_add_epi32(r0_32l, r0_32h);
                __m128i s1 = _mm_add_epi32(r1_32l, r1_32h);
                s0 = _mm_add_epi32(s1, _mm_add_epi32(s0, delta2));
                s0 = _mm_packus_epi32(_mm_srli_epi32(s0, 2), zero);
                _mm_storel_epi64((__m128i*)D, s0);
            }
        }

#undef _mm_packus_epi32

        return dx;
    }

private:
    int cn;
    int step;
    bool use_simd;
};

#else
typedef ResizeAreaFastNoVec<uchar, uchar> ResizeAreaFastVec_SIMD_8u;
typedef ResizeAreaFastNoVec<ushort, ushort> ResizeAreaFastVec_SIMD_16u;

#endif
template<typename T, typename SIMDVecOp>
struct ResizeAreaFastVec
{
    ResizeAreaFastVec(int _scale_x, int _scale_y, int _cn, int _step) :
        scale_x(_scale_x), scale_y(_scale_y), cn(_cn), step(_step), vecOp(_cn, _step)
    {
        fast_mode = scale_x == 2 && scale_y == 2 && (cn == 1 || cn == 3 || cn == 4);
    }

    int operator() (const T* S, T* D, int w) const
    {
        if( !fast_mode )
            return 0;

        const T* nextS = (const T*)((const uchar*)S + step);
        int dx = vecOp(S, D, w);

        if (cn == 1)
            for( ; dx < w; ++dx )
            {
                int index = dx*2;
                D[dx] = (T)((S[index] + S[index+1] + nextS[index] + nextS[index+1] + 2) >> 2);
            }
        else if (cn == 3)
            for( ; dx < w; dx += 3 )
            {
                int index = dx*2;
                D[dx] = (T)((S[index] + S[index+3] + nextS[index] + nextS[index+3] + 2) >> 2);
                D[dx+1] = (T)((S[index+1] + S[index+4] + nextS[index+1] + nextS[index+4] + 2) >> 2);
                D[dx+2] = (T)((S[index+2] + S[index+5] + nextS[index+2] + nextS[index+5] + 2) >> 2);
            }
        else
        {
            CV_Assert(cn == 4);
            for( ; dx < w; dx += 4 )
            {
                int index = dx*2;
                D[dx] = (T)((S[index] + S[index+4] + nextS[index] + nextS[index+4] + 2) >> 2);
                D[dx+1] = (T)((S[index+1] + S[index+5] + nextS[index+1] + nextS[index+5] + 2) >> 2);
                D[dx+2] = (T)((S[index+2] + S[index+6] + nextS[index+2] + nextS[index+6] + 2) >> 2);
                D[dx+3] = (T)((S[index+3] + S[index+7] + nextS[index+3] + nextS[index+7] + 2) >> 2);
            }
        }

        return dx;
    }

private:
    int scale_x, scale_y;
    int cn;
    bool fast_mode;
    int step;
    SIMDVecOp vecOp;
};
template <typename T, typename WT, typename VecOp>
class resizeAreaFast_Invoker :
    public ParallelLoopBody
{
public:
    resizeAreaFast_Invoker(const Mat &_src, Mat &_dst,
        int _scale_x, int _scale_y, const int* _ofs, const int* _xofs) :
        ParallelLoopBody(), src(_src), dst(_dst), scale_x(_scale_x),
        scale_y(_scale_y), ofs(_ofs), xofs(_xofs)
    {
    }

    virtual void operator() (const Range& range) const
    {
        Size ssize = src.size(), dsize = dst.size();
        int cn = src.channels();
        int area = scale_x*scale_y;
        float scale = 1.f/(area);
        int dwidth1 = (ssize.width/scale_x)*cn;
        dsize.width *= cn;
        ssize.width *= cn;
        int dy, dx, k = 0;

        VecOp vop(scale_x, scale_y, src.channels(), (int)src.step/*, area_ofs*/);

        for( dy = range.start; dy < range.end; dy++ )
        {
            T* D = (T*)(dst.data + dst.step*dy);
            int sy0 = dy*scale_y;
            int w = sy0 + scale_y <= ssize.height ? dwidth1 : 0;

            if( sy0 >= ssize.height )
            {
                for( dx = 0; dx < dsize.width; dx++ )
                    D[dx] = 0;
                continue;
            }

            dx = vop((const T*)(src.data + src.step * sy0), D, w);
            for( ; dx < w; dx++ )
            {
                const T* S = (const T*)(src.data + src.step * sy0) + xofs[dx];
                WT sum = 0;
                k = 0;
                #if CV_ENABLE_UNROLLED
                for( ; k <= area - 4; k += 4 )
                    sum += S[ofs[k]] + S[ofs[k+1]] + S[ofs[k+2]] + S[ofs[k+3]];
                #endif
                for( ; k < area; k++ )
                    sum += S[ofs[k]];

                D[dx] = saturate_cast<T>(sum * scale);
            }

            for( ; dx < dsize.width; dx++ )
            {
                WT sum = 0;
                int count = 0, sx0 = xofs[dx];
                if( sx0 >= ssize.width )
                    D[dx] = 0;

                for( int sy = 0; sy < scale_y; sy++ )
                {
                    if( sy0 + sy >= ssize.height )
                        break;
                    const T* S = (const T*)(src.data + src.step*(sy0 + sy)) + sx0;
                    for( int sx = 0; sx < scale_x*cn; sx += cn )
                    {
                        if( sx0 + sx >= ssize.width )
                            break;
                        sum += S[sx];
                        count++;
                    }
                }

                D[dx] = saturate_cast<T>((float)sum/count);
            }
        }
    }

private:
    Mat src;
    Mat dst;
    int scale_x, scale_y;
    const int *ofs, *xofs;
};
template<typename T, typename WT, typename VecOp>
static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int* xofs,
                             int scale_x, int scale_y )
{
    Range range(0, dst.rows);
    resizeAreaFast_Invoker<T, WT, VecOp> invoker(src, dst, scale_x,
        scale_y, ofs, xofs);
    parallel_for_(range, invoker, dst.total()/(double)(1<<16));
}
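
#if 0 // Editor's sketch (not part of the library): the fast area path above
      // is what cv::resize(..., INTER_AREA) uses when both scale factors are
      // exact integers; halving each side is then a 2x2 box filter. User code:
static void areaFastUsageSketch()
{
    cv::Mat src(480, 640, CV_8UC1, cv::Scalar::all(128)), dst;
    cv::resize(src, dst, cv::Size(320, 240), 0, 0, cv::INTER_AREA);
    // each output pixel is the rounded mean of one 2x2 source block
}
#endif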
struct DecimateAlpha
{
    int si, di;
    float alpha;
};
template<typename T, typename WT> class ResizeArea_Invoker :
    public ParallelLoopBody
{
public:
    ResizeArea_Invoker( const Mat& _src, Mat& _dst,
                        const DecimateAlpha* _xtab, int _xtab_size,
                        const DecimateAlpha* _ytab, int _ytab_size,
                        const int* _tabofs )
    {
        src = &_src;
        dst = &_dst;
        xtab0 = _xtab;
        xtab_size0 = _xtab_size;
        ytab = _ytab;
        ytab_size = _ytab_size;
        tabofs = _tabofs;
    }

    virtual void operator() (const Range& range) const
    {
        Size dsize = dst->size();
        int cn = dst->channels();
        dsize.width *= cn;
        AutoBuffer<WT> _buffer(dsize.width*2);
        const DecimateAlpha* xtab = xtab0;
        int xtab_size = xtab_size0;
        WT *buf = _buffer, *sum = buf + dsize.width;
        int j_start = tabofs[range.start], j_end = tabofs[range.end], j, k, dx, prev_dy = ytab[j_start].di;

        for( dx = 0; dx < dsize.width; dx++ )
            sum[dx] = (WT)0;

        for( j = j_start; j < j_end; j++ )
        {
            WT beta = ytab[j].alpha;
            int dy = ytab[j].di;
            int sy = ytab[j].si;

            {
                const T* S = (const T*)(src->data + src->step*sy);
                for( dx = 0; dx < dsize.width; dx++ )
                    buf[dx] = (WT)0;

                if( cn == 1 )
                    for( k = 0; k < xtab_size; k++ )
                    {
                        int dxn = xtab[k].di;
                        WT alpha = xtab[k].alpha;
                        buf[dxn] += S[xtab[k].si]*alpha;
                    }
                else if( cn == 2 )
                    for( k = 0; k < xtab_size; k++ )
                    {
                        int sxn = xtab[k].si;
                        int dxn = xtab[k].di;
                        WT alpha = xtab[k].alpha;
                        WT t0 = buf[dxn] + S[sxn]*alpha;
                        WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
                        buf[dxn] = t0; buf[dxn+1] = t1;
                    }
                else if( cn == 3 )
                    for( k = 0; k < xtab_size; k++ )
                    {
                        int sxn = xtab[k].si;
                        int dxn = xtab[k].di;
                        WT alpha = xtab[k].alpha;
                        WT t0 = buf[dxn] + S[sxn]*alpha;
                        WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
                        WT t2 = buf[dxn+2] + S[sxn+2]*alpha;
                        buf[dxn] = t0; buf[dxn+1] = t1; buf[dxn+2] = t2;
                    }
                else if( cn == 4 )
                {
                    for( k = 0; k < xtab_size; k++ )
                    {
                        int sxn = xtab[k].si;
                        int dxn = xtab[k].di;
                        WT alpha = xtab[k].alpha;
                        WT t0 = buf[dxn] + S[sxn]*alpha;
                        WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
                        buf[dxn] = t0; buf[dxn+1] = t1;
                        t0 = buf[dxn+2] + S[sxn+2]*alpha;
                        t1 = buf[dxn+3] + S[sxn+3]*alpha;
                        buf[dxn+2] = t0; buf[dxn+3] = t1;
                    }
                }
                else
                {
                    for( k = 0; k < xtab_size; k++ )
                    {
                        int sxn = xtab[k].si;
                        int dxn = xtab[k].di;
                        WT alpha = xtab[k].alpha;
                        for( int c = 0; c < cn; c++ )
                            buf[dxn + c] += S[sxn + c]*alpha;
                    }
                }
            }

            if( dy != prev_dy )
            {
                T* D = (T*)(dst->data + dst->step*prev_dy);

                for( dx = 0; dx < dsize.width; dx++ )
                {
                    D[dx] = saturate_cast<T>(sum[dx]);
                    sum[dx] = beta*buf[dx];
                }
                prev_dy = dy;
            }
            else
            {
                for( dx = 0; dx < dsize.width; dx++ )
                    sum[dx] += beta*buf[dx];
            }
        }

        {
            T* D = (T*)(dst->data + dst->step*prev_dy);
            for( dx = 0; dx < dsize.width; dx++ )
                D[dx] = saturate_cast<T>(sum[dx]);
        }
    }

private:
    const Mat* src;
    Mat* dst;
    const DecimateAlpha* xtab0;
    const DecimateAlpha* ytab;
    int xtab_size0, ytab_size;
    const int* tabofs;
};
template <typename T, typename WT>
static void resizeArea_( const Mat& src, Mat& dst,
                         const DecimateAlpha* xtab, int xtab_size,
                         const DecimateAlpha* ytab, int ytab_size,
                         const int* tabofs )
{
    parallel_for_(Range(0, dst.rows),
        ResizeArea_Invoker<T, WT>(src, dst, xtab, xtab_size, ytab, ytab_size, tabofs),
        dst.total()/((double)(1 << 16)));
}
typedef void (*ResizeFunc)( const Mat& src, Mat& dst,
                            const int* xofs, const void* alpha,
                            const int* yofs, const void* beta,
                            int xmin, int xmax, int ksize );

typedef void (*ResizeAreaFastFunc)( const Mat& src, Mat& dst,
                                    const int* ofs, const int *xofs,
                                    int scale_x, int scale_y );

typedef void (*ResizeAreaFunc)( const Mat& src, Mat& dst,
                                const DecimateAlpha* xtab, int xtab_size,
                                const DecimateAlpha* ytab, int ytab_size,
                                const int* tabofs );
static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, DecimateAlpha* tab )
{
    int k = 0;
    for(int dx = 0; dx < dsize; dx++ )
    {
        double fsx1 = dx * scale;
        double fsx2 = fsx1 + scale;
        double cellWidth = std::min(scale, ssize - fsx1);

        int sx1 = cvCeil(fsx1), sx2 = cvFloor(fsx2);

        sx2 = std::min(sx2, ssize - 1);
        sx1 = std::min(sx1, sx2);

        if( sx1 - fsx1 > 1e-3 )
        {
            assert( k < ssize*2 );
            tab[k].di = dx * cn;
            tab[k].si = (sx1 - 1) * cn;
            tab[k++].alpha = (float)((sx1 - fsx1) / cellWidth);
        }

        for(int sx = sx1; sx < sx2; sx++ )
        {
            assert( k < ssize*2 );
            tab[k].di = dx * cn;
            tab[k].si = sx * cn;
            tab[k++].alpha = float(1.0 / cellWidth);
        }

        if( fsx2 - sx2 > 1e-3 )
        {
            assert( k < ssize*2 );
            tab[k].di = dx * cn;
            tab[k].si = sx2 * cn;
            tab[k++].alpha = (float)(std::min(std::min(fsx2 - sx2, 1.), cellWidth) / cellWidth);
        }
    }
    return k;
}
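
#if 0 // Editor's sketch (not part of the library): worked example of the table
      // built by computeResizeAreaTab. Shrinking ssize = 3 to dsize = 2 gives
      // scale = 1.5: destination pixel 0 covers source span [0, 1.5) (pixel 0
      // with weight 1/1.5, pixel 1 with weight 0.5/1.5), destination pixel 1
      // covers [1.5, 3) (pixel 1 with weight 0.5/1.5, pixel 2 with weight
      // 1/1.5); the weights of each destination pixel sum to 1.
static void areaTabSketch()
{
    DecimateAlpha tab[3*2];                      // needs at most ssize*2 entries
    int k = computeResizeAreaTab(3, 2, 1, 1.5, tab);
    CV_Assert( k == 4 );                         // two full + two partial spans
}
#endif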
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
class IPPresizeInvoker :
    public ParallelLoopBody
{
public:
    IPPresizeInvoker(Mat &_src, Mat &_dst, double &_inv_scale_x, double &_inv_scale_y, int _mode, ippiResizeSqrPixelFunc _func, bool *_ok) :
        ParallelLoopBody(), src(_src), dst(_dst), inv_scale_x(_inv_scale_x), inv_scale_y(_inv_scale_y), mode(_mode), func(_func), ok(_ok)
    {
        *ok = true;
    }

    virtual void operator() (const Range& range) const
    {
        int cn = src.channels();
        IppiRect srcroi = { 0, range.start, src.cols, range.end - range.start };
        int dsty = CV_IMIN(cvRound(range.start * inv_scale_y), dst.rows);
        int dstwidth = CV_IMIN(cvRound(src.cols * inv_scale_x), dst.cols);
        int dstheight = CV_IMIN(cvRound(range.end * inv_scale_y), dst.rows);
        IppiRect dstroi = { 0, dsty, dstwidth, dstheight - dsty };
        int bufsize;
        ippiResizeGetBufSize( srcroi, dstroi, cn, mode, &bufsize );
        AutoBuffer<uchar> buf(bufsize + 64);
        uchar* bufptr = alignPtr((uchar*)buf, 32);
        if( func( src.data, ippiSize(src.cols, src.rows), (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, inv_scale_x, inv_scale_y, 0, 0, mode, bufptr ) < 0 )
            *ok = false;
    }

private:
    Mat &src;
    Mat &dst;
    double inv_scale_x;
    double inv_scale_y;
    int mode;
    ippiResizeSqrPixelFunc func;
    bool *ok;
    const IPPresizeInvoker& operator= (const IPPresizeInvoker&);
};
#endif
static void ocl_computeResizeAreaTabs(int ssize, int dsize, double scale, int * const map_tab,
                                      float * const alpha_tab, int * const ofs_tab)
{
    int k = 0, dx = 0;
    for ( ; dx < dsize; dx++)
    {
        ofs_tab[dx] = k;

        double fsx1 = dx * scale;
        double fsx2 = fsx1 + scale;
        double cellWidth = std::min(scale, ssize - fsx1);

        int sx1 = cvCeil(fsx1), sx2 = cvFloor(fsx2);

        sx2 = std::min(sx2, ssize - 1);
        sx1 = std::min(sx1, sx2);

        if (sx1 - fsx1 > 1e-3)
        {
            map_tab[k] = sx1 - 1;
            alpha_tab[k++] = (float)((sx1 - fsx1) / cellWidth);
        }

        for (int sx = sx1; sx < sx2; sx++)
        {
            map_tab[k] = sx;
            alpha_tab[k++] = float(1.0 / cellWidth);
        }

        if (fsx2 - sx2 > 1e-3)
        {
            map_tab[k] = sx2;
            alpha_tab[k++] = (float)(std::min(std::min(fsx2 - sx2, 1.), cellWidth) / cellWidth);
        }
    }
    ofs_tab[dx] = k;
}
static void ocl_computeResizeAreaFastTabs(int * dmap_tab, int * smap_tab, int scale, int dcols, int scol)
{
    for (int i = 0; i < dcols; ++i)
        dmap_tab[i] = scale * i;

    for (int i = 0, size = dcols * scale; i < size; ++i)
        smap_tab[i] = std::min(scol - 1, i);
}
static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
                        double fx, double fy, int interpolation)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    double inv_fx = 1. / fx, inv_fy = 1. / fy;
    float inv_fxf = (float)inv_fx, inv_fyf = (float)inv_fy;

    if( cn == 3 || !(cn <= 4 &&
        (interpolation == INTER_NEAREST || interpolation == INTER_LINEAR ||
         (interpolation == INTER_AREA && inv_fx >= 1 && inv_fy >= 1) )) )
        return false;

    UMat src = _src.getUMat();
    _dst.create(dsize, type);
    UMat dst = _dst.getUMat();

    ocl::Kernel k;
    size_t globalsize[] = { dst.cols, dst.rows };

    if (interpolation == INTER_LINEAR)
    {
        int wdepth = std::max(depth, CV_32S);
        int wtype = CV_MAKETYPE(wdepth, cn);
        char buf[2][32];
        k.create("resizeLN", ocl::imgproc::resize_oclsrc,
                 format("-D INTER_LINEAR -D depth=%d -D PIXTYPE=%s -D WORKTYPE=%s -D convertToWT=%s -D convertToDT=%s",
                        depth, ocl::typeToStr(type), ocl::typeToStr(wtype),
                        ocl::convertTypeStr(depth, wdepth, cn, buf[0]),
                        ocl::convertTypeStr(wdepth, depth, cn, buf[1])));
    }
    else if (interpolation == INTER_NEAREST)
    {
        k.create("resizeNN", ocl::imgproc::resize_oclsrc,
                 format("-D INTER_NEAREST -D PIXTYPE=%s -D cn=%d", ocl::memopTypeToStr(type), cn));
    }
    else if (interpolation == INTER_AREA)
    {
        int iscale_x = saturate_cast<int>(inv_fx);
        int iscale_y = saturate_cast<int>(inv_fy);
        bool is_area_fast = std::abs(inv_fx - iscale_x) < DBL_EPSILON &&
            std::abs(inv_fy - iscale_y) < DBL_EPSILON;
        int wdepth = std::max(depth, is_area_fast ? CV_32S : CV_32F);
        int wtype = CV_MAKE_TYPE(wdepth, cn);

        char cvt[2][40];
        String buildOption = format("-D INTER_AREA -D T=%s -D WTV=%s -D convertToWTV=%s",
                                    ocl::typeToStr(type), ocl::typeToStr(wtype),
                                    ocl::convertTypeStr(depth, wdepth, cn, cvt[0]));

        UMat alphaOcl, tabofsOcl, mapOcl;
        UMat dmap, smap;

        if (is_area_fast)
        {
            int wdepth2 = std::max(CV_32F, depth), wtype2 = CV_MAKE_TYPE(wdepth2, cn);
            buildOption = buildOption + format(" -D convertToT=%s -D WT2V=%s -D convertToWT2V=%s -D INTER_AREA_FAST"
                                               " -D XSCALE=%d -D YSCALE=%d -D SCALE=%f",
                                               ocl::convertTypeStr(wdepth2, depth, cn, cvt[0]),
                                               ocl::typeToStr(wtype2), ocl::convertTypeStr(wdepth, wdepth2, cn, cvt[1]),
                                               iscale_x, iscale_y, 1.0f / (iscale_x * iscale_y));

            k.create("resizeAREA_FAST", ocl::imgproc::resize_oclsrc, buildOption);
            if (k.empty())
                return false;

            int smap_tab_size = dst.cols * iscale_x + dst.rows * iscale_y;
            AutoBuffer<int> dmap_tab(dst.cols + dst.rows), smap_tab(smap_tab_size);
            int * dxmap_tab = dmap_tab, * dymap_tab = dxmap_tab + dst.cols;
            int * sxmap_tab = smap_tab, * symap_tab = smap_tab + dst.cols * iscale_y;

            ocl_computeResizeAreaFastTabs(dxmap_tab, sxmap_tab, iscale_x, dst.cols, src.cols);
            ocl_computeResizeAreaFastTabs(dymap_tab, symap_tab, iscale_y, dst.rows, src.rows);

            Mat(1, dst.cols + dst.rows, CV_32SC1, (void *)dmap_tab).copyTo(dmap);
            Mat(1, smap_tab_size, CV_32SC1, (void *)smap_tab).copyTo(smap);
        }
        else
        {
            buildOption = buildOption + format(" -D convertToT=%s", ocl::convertTypeStr(wdepth, depth, cn, cvt[0]));
            k.create("resizeAREA", ocl::imgproc::resize_oclsrc, buildOption);
            if (k.empty())
                return false;

            Size ssize = src.size();
            int xytab_size = (ssize.width + ssize.height) << 1;
            int tabofs_size = dsize.height + dsize.width + 2;

            AutoBuffer<int> _xymap_tab(xytab_size), _xyofs_tab(tabofs_size);
            AutoBuffer<float> _xyalpha_tab(xytab_size);
            int * xmap_tab = _xymap_tab, * ymap_tab = _xymap_tab + (ssize.width << 1);
            float * xalpha_tab = _xyalpha_tab, * yalpha_tab = _xyalpha_tab + (ssize.width << 1);
            int * xofs_tab = _xyofs_tab, * yofs_tab = _xyofs_tab + dsize.width + 1;

            ocl_computeResizeAreaTabs(ssize.width, dsize.width, inv_fx, xmap_tab, xalpha_tab, xofs_tab);
            ocl_computeResizeAreaTabs(ssize.height, dsize.height, inv_fy, ymap_tab, yalpha_tab, yofs_tab);

            // loading precomputed arrays to GPU
            Mat(1, xytab_size, CV_32FC1, (void *)_xyalpha_tab).copyTo(alphaOcl);
            Mat(1, xytab_size, CV_32SC1, (void *)_xymap_tab).copyTo(mapOcl);
            Mat(1, tabofs_size, CV_32SC1, (void *)_xyofs_tab).copyTo(tabofsOcl);
        }

        ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), dstarg = ocl::KernelArg::WriteOnly(dst);

        if (is_area_fast)
            k.args(srcarg, dstarg, ocl::KernelArg::PtrReadOnly(dmap), ocl::KernelArg::PtrReadOnly(smap));
        else
            k.args(srcarg, dstarg, inv_fxf, inv_fyf, ocl::KernelArg::PtrReadOnly(tabofsOcl),
                   ocl::KernelArg::PtrReadOnly(mapOcl), ocl::KernelArg::PtrReadOnly(alphaOcl));

        return k.run(2, globalsize, NULL, false);
    }

    if( k.empty() )
        return false;
    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
           (float)inv_fx, (float)inv_fy);

    return k.run(2, globalsize, 0, false);
}

}
//////////////////////////////////////////////////////////////////////////////////////////

void cv::resize( InputArray _src, OutputArray _dst, Size dsize,
                 double inv_scale_x, double inv_scale_y, int interpolation )
{
    static ResizeFunc linear_tab[] =
    {
        resizeGeneric_<
            HResizeLinear<uchar, int, short,
                INTER_RESIZE_COEF_SCALE,
                HResizeLinearVec_8u32s>,
            VResizeLinear<uchar, int, short,
                FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
                VResizeLinearVec_32s8u> >,
        0,
        resizeGeneric_<
            HResizeLinear<ushort, float, float, 1,
                HResizeLinearVec_16u32f>,
            VResizeLinear<ushort, float, float, Cast<float, ushort>,
                VResizeLinearVec_32f16u> >,
        resizeGeneric_<
            HResizeLinear<short, float, float, 1,
                HResizeLinearVec_16s32f>,
            VResizeLinear<short, float, float, Cast<float, short>,
                VResizeLinearVec_32f16s> >,
        0,
        resizeGeneric_<
            HResizeLinear<float, float, float, 1,
                HResizeLinearVec_32f>,
            VResizeLinear<float, float, float, Cast<float, float>,
                VResizeLinearVec_32f> >,
        resizeGeneric_<
            HResizeLinear<double, double, float, 1,
                HResizeLinearVec_64f>,
            VResizeLinear<double, double, float, Cast<double, double>,
                VResizeNoVec> >,
        0
    };

    static ResizeFunc cubic_tab[] =
    {
        resizeGeneric_<
            HResizeCubic<uchar, int, short>,
            VResizeCubic<uchar, int, short,
                FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
                VResizeCubicVec_32s8u> >,
        0,
        resizeGeneric_<
            HResizeCubic<ushort, float, float>,
            VResizeCubic<ushort, float, float, Cast<float, ushort>,
                VResizeCubicVec_32f16u> >,
        resizeGeneric_<
            HResizeCubic<short, float, float>,
            VResizeCubic<short, float, float, Cast<float, short>,
                VResizeCubicVec_32f16s> >,
        0,
        resizeGeneric_<
            HResizeCubic<float, float, float>,
            VResizeCubic<float, float, float, Cast<float, float>,
                VResizeCubicVec_32f> >,
        resizeGeneric_<
            HResizeCubic<double, double, float>,
            VResizeCubic<double, double, float, Cast<double, double>,
                VResizeNoVec> >,
        0
    };

    static ResizeFunc lanczos4_tab[] =
    {
        resizeGeneric_<HResizeLanczos4<uchar, int, short>,
            VResizeLanczos4<uchar, int, short,
                FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
                VResizeNoVec> >,
        0,
        resizeGeneric_<HResizeLanczos4<ushort, float, float>,
            VResizeLanczos4<ushort, float, float, Cast<float, ushort>,
                VResizeNoVec> >,
        resizeGeneric_<HResizeLanczos4<short, float, float>,
            VResizeLanczos4<short, float, float, Cast<float, short>,
                VResizeNoVec> >,
        0,
        resizeGeneric_<HResizeLanczos4<float, float, float>,
            VResizeLanczos4<float, float, float, Cast<float, float>,
                VResizeNoVec> >,
        resizeGeneric_<HResizeLanczos4<double, double, float>,
            VResizeLanczos4<double, double, float, Cast<double, double>,
                VResizeNoVec> >,
        0
    };

    static ResizeAreaFastFunc areafast_tab[] =
    {
        resizeAreaFast_<uchar, int, ResizeAreaFastVec<uchar, ResizeAreaFastVec_SIMD_8u> >,
        0,
        resizeAreaFast_<ushort, float, ResizeAreaFastVec<ushort, ResizeAreaFastVec_SIMD_16u> >,
        resizeAreaFast_<short, float, ResizeAreaFastVec<short, ResizeAreaFastNoVec<short, float> > >,
        0,
        resizeAreaFast_<float, float, ResizeAreaFastNoVec<float, float> >,
        resizeAreaFast_<double, double, ResizeAreaFastNoVec<double, double> >,
        0
    };

    static ResizeAreaFunc area_tab[] =
    {
        resizeArea_<uchar, float>, 0, resizeArea_<ushort, float>,
        resizeArea_<short, float>, 0, resizeArea_<float, float>,
        resizeArea_<double, double>, 0
    };
2183 Size ssize = _src.size();
2185 CV_Assert( ssize.area() > 0 );
2186 CV_Assert( dsize.area() > 0 || (inv_scale_x > 0 && inv_scale_y > 0) );
2187 if( dsize.area() == 0 )
2189 dsize = Size(saturate_cast<int>(ssize.width*inv_scale_x),
2190 saturate_cast<int>(ssize.height*inv_scale_y));
2191 CV_Assert( dsize.area() > 0 );
2195 inv_scale_x = (double)dsize.width/ssize.width;
2196 inv_scale_y = (double)dsize.height/ssize.height;
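    /* Usage sketch (editorial note, not part of the original source): resize
       accepts either an explicit destination size or a pair of scale factors,
       e.g., assuming a valid Mat "img":

           cv::Mat half, fixed;
           cv::resize(img, half, cv::Size(), 0.5, 0.5, cv::INTER_AREA); // by scale factors
           cv::resize(img, fixed, cv::Size(640, 480));                  // by explicit size
    */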
2199 if( ocl::useOpenCL() && _dst.kind() == _InputArray::UMAT &&
2200 ocl_resize(_src, _dst, dsize, inv_scale_x, inv_scale_y, interpolation))
2203 Mat src = _src.getMat();
2204 _dst.create(dsize, src.type());
2205 Mat dst = _dst.getMat();
2207 #ifdef HAVE_TEGRA_OPTIMIZATION
2208 if (tegra::resize(src, dst, (float)inv_scale_x, (float)inv_scale_y, interpolation))
2212 int depth = src.depth(), cn = src.channels();
2213 double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y;
2214 int k, sx, sy, dx, dy;
2216 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
2217 int mode = interpolation == INTER_LINEAR ? IPPI_INTER_LINEAR : 0;
2218 int type = src.type();
2219 ippiResizeSqrPixelFunc ippFunc =
2220 type == CV_8UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C1R :
2221 type == CV_8UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C3R :
2222 type == CV_8UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C4R :
2223 type == CV_16UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C1R :
2224 type == CV_16UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C3R :
2225 type == CV_16UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C4R :
2226 type == CV_16SC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C1R :
2227 type == CV_16SC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C3R :
2228 type == CV_16SC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C4R :
2229 type == CV_32FC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C1R :
2230 type == CV_32FC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C3R :
2231 type == CV_32FC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C4R :
2233 if( ippFunc && mode != 0 )
2236 Range range(0, src.rows);
2237 IPPresizeInvoker invoker(src, dst, inv_scale_x, inv_scale_y, mode, ippFunc, &ok);
2238 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
2244 if( interpolation == INTER_NEAREST )
2246 resizeNN( src, dst, inv_scale_x, inv_scale_y );
2251 int iscale_x = saturate_cast<int>(scale_x);
2252 int iscale_y = saturate_cast<int>(scale_y);
2254 bool is_area_fast = std::abs(scale_x - iscale_x) < DBL_EPSILON &&
2255 std::abs(scale_y - iscale_y) < DBL_EPSILON;
2257     // when scale_x and scale_y are both exactly 2,
2258     // INTER_AREA (fast) produces the same result as INTER_LINEAR
2259 if( interpolation == INTER_LINEAR && is_area_fast && iscale_x == 2 && iscale_y == 2 )
2260 interpolation = INTER_AREA;
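    /* Editorial note (not in the original source): for an exact 2x decimation the
       bilinear sample point for output pixel dx falls at source coordinate
       (dx + 0.5)*2 - 0.5 = 2*dx + 0.5, i.e. exactly between two source pixels, so
       all four bilinear weights equal 1/4 and the result is the same 2x2 box
       average that the fast INTER_AREA path computes. */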
2262     // true "area" interpolation is only implemented for downscaling (scale_x >= 1 && scale_y >= 1,
2263     // i.e. the requested inv_scale factors are <= 1); for upscaling it is emulated using a variant of bilinear interpolation
2264 if( interpolation == INTER_AREA && scale_x >= 1 && scale_y >= 1 )
2268 int area = iscale_x*iscale_y;
2269 size_t srcstep = src.step / src.elemSize1();
2270         AutoBuffer<int> _ofs(area + dsize.width*cn);
2271         int* ofs = _ofs;
2272         int* xofs = ofs + area;
2273 ResizeAreaFastFunc func = areafast_tab[depth];
2274 CV_Assert( func != 0 );
2276 for( sy = 0, k = 0; sy < iscale_y; sy++ )
2277 for( sx = 0; sx < iscale_x; sx++ )
2278 ofs[k++] = (int)(sy*srcstep + sx*cn);
2280 for( dx = 0; dx < dsize.width; dx++ )
2284 for( k = 0; k < cn; k++ )
2285 xofs[j + k] = sx + k;
2288 func( src, dst, ofs, xofs, iscale_x, iscale_y );
2292 ResizeAreaFunc func = area_tab[depth];
2293 CV_Assert( func != 0 && cn <= 4 );
2295 AutoBuffer<DecimateAlpha> _xytab((ssize.width + ssize.height)*2);
2296 DecimateAlpha* xtab = _xytab, *ytab = xtab + ssize.width*2;
2298 int xtab_size = computeResizeAreaTab(ssize.width, dsize.width, cn, scale_x, xtab);
2299 int ytab_size = computeResizeAreaTab(ssize.height, dsize.height, 1, scale_y, ytab);
2301 AutoBuffer<int> _tabofs(dsize.height + 1);
2302 int* tabofs = _tabofs;
2303 for( k = 0, dy = 0; k < ytab_size; k++ )
2305 if( k == 0 || ytab[k].di != ytab[k-1].di )
2307 assert( ytab[k].di == dy );
2311 tabofs[dy] = ytab_size;
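        /* Editorial note (not in the original source): each DecimateAlpha entry
           maps source index si into destination index di with weight alpha;
           tabofs[dy] records where the entries for destination row dy start
           inside ytab, so the area kernel can walk each output row's
           contributing source rows contiguously. */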
2313 func( src, dst, xtab, xtab_size, ytab, ytab_size, tabofs );
2318 int xmin = 0, xmax = dsize.width, width = dsize.width*cn;
2319 bool area_mode = interpolation == INTER_AREA;
2320 bool fixpt = depth == CV_8U;
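    /* Editorial note (not in the original source): for 8-bit images the
       interpolation weights are stored as shorts scaled by
       INTER_RESIZE_COEF_SCALE (see linear_tab above, which accumulates in int
       and casts back with FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>);
       all other depths keep plain float weights. */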
2323 int ksize=0, ksize2;
2324 if( interpolation == INTER_CUBIC )
2325 ksize = 4, func = cubic_tab[depth];
2326 else if( interpolation == INTER_LANCZOS4 )
2327 ksize = 8, func = lanczos4_tab[depth];
2328 else if( interpolation == INTER_LINEAR || interpolation == INTER_AREA )
2329 ksize = 2, func = linear_tab[depth];
2331 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
2334 CV_Assert( func != 0 );
2336 AutoBuffer<uchar> _buffer((width + dsize.height)*(sizeof(int) + sizeof(float)*ksize));
2337 int* xofs = (int*)(uchar*)_buffer;
2338 int* yofs = xofs + width;
2339 float* alpha = (float*)(yofs + dsize.height);
2340 short* ialpha = (short*)alpha;
2341 float* beta = alpha + width*ksize;
2342 short* ibeta = ialpha + width*ksize;
2343 float cbuf[MAX_ESIZE];
2345 for( dx = 0; dx < dsize.width; dx++ )
2349 fx = (float)((dx+0.5)*scale_x - 0.5);
2355 sx = cvFloor(dx*scale_x);
2356 fx = (float)((dx+1) - (sx+1)*inv_scale_x);
2357 fx = fx <= 0 ? 0.f : fx - cvFloor(fx);
2367 if( sx + ksize2 >= ssize.width )
2369 xmax = std::min( xmax, dx );
2370 if( sx >= ssize.width-1 )
2371 fx = 0, sx = ssize.width-1;
2374 for( k = 0, sx *= cn; k < cn; k++ )
2375 xofs[dx*cn + k] = sx + k;
2377 if( interpolation == INTER_CUBIC )
2378 interpolateCubic( fx, cbuf );
2379 else if( interpolation == INTER_LANCZOS4 )
2380 interpolateLanczos4( fx, cbuf );
2388 for( k = 0; k < ksize; k++ )
2389 ialpha[dx*cn*ksize + k] = saturate_cast<short>(cbuf[k]*INTER_RESIZE_COEF_SCALE);
2390 for( ; k < cn*ksize; k++ )
2391 ialpha[dx*cn*ksize + k] = ialpha[dx*cn*ksize + k - ksize];
2395 for( k = 0; k < ksize; k++ )
2396 alpha[dx*cn*ksize + k] = cbuf[k];
2397 for( ; k < cn*ksize; k++ )
2398 alpha[dx*cn*ksize + k] = alpha[dx*cn*ksize + k - ksize];
2402 for( dy = 0; dy < dsize.height; dy++ )
2406 fy = (float)((dy+0.5)*scale_y - 0.5);
2412 sy = cvFloor(dy*scale_y);
2413 fy = (float)((dy+1) - (sy+1)*inv_scale_y);
2414 fy = fy <= 0 ? 0.f : fy - cvFloor(fy);
2418 if( interpolation == INTER_CUBIC )
2419 interpolateCubic( fy, cbuf );
2420 else if( interpolation == INTER_LANCZOS4 )
2421 interpolateLanczos4( fy, cbuf );
2430 for( k = 0; k < ksize; k++ )
2431 ibeta[dy*ksize + k] = saturate_cast<short>(cbuf[k]*INTER_RESIZE_COEF_SCALE);
2435 for( k = 0; k < ksize; k++ )
2436 beta[dy*ksize + k] = cbuf[k];
2440 func( src, dst, xofs, fixpt ? (void*)ialpha : (void*)alpha, yofs,
2441 fixpt ? (void*)ibeta : (void*)beta, xmin, xmax, ksize );
2445 /****************************************************************************************\
2446 * General warping (affine, perspective, remap) *
2447 \****************************************************************************************/
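/* Editorial sketch (not part of the original source): remap computes
   dst(x, y) = src(map_x(x, y), map_y(x, y)). A minimal horizontal-flip example
   with CV_32FC1 maps, assuming a valid Mat "img":

       cv::Mat mapx(img.size(), CV_32FC1), mapy(img.size(), CV_32FC1), out;
       for( int y = 0; y < img.rows; y++ )
           for( int x = 0; x < img.cols; x++ )
           {
               mapx.at<float>(y, x) = (float)(img.cols - 1 - x);
               mapy.at<float>(y, x) = (float)y;
           }
       cv::remap(img, out, mapx, mapy, cv::INTER_LINEAR);
*/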
2452 template<typename T>
2453 static void remapNearest( const Mat& _src, Mat& _dst, const Mat& _xy,
2454 int borderType, const Scalar& _borderValue )
2456 Size ssize = _src.size(), dsize = _dst.size();
2457 int cn = _src.channels();
2458 const T* S0 = (const T*)_src.data;
2459 size_t sstep = _src.step/sizeof(S0[0]);
2460 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2461 saturate_cast<T>(_borderValue[1]),
2462 saturate_cast<T>(_borderValue[2]),
2463 saturate_cast<T>(_borderValue[3]));
2466 unsigned width1 = ssize.width, height1 = ssize.height;
2468 if( _dst.isContinuous() && _xy.isContinuous() )
2470 dsize.width *= dsize.height;
2474 for( dy = 0; dy < dsize.height; dy++ )
2476 T* D = (T*)(_dst.data + _dst.step*dy);
2477 const short* XY = (const short*)(_xy.data + _xy.step*dy);
2481 for( dx = 0; dx < dsize.width; dx++ )
2483 int sx = XY[dx*2], sy = XY[dx*2+1];
2484 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
2485 D[dx] = S0[sy*sstep + sx];
2488 if( borderType == BORDER_REPLICATE )
2490 sx = clip(sx, 0, ssize.width);
2491 sy = clip(sy, 0, ssize.height);
2492 D[dx] = S0[sy*sstep + sx];
2494 else if( borderType == BORDER_CONSTANT )
2496 else if( borderType != BORDER_TRANSPARENT )
2498 sx = borderInterpolate(sx, ssize.width, borderType);
2499 sy = borderInterpolate(sy, ssize.height, borderType);
2500 D[dx] = S0[sy*sstep + sx];
2507 for( dx = 0; dx < dsize.width; dx++, D += cn )
2509 int sx = XY[dx*2], sy = XY[dx*2+1], k;
2511 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
2515 S = S0 + sy*sstep + sx*3;
2516 D[0] = S[0], D[1] = S[1], D[2] = S[2];
2520 S = S0 + sy*sstep + sx*4;
2521 D[0] = S[0], D[1] = S[1], D[2] = S[2], D[3] = S[3];
2525 S = S0 + sy*sstep + sx*cn;
2526 for( k = 0; k < cn; k++ )
2530 else if( borderType != BORDER_TRANSPARENT )
2532 if( borderType == BORDER_REPLICATE )
2534 sx = clip(sx, 0, ssize.width);
2535 sy = clip(sy, 0, ssize.height);
2536 S = S0 + sy*sstep + sx*cn;
2538 else if( borderType == BORDER_CONSTANT )
2542 sx = borderInterpolate(sx, ssize.width, borderType);
2543 sy = borderInterpolate(sy, ssize.height, borderType);
2544 S = S0 + sy*sstep + sx*cn;
2546 for( k = 0; k < cn; k++ )
2557 int operator()( const Mat&, void*, const short*, const ushort*,
2558 const void*, int ) const { return 0; }
2565 int operator()( const Mat& _src, void* _dst, const short* XY,
2566 const ushort* FXY, const void* _wtab, int width ) const
2568 int cn = _src.channels();
2570 if( (cn != 1 && cn != 3 && cn != 4) || !checkHardwareSupport(CV_CPU_SSE2) )
2573 const uchar *S0 = _src.data, *S1 = _src.data + _src.step;
2574 const short* wtab = cn == 1 ? (const short*)_wtab : &BilinearTab_iC4[0][0][0];
2575 uchar* D = (uchar*)_dst;
2576 int x = 0, sstep = (int)_src.step;
2577 __m128i delta = _mm_set1_epi32(INTER_REMAP_COEF_SCALE/2);
2578 __m128i xy2ofs = _mm_set1_epi32(cn + (sstep << 16));
2579 __m128i z = _mm_setzero_si128();
2580 int CV_DECL_ALIGNED(16) iofs0[4], iofs1[4];
2584 for( ; x <= width - 8; x += 8 )
2586 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2587 __m128i xy1 = _mm_loadu_si128( (const __m128i*)(XY + x*2 + 8));
2588 __m128i v0, v1, v2, v3, a0, a1, b0, b1;
2591 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2592 xy1 = _mm_madd_epi16( xy1, xy2ofs );
2593 _mm_store_si128( (__m128i*)iofs0, xy0 );
2594 _mm_store_si128( (__m128i*)iofs1, xy1 );
2596 i0 = *(ushort*)(S0 + iofs0[0]) + (*(ushort*)(S0 + iofs0[1]) << 16);
2597 i1 = *(ushort*)(S0 + iofs0[2]) + (*(ushort*)(S0 + iofs0[3]) << 16);
2598 v0 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2599 i0 = *(ushort*)(S1 + iofs0[0]) + (*(ushort*)(S1 + iofs0[1]) << 16);
2600 i1 = *(ushort*)(S1 + iofs0[2]) + (*(ushort*)(S1 + iofs0[3]) << 16);
2601 v1 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2602 v0 = _mm_unpacklo_epi8(v0, z);
2603 v1 = _mm_unpacklo_epi8(v1, z);
2605 a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x]*4)),
2606 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+1]*4)));
2607 a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+2]*4)),
2608 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+3]*4)));
2609 b0 = _mm_unpacklo_epi64(a0, a1);
2610 b1 = _mm_unpackhi_epi64(a0, a1);
2611 v0 = _mm_madd_epi16(v0, b0);
2612 v1 = _mm_madd_epi16(v1, b1);
2613 v0 = _mm_add_epi32(_mm_add_epi32(v0, v1), delta);
2615 i0 = *(ushort*)(S0 + iofs1[0]) + (*(ushort*)(S0 + iofs1[1]) << 16);
2616 i1 = *(ushort*)(S0 + iofs1[2]) + (*(ushort*)(S0 + iofs1[3]) << 16);
2617 v2 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2618 i0 = *(ushort*)(S1 + iofs1[0]) + (*(ushort*)(S1 + iofs1[1]) << 16);
2619 i1 = *(ushort*)(S1 + iofs1[2]) + (*(ushort*)(S1 + iofs1[3]) << 16);
2620 v3 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2621 v2 = _mm_unpacklo_epi8(v2, z);
2622 v3 = _mm_unpacklo_epi8(v3, z);
2624 a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+4]*4)),
2625 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+5]*4)));
2626 a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+6]*4)),
2627 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+7]*4)));
2628 b0 = _mm_unpacklo_epi64(a0, a1);
2629 b1 = _mm_unpackhi_epi64(a0, a1);
2630 v2 = _mm_madd_epi16(v2, b0);
2631 v3 = _mm_madd_epi16(v3, b1);
2632 v2 = _mm_add_epi32(_mm_add_epi32(v2, v3), delta);
2634 v0 = _mm_srai_epi32(v0, INTER_REMAP_COEF_BITS);
2635 v2 = _mm_srai_epi32(v2, INTER_REMAP_COEF_BITS);
2636 v0 = _mm_packus_epi16(_mm_packs_epi32(v0, v2), z);
2637 _mm_storel_epi64( (__m128i*)(D + x), v0 );
2642 for( ; x <= width - 5; x += 4, D += 12 )
2644 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2645 __m128i u0, v0, u1, v1;
2647 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2648 _mm_store_si128( (__m128i*)iofs0, xy0 );
2649 const __m128i *w0, *w1;
2650 w0 = (const __m128i*)(wtab + FXY[x]*16);
2651 w1 = (const __m128i*)(wtab + FXY[x+1]*16);
2653 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])),
2654 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 3)));
2655 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])),
2656 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 3)));
2657 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])),
2658 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 3)));
2659 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])),
2660 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 3)));
2661 u0 = _mm_unpacklo_epi8(u0, z);
2662 v0 = _mm_unpacklo_epi8(v0, z);
2663 u1 = _mm_unpacklo_epi8(u1, z);
2664 v1 = _mm_unpacklo_epi8(v1, z);
2665 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2666 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2667 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2668 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2669 u0 = _mm_slli_si128(u0, 4);
2670 u0 = _mm_packs_epi32(u0, u1);
2671 u0 = _mm_packus_epi16(u0, u0);
2672 _mm_storel_epi64((__m128i*)D, _mm_srli_si128(u0,1));
2674 w0 = (const __m128i*)(wtab + FXY[x+2]*16);
2675 w1 = (const __m128i*)(wtab + FXY[x+3]*16);
2677 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])),
2678 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 3)));
2679 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])),
2680 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 3)));
2681 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])),
2682 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 3)));
2683 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])),
2684 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 3)));
2685 u0 = _mm_unpacklo_epi8(u0, z);
2686 v0 = _mm_unpacklo_epi8(v0, z);
2687 u1 = _mm_unpacklo_epi8(u1, z);
2688 v1 = _mm_unpacklo_epi8(v1, z);
2689 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2690 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2691 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2692 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2693 u0 = _mm_slli_si128(u0, 4);
2694 u0 = _mm_packs_epi32(u0, u1);
2695 u0 = _mm_packus_epi16(u0, u0);
2696 _mm_storel_epi64((__m128i*)(D + 6), _mm_srli_si128(u0,1));
2701 for( ; x <= width - 4; x += 4, D += 16 )
2703 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2704 __m128i u0, v0, u1, v1;
2706 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2707 _mm_store_si128( (__m128i*)iofs0, xy0 );
2708 const __m128i *w0, *w1;
2709 w0 = (const __m128i*)(wtab + FXY[x]*16);
2710 w1 = (const __m128i*)(wtab + FXY[x+1]*16);
2712 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])),
2713 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 4)));
2714 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])),
2715 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 4)));
2716 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])),
2717 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 4)));
2718 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])),
2719 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 4)));
2720 u0 = _mm_unpacklo_epi8(u0, z);
2721 v0 = _mm_unpacklo_epi8(v0, z);
2722 u1 = _mm_unpacklo_epi8(u1, z);
2723 v1 = _mm_unpacklo_epi8(v1, z);
2724 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2725 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2726 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2727 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2728 u0 = _mm_packs_epi32(u0, u1);
2729 u0 = _mm_packus_epi16(u0, u0);
2730 _mm_storel_epi64((__m128i*)D, u0);
2732 w0 = (const __m128i*)(wtab + FXY[x+2]*16);
2733 w1 = (const __m128i*)(wtab + FXY[x+3]*16);
2735 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])),
2736 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 4)));
2737 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])),
2738 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 4)));
2739 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])),
2740 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 4)));
2741 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])),
2742 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 4)));
2743 u0 = _mm_unpacklo_epi8(u0, z);
2744 v0 = _mm_unpacklo_epi8(v0, z);
2745 u1 = _mm_unpacklo_epi8(u1, z);
2746 v1 = _mm_unpacklo_epi8(v1, z);
2747 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2748 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2749 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2750 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2751 u0 = _mm_packs_epi32(u0, u1);
2752 u0 = _mm_packus_epi16(u0, u0);
2753 _mm_storel_epi64((__m128i*)(D + 8), u0);
2763 typedef RemapNoVec RemapVec_8u;
2768 template<class CastOp, class VecOp, typename AT>
2769 static void remapBilinear( const Mat& _src, Mat& _dst, const Mat& _xy,
2770 const Mat& _fxy, const void* _wtab,
2771 int borderType, const Scalar& _borderValue )
2773 typedef typename CastOp::rtype T;
2774 typedef typename CastOp::type1 WT;
2775 Size ssize = _src.size(), dsize = _dst.size();
2776 int cn = _src.channels();
2777 const AT* wtab = (const AT*)_wtab;
2778 const T* S0 = (const T*)_src.data;
2779 size_t sstep = _src.step/sizeof(S0[0]);
2780 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2781 saturate_cast<T>(_borderValue[1]),
2782 saturate_cast<T>(_borderValue[2]),
2783 saturate_cast<T>(_borderValue[3]));
2788 unsigned width1 = std::max(ssize.width-1, 0), height1 = std::max(ssize.height-1, 0);
2789 CV_Assert( cn <= 4 && ssize.area() > 0 );
2791 if( _src.type() == CV_8UC3 )
2792 width1 = std::max(ssize.width-2, 0);
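    /* Editorial note (not in the original source): the inlier margin shrinks by
       one extra pixel for CV_8UC3 because the SSE2 3-channel path loads 4 bytes
       per 3-byte pixel, so the last column must stay out of the vectorized
       inlier range. */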
2795 for( dy = 0; dy < dsize.height; dy++ )
2797 T* D = (T*)(_dst.data + _dst.step*dy);
2798 const short* XY = (const short*)(_xy.data + _xy.step*dy);
2799 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
2801 bool prevInlier = false;
2803 for( dx = 0; dx <= dsize.width; dx++ )
2805 bool curInlier = dx < dsize.width ?
2806 (unsigned)XY[dx*2] < width1 &&
2807 (unsigned)XY[dx*2+1] < height1 : !prevInlier;
2808 if( curInlier == prevInlier )
2814 prevInlier = curInlier;
2818 int len = vecOp( _src, D, XY + dx*2, FXY + dx, wtab, X1 - dx );
2824 for( ; dx < X1; dx++, D++ )
2826 int sx = XY[dx*2], sy = XY[dx*2+1];
2827 const AT* w = wtab + FXY[dx]*4;
2828 const T* S = S0 + sy*sstep + sx;
2829 *D = castOp(WT(S[0]*w[0] + S[1]*w[1] + S[sstep]*w[2] + S[sstep+1]*w[3]));
2833 for( ; dx < X1; dx++, D += 2 )
2835 int sx = XY[dx*2], sy = XY[dx*2+1];
2836 const AT* w = wtab + FXY[dx]*4;
2837 const T* S = S0 + sy*sstep + sx*2;
2838 WT t0 = S[0]*w[0] + S[2]*w[1] + S[sstep]*w[2] + S[sstep+2]*w[3];
2839 WT t1 = S[1]*w[0] + S[3]*w[1] + S[sstep+1]*w[2] + S[sstep+3]*w[3];
2840 D[0] = castOp(t0); D[1] = castOp(t1);
2843 for( ; dx < X1; dx++, D += 3 )
2845 int sx = XY[dx*2], sy = XY[dx*2+1];
2846 const AT* w = wtab + FXY[dx]*4;
2847 const T* S = S0 + sy*sstep + sx*3;
2848 WT t0 = S[0]*w[0] + S[3]*w[1] + S[sstep]*w[2] + S[sstep+3]*w[3];
2849 WT t1 = S[1]*w[0] + S[4]*w[1] + S[sstep+1]*w[2] + S[sstep+4]*w[3];
2850 WT t2 = S[2]*w[0] + S[5]*w[1] + S[sstep+2]*w[2] + S[sstep+5]*w[3];
2851 D[0] = castOp(t0); D[1] = castOp(t1); D[2] = castOp(t2);
2854 for( ; dx < X1; dx++, D += 4 )
2856 int sx = XY[dx*2], sy = XY[dx*2+1];
2857 const AT* w = wtab + FXY[dx]*4;
2858 const T* S = S0 + sy*sstep + sx*4;
2859 WT t0 = S[0]*w[0] + S[4]*w[1] + S[sstep]*w[2] + S[sstep+4]*w[3];
2860 WT t1 = S[1]*w[0] + S[5]*w[1] + S[sstep+1]*w[2] + S[sstep+5]*w[3];
2861 D[0] = castOp(t0); D[1] = castOp(t1);
2862 t0 = S[2]*w[0] + S[6]*w[1] + S[sstep+2]*w[2] + S[sstep+6]*w[3];
2863 t1 = S[3]*w[0] + S[7]*w[1] + S[sstep+3]*w[2] + S[sstep+7]*w[3];
2864 D[2] = castOp(t0); D[3] = castOp(t1);
2869 if( borderType == BORDER_TRANSPARENT && cn != 3 )
2877 for( ; dx < X1; dx++, D++ )
2879 int sx = XY[dx*2], sy = XY[dx*2+1];
2880 if( borderType == BORDER_CONSTANT &&
2881 (sx >= ssize.width || sx+1 < 0 ||
2882 sy >= ssize.height || sy+1 < 0) )
2888 int sx0, sx1, sy0, sy1;
2890 const AT* w = wtab + FXY[dx]*4;
2891 if( borderType == BORDER_REPLICATE )
2893 sx0 = clip(sx, 0, ssize.width);
2894 sx1 = clip(sx+1, 0, ssize.width);
2895 sy0 = clip(sy, 0, ssize.height);
2896 sy1 = clip(sy+1, 0, ssize.height);
2897 v0 = S0[sy0*sstep + sx0];
2898 v1 = S0[sy0*sstep + sx1];
2899 v2 = S0[sy1*sstep + sx0];
2900 v3 = S0[sy1*sstep + sx1];
2904 sx0 = borderInterpolate(sx, ssize.width, borderType);
2905 sx1 = borderInterpolate(sx+1, ssize.width, borderType);
2906 sy0 = borderInterpolate(sy, ssize.height, borderType);
2907 sy1 = borderInterpolate(sy+1, ssize.height, borderType);
2908 v0 = sx0 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx0] : cval[0];
2909 v1 = sx1 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx1] : cval[0];
2910 v2 = sx0 >= 0 && sy1 >= 0 ? S0[sy1*sstep + sx0] : cval[0];
2911 v3 = sx1 >= 0 && sy1 >= 0 ? S0[sy1*sstep + sx1] : cval[0];
2913 D[0] = castOp(WT(v0*w[0] + v1*w[1] + v2*w[2] + v3*w[3]));
2917 for( ; dx < X1; dx++, D += cn )
2919 int sx = XY[dx*2], sy = XY[dx*2+1], k;
2920 if( borderType == BORDER_CONSTANT &&
2921 (sx >= ssize.width || sx+1 < 0 ||
2922 sy >= ssize.height || sy+1 < 0) )
2924 for( k = 0; k < cn; k++ )
2929 int sx0, sx1, sy0, sy1;
2930 const T *v0, *v1, *v2, *v3;
2931 const AT* w = wtab + FXY[dx]*4;
2932 if( borderType == BORDER_REPLICATE )
2934 sx0 = clip(sx, 0, ssize.width);
2935 sx1 = clip(sx+1, 0, ssize.width);
2936 sy0 = clip(sy, 0, ssize.height);
2937 sy1 = clip(sy+1, 0, ssize.height);
2938 v0 = S0 + sy0*sstep + sx0*cn;
2939 v1 = S0 + sy0*sstep + sx1*cn;
2940 v2 = S0 + sy1*sstep + sx0*cn;
2941 v3 = S0 + sy1*sstep + sx1*cn;
2943 else if( borderType == BORDER_TRANSPARENT &&
2944 ((unsigned)sx >= (unsigned)(ssize.width-1) ||
2945 (unsigned)sy >= (unsigned)(ssize.height-1)))
2949 sx0 = borderInterpolate(sx, ssize.width, borderType);
2950 sx1 = borderInterpolate(sx+1, ssize.width, borderType);
2951 sy0 = borderInterpolate(sy, ssize.height, borderType);
2952 sy1 = borderInterpolate(sy+1, ssize.height, borderType);
2953 v0 = sx0 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx0*cn : &cval[0];
2954 v1 = sx1 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx1*cn : &cval[0];
2955 v2 = sx0 >= 0 && sy1 >= 0 ? S0 + sy1*sstep + sx0*cn : &cval[0];
2956 v3 = sx1 >= 0 && sy1 >= 0 ? S0 + sy1*sstep + sx1*cn : &cval[0];
2958 for( k = 0; k < cn; k++ )
2959 D[k] = castOp(WT(v0[k]*w[0] + v1[k]*w[1] + v2[k]*w[2] + v3[k]*w[3]));
2968 template<class CastOp, typename AT, int ONE>
2969 static void remapBicubic( const Mat& _src, Mat& _dst, const Mat& _xy,
2970 const Mat& _fxy, const void* _wtab,
2971 int borderType, const Scalar& _borderValue )
2973 typedef typename CastOp::rtype T;
2974 typedef typename CastOp::type1 WT;
2975 Size ssize = _src.size(), dsize = _dst.size();
2976 int cn = _src.channels();
2977 const AT* wtab = (const AT*)_wtab;
2978 const T* S0 = (const T*)_src.data;
2979 size_t sstep = _src.step/sizeof(S0[0]);
2980 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2981 saturate_cast<T>(_borderValue[1]),
2982 saturate_cast<T>(_borderValue[2]),
2983 saturate_cast<T>(_borderValue[3]));
2986 int borderType1 = borderType != BORDER_TRANSPARENT ? borderType : BORDER_REFLECT_101;
2988 unsigned width1 = std::max(ssize.width-3, 0), height1 = std::max(ssize.height-3, 0);
2990 if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() )
2992 dsize.width *= dsize.height;
2996 for( dy = 0; dy < dsize.height; dy++ )
2998 T* D = (T*)(_dst.data + _dst.step*dy);
2999 const short* XY = (const short*)(_xy.data + _xy.step*dy);
3000 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
3002 for( dx = 0; dx < dsize.width; dx++, D += cn )
3004 int sx = XY[dx*2]-1, sy = XY[dx*2+1]-1;
3005 const AT* w = wtab + FXY[dx]*16;
3007 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
3009 const T* S = S0 + sy*sstep + sx*cn;
3010 for( k = 0; k < cn; k++ )
3012 WT sum = S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3];
3014 sum += S[0]*w[4] + S[cn]*w[5] + S[cn*2]*w[6] + S[cn*3]*w[7];
3016 sum += S[0]*w[8] + S[cn]*w[9] + S[cn*2]*w[10] + S[cn*3]*w[11];
3018 sum += S[0]*w[12] + S[cn]*w[13] + S[cn*2]*w[14] + S[cn*3]*w[15];
3026 if( borderType == BORDER_TRANSPARENT &&
3027 ((unsigned)(sx+1) >= (unsigned)ssize.width ||
3028 (unsigned)(sy+1) >= (unsigned)ssize.height) )
3031 if( borderType1 == BORDER_CONSTANT &&
3032 (sx >= ssize.width || sx+4 <= 0 ||
3033 sy >= ssize.height || sy+4 <= 0))
3035 for( k = 0; k < cn; k++ )
3040 for( i = 0; i < 4; i++ )
3042 x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn;
3043 y[i] = borderInterpolate(sy + i, ssize.height, borderType1);
3046 for( k = 0; k < cn; k++, S0++, w -= 16 )
3048 WT cv = cval[k], sum = cv*ONE;
3049 for( i = 0; i < 4; i++, w += 4 )
3052 const T* S = S0 + yi*sstep;
3056 sum += (S[x[0]] - cv)*w[0];
3058 sum += (S[x[1]] - cv)*w[1];
3060 sum += (S[x[2]] - cv)*w[2];
3062 sum += (S[x[3]] - cv)*w[3];
3073 template<class CastOp, typename AT, int ONE>
3074 static void remapLanczos4( const Mat& _src, Mat& _dst, const Mat& _xy,
3075 const Mat& _fxy, const void* _wtab,
3076 int borderType, const Scalar& _borderValue )
3078 typedef typename CastOp::rtype T;
3079 typedef typename CastOp::type1 WT;
3080 Size ssize = _src.size(), dsize = _dst.size();
3081 int cn = _src.channels();
3082 const AT* wtab = (const AT*)_wtab;
3083 const T* S0 = (const T*)_src.data;
3084 size_t sstep = _src.step/sizeof(S0[0]);
3085 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
3086 saturate_cast<T>(_borderValue[1]),
3087 saturate_cast<T>(_borderValue[2]),
3088 saturate_cast<T>(_borderValue[3]));
3091 int borderType1 = borderType != BORDER_TRANSPARENT ? borderType : BORDER_REFLECT_101;
3093 unsigned width1 = std::max(ssize.width-7, 0), height1 = std::max(ssize.height-7, 0);
3095 if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() )
3097 dsize.width *= dsize.height;
3101 for( dy = 0; dy < dsize.height; dy++ )
3103 T* D = (T*)(_dst.data + _dst.step*dy);
3104 const short* XY = (const short*)(_xy.data + _xy.step*dy);
3105 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
3107 for( dx = 0; dx < dsize.width; dx++, D += cn )
3109 int sx = XY[dx*2]-3, sy = XY[dx*2+1]-3;
3110 const AT* w = wtab + FXY[dx]*64;
3111 const T* S = S0 + sy*sstep + sx*cn;
3113 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
3115 for( k = 0; k < cn; k++ )
3118 for( int r = 0; r < 8; r++, S += sstep, w += 8 )
3119 sum += S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3] +
3120 S[cn*4]*w[4] + S[cn*5]*w[5] + S[cn*6]*w[6] + S[cn*7]*w[7];
3129 if( borderType == BORDER_TRANSPARENT &&
3130 ((unsigned)(sx+3) >= (unsigned)ssize.width ||
3131 (unsigned)(sy+3) >= (unsigned)ssize.height) )
3134 if( borderType1 == BORDER_CONSTANT &&
3135 (sx >= ssize.width || sx+8 <= 0 ||
3136 sy >= ssize.height || sy+8 <= 0))
3138 for( k = 0; k < cn; k++ )
3143 for( i = 0; i < 8; i++ )
3145 x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn;
3146 y[i] = borderInterpolate(sy + i, ssize.height, borderType1);
3149 for( k = 0; k < cn; k++, S0++, w -= 64 )
3151 WT cv = cval[k], sum = cv*ONE;
3152 for( i = 0; i < 8; i++, w += 8 )
3155 const T* S1 = S0 + yi*sstep;
3159 sum += (S1[x[0]] - cv)*w[0];
3161 sum += (S1[x[1]] - cv)*w[1];
3163 sum += (S1[x[2]] - cv)*w[2];
3165 sum += (S1[x[3]] - cv)*w[3];
3167 sum += (S1[x[4]] - cv)*w[4];
3169 sum += (S1[x[5]] - cv)*w[5];
3171 sum += (S1[x[6]] - cv)*w[6];
3173 sum += (S1[x[7]] - cv)*w[7];
3184 typedef void (*RemapNNFunc)(const Mat& _src, Mat& _dst, const Mat& _xy,
3185 int borderType, const Scalar& _borderValue );
3187 typedef void (*RemapFunc)(const Mat& _src, Mat& _dst, const Mat& _xy,
3188 const Mat& _fxy, const void* _wtab,
3189 int borderType, const Scalar& _borderValue);
3191 class RemapInvoker :
3192 public ParallelLoopBody
3195 RemapInvoker(const Mat& _src, Mat& _dst, const Mat *_m1,
3196 const Mat *_m2, int _borderType, const Scalar &_borderValue,
3197 int _planar_input, RemapNNFunc _nnfunc, RemapFunc _ifunc, const void *_ctab) :
3198 ParallelLoopBody(), src(&_src), dst(&_dst), m1(_m1), m2(_m2),
3199 borderType(_borderType), borderValue(_borderValue),
3200 planar_input(_planar_input), nnfunc(_nnfunc), ifunc(_ifunc), ctab(_ctab)
3204 virtual void operator() (const Range& range) const
3207 const int buf_size = 1 << 14;
3208 int brows0 = std::min(128, dst->rows), map_depth = m1->depth();
3209 int bcols0 = std::min(buf_size/brows0, dst->cols);
3210 brows0 = std::min(buf_size/bcols0, dst->rows);
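        /* Editorial note (not in the original source): the destination is
           processed in tiles of at most buf_size (1 << 14) map entries;
           _bufxy/_bufa below are allocated once per invoker call and reused
           for every tile, keeping the per-thread scratch memory small. */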
3212 bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
3215 Mat _bufxy(brows0, bcols0, CV_16SC2), _bufa;
3217 _bufa.create(brows0, bcols0, CV_16UC1);
3219 for( y = range.start; y < range.end; y += brows0 )
3221 for( x = 0; x < dst->cols; x += bcols0 )
3223 int brows = std::min(brows0, range.end - y);
3224 int bcols = std::min(bcols0, dst->cols - x);
3225 Mat dpart(*dst, Rect(x, y, bcols, brows));
3226 Mat bufxy(_bufxy, Rect(0, 0, bcols, brows));
3230 if( m1->type() == CV_16SC2 && !m2->data ) // the data is already in the right format
3231 bufxy = (*m1)(Rect(x, y, bcols, brows));
3232 else if( map_depth != CV_32F )
3234 for( y1 = 0; y1 < brows; y1++ )
3236 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3237 const short* sXY = (const short*)(m1->data + m1->step*(y+y1)) + x*2;
3238 const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
3240 for( x1 = 0; x1 < bcols; x1++ )
3242 int a = sA[x1] & (INTER_TAB_SIZE2-1);
3243 XY[x1*2] = sXY[x1*2] + NNDeltaTab_i[a][0];
3244 XY[x1*2+1] = sXY[x1*2+1] + NNDeltaTab_i[a][1];
3248 else if( !planar_input )
3249 (*m1)(Rect(x, y, bcols, brows)).convertTo(bufxy, bufxy.depth());
3252 for( y1 = 0; y1 < brows; y1++ )
3254 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3255 const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
3256 const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
3262 for( ; x1 <= bcols - 8; x1 += 8 )
3264 __m128 fx0 = _mm_loadu_ps(sX + x1);
3265 __m128 fx1 = _mm_loadu_ps(sX + x1 + 4);
3266 __m128 fy0 = _mm_loadu_ps(sY + x1);
3267 __m128 fy1 = _mm_loadu_ps(sY + x1 + 4);
3268 __m128i ix0 = _mm_cvtps_epi32(fx0);
3269 __m128i ix1 = _mm_cvtps_epi32(fx1);
3270 __m128i iy0 = _mm_cvtps_epi32(fy0);
3271 __m128i iy1 = _mm_cvtps_epi32(fy1);
3272 ix0 = _mm_packs_epi32(ix0, ix1);
3273 iy0 = _mm_packs_epi32(iy0, iy1);
3274 ix1 = _mm_unpacklo_epi16(ix0, iy0);
3275 iy1 = _mm_unpackhi_epi16(ix0, iy0);
3276 _mm_storeu_si128((__m128i*)(XY + x1*2), ix1);
3277 _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1);
3282 for( ; x1 < bcols; x1++ )
3284 XY[x1*2] = saturate_cast<short>(sX[x1]);
3285 XY[x1*2+1] = saturate_cast<short>(sY[x1]);
3289 nnfunc( *src, dpart, bufxy, borderType, borderValue );
3293 Mat bufa(_bufa, Rect(0, 0, bcols, brows));
3294 for( y1 = 0; y1 < brows; y1++ )
3296 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3297 ushort* A = (ushort*)(bufa.data + bufa.step*y1);
3299 if( m1->type() == CV_16SC2 && (m2->type() == CV_16UC1 || m2->type() == CV_16SC1) )
3301 bufxy = (*m1)(Rect(x, y, bcols, brows));
3303 const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
3304 for( x1 = 0; x1 < bcols; x1++ )
3305 A[x1] = (ushort)(sA[x1] & (INTER_TAB_SIZE2-1));
3307 else if( planar_input )
3309 const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
3310 const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
3316 __m128 scale = _mm_set1_ps((float)INTER_TAB_SIZE);
3317 __m128i mask = _mm_set1_epi32(INTER_TAB_SIZE-1);
3318 for( ; x1 <= bcols - 8; x1 += 8 )
3320 __m128 fx0 = _mm_loadu_ps(sX + x1);
3321 __m128 fx1 = _mm_loadu_ps(sX + x1 + 4);
3322 __m128 fy0 = _mm_loadu_ps(sY + x1);
3323 __m128 fy1 = _mm_loadu_ps(sY + x1 + 4);
3324 __m128i ix0 = _mm_cvtps_epi32(_mm_mul_ps(fx0, scale));
3325 __m128i ix1 = _mm_cvtps_epi32(_mm_mul_ps(fx1, scale));
3326 __m128i iy0 = _mm_cvtps_epi32(_mm_mul_ps(fy0, scale));
3327 __m128i iy1 = _mm_cvtps_epi32(_mm_mul_ps(fy1, scale));
3328 __m128i mx0 = _mm_and_si128(ix0, mask);
3329 __m128i mx1 = _mm_and_si128(ix1, mask);
3330 __m128i my0 = _mm_and_si128(iy0, mask);
3331 __m128i my1 = _mm_and_si128(iy1, mask);
3332 mx0 = _mm_packs_epi32(mx0, mx1);
3333 my0 = _mm_packs_epi32(my0, my1);
3334 my0 = _mm_slli_epi16(my0, INTER_BITS);
3335 mx0 = _mm_or_si128(mx0, my0);
3336 _mm_storeu_si128((__m128i*)(A + x1), mx0);
3337 ix0 = _mm_srai_epi32(ix0, INTER_BITS);
3338 ix1 = _mm_srai_epi32(ix1, INTER_BITS);
3339 iy0 = _mm_srai_epi32(iy0, INTER_BITS);
3340 iy1 = _mm_srai_epi32(iy1, INTER_BITS);
3341 ix0 = _mm_packs_epi32(ix0, ix1);
3342 iy0 = _mm_packs_epi32(iy0, iy1);
3343 ix1 = _mm_unpacklo_epi16(ix0, iy0);
3344 iy1 = _mm_unpackhi_epi16(ix0, iy0);
3345 _mm_storeu_si128((__m128i*)(XY + x1*2), ix1);
3346 _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1);
3351 for( ; x1 < bcols; x1++ )
3353 int sx = cvRound(sX[x1]*INTER_TAB_SIZE);
3354 int sy = cvRound(sY[x1]*INTER_TAB_SIZE);
3355 int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1));
3356 XY[x1*2] = saturate_cast<short>(sx >> INTER_BITS);
3357 XY[x1*2+1] = saturate_cast<short>(sy >> INTER_BITS);
3363 const float* sXY = (const float*)(m1->data + m1->step*(y+y1)) + x*2;
3365 for( x1 = 0; x1 < bcols; x1++ )
3367 int sx = cvRound(sXY[x1*2]*INTER_TAB_SIZE);
3368 int sy = cvRound(sXY[x1*2+1]*INTER_TAB_SIZE);
3369 int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1));
3370 XY[x1*2] = saturate_cast<short>(sx >> INTER_BITS);
3371 XY[x1*2+1] = saturate_cast<short>(sy >> INTER_BITS);
3376 ifunc(*src, dpart, bufxy, bufa, ctab, borderType, borderValue);
3393 static bool ocl_remap(InputArray _src, OutputArray _dst, InputArray _map1, InputArray _map2,
3394 int interpolation, int borderType, const Scalar& borderValue)
3396 int cn = _src.channels(), type = _src.type(), depth = _src.depth();
3398 if (borderType == BORDER_TRANSPARENT || cn == 3 || !(interpolation == INTER_LINEAR || interpolation == INTER_NEAREST)
3399 || _map1.type() == CV_16SC1 || _map2.type() == CV_16SC1)
3402 UMat src = _src.getUMat(), map1 = _map1.getUMat(), map2 = _map2.getUMat();
3404 if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.empty())) ||
3405 (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.empty())) )
3407 if (map1.type() != CV_16SC2)
3408 std::swap(map1, map2);
3411 CV_Assert( map1.type() == CV_32FC2 || (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) );
3413 _dst.create(map1.size(), type);
3414 UMat dst = _dst.getUMat();
3416 String kernelName = "remap";
3417 if (map1.type() == CV_32FC2 && map2.empty())
3418 kernelName += "_32FC2";
3419 else if (map1.type() == CV_16SC2)
3421 kernelName += "_16SC2";
3423 kernelName += "_16UC1";
3425 else if (map1.type() == CV_32FC1 && map2.type() == CV_32FC1)
3426 kernelName += "_2_32FC1";
3428 CV_Error(Error::StsBadArg, "Unsupported map types");
3430 static const char * const interMap[] = { "INTER_NEAREST", "INTER_LINEAR", "INTER_CUBIC", "INTER_LINEAR", "INTER_LANCZOS" };
3431 static const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
3432 "BORDER_REFLECT_101", "BORDER_TRANSPARENT" };
3433 String buildOptions = format("-D %s -D %s -D T=%s", interMap[interpolation], borderMap[borderType], ocl::typeToStr(type));
3435 if (interpolation != INTER_NEAREST)
3438 int wdepth = std::max(CV_32F, dst.depth());
3439 buildOptions = buildOptions
3440 + format(" -D WT=%s -D convertToT=%s -D convertToWT=%s"
3441 " -D convertToWT2=%s -D WT2=%s",
3442 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
3443 ocl::convertTypeStr(wdepth, depth, cn, cvt[0]),
3444 ocl::convertTypeStr(depth, wdepth, cn, cvt[1]),
3445 ocl::convertTypeStr(CV_32S, wdepth, 2, cvt[2]),
3446 ocl::typeToStr(CV_MAKE_TYPE(wdepth, 2)));
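        /* Editorial note (not in the original source): the OpenCL kernel is
           specialized at build time; the -D defines select the interpolation
           mode, border handling, pixel type T and working type WT, so the
           device compiler emits a kernel tailored to exactly this type
           combination. */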
3449 ocl::Kernel k(kernelName.c_str(), ocl::imgproc::remap_oclsrc, buildOptions);
3451 Mat scalar(1, 1, type, borderValue);
3452 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), dstarg = ocl::KernelArg::WriteOnly(dst),
3453 map1arg = ocl::KernelArg::ReadOnlyNoSize(map1),
3454 scalararg = ocl::KernelArg::Constant((void*)scalar.data, scalar.elemSize());
3457 k.args(srcarg, dstarg, map1arg, scalararg);
3459 k.args(srcarg, dstarg, map1arg, ocl::KernelArg::ReadOnlyNoSize(map2), scalararg);
3461 size_t globalThreads[2] = { dst.cols, dst.rows };
3462 return k.run(2, globalThreads, NULL, false);
3467 void cv::remap( InputArray _src, OutputArray _dst,
3468 InputArray _map1, InputArray _map2,
3469 int interpolation, int borderType, const Scalar& borderValue )
3471 static RemapNNFunc nn_tab[] =
3473 remapNearest<uchar>, remapNearest<schar>, remapNearest<ushort>, remapNearest<short>,
3474 remapNearest<int>, remapNearest<float>, remapNearest<double>, 0
3477 static RemapFunc linear_tab[] =
3479 remapBilinear<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, RemapVec_8u, short>, 0,
3480 remapBilinear<Cast<float, ushort>, RemapNoVec, float>,
3481 remapBilinear<Cast<float, short>, RemapNoVec, float>, 0,
3482 remapBilinear<Cast<float, float>, RemapNoVec, float>,
3483 remapBilinear<Cast<double, double>, RemapNoVec, float>, 0
3486 static RemapFunc cubic_tab[] =
3488 remapBicubic<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, short, INTER_REMAP_COEF_SCALE>, 0,
3489 remapBicubic<Cast<float, ushort>, float, 1>,
3490 remapBicubic<Cast<float, short>, float, 1>, 0,
3491 remapBicubic<Cast<float, float>, float, 1>,
3492 remapBicubic<Cast<double, double>, float, 1>, 0
3495 static RemapFunc lanczos4_tab[] =
3497 remapLanczos4<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, short, INTER_REMAP_COEF_SCALE>, 0,
3498 remapLanczos4<Cast<float, ushort>, float, 1>,
3499 remapLanczos4<Cast<float, short>, float, 1>, 0,
3500 remapLanczos4<Cast<float, float>, float, 1>,
3501 remapLanczos4<Cast<double, double>, float, 1>, 0
3504 CV_Assert( _map1.size().area() > 0 );
3505 CV_Assert( _map2.empty() || (_map2.size() == _map1.size()));
3507 if (ocl::useOpenCL() && _dst.isUMat() && ocl_remap(_src, _dst, _map1, _map2, interpolation, borderType, borderValue))
3510 Mat src = _src.getMat(), map1 = _map1.getMat(), map2 = _map2.getMat();
3511 _dst.create( map1.size(), src.type() );
3512 Mat dst = _dst.getMat();
3513 if( dst.data == src.data )
3516 int depth = src.depth();
3517 RemapNNFunc nnfunc = 0;
3518 RemapFunc ifunc = 0;
3519 const void* ctab = 0;
3520 bool fixpt = depth == CV_8U;
3521 bool planar_input = false;
3523 if( interpolation == INTER_NEAREST )
3525 nnfunc = nn_tab[depth];
3526 CV_Assert( nnfunc != 0 );
3530 if( interpolation == INTER_AREA )
3531 interpolation = INTER_LINEAR;
3533 if( interpolation == INTER_LINEAR )
3534 ifunc = linear_tab[depth];
3535 else if( interpolation == INTER_CUBIC )
3536 ifunc = cubic_tab[depth];
3537 else if( interpolation == INTER_LANCZOS4 )
3538 ifunc = lanczos4_tab[depth];
3540 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
3541 CV_Assert( ifunc != 0 );
3542 ctab = initInterTab2D( interpolation, fixpt );
3545 const Mat *m1 = &map1, *m2 = &map2;
3547 if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.type() == CV_16SC1 || !map2.data)) ||
3548 (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.type() == CV_16SC1 || !map1.data)) )
3550 if( map1.type() != CV_16SC2 )
3555 CV_Assert( ((map1.type() == CV_32FC2 || map1.type() == CV_16SC2) && !map2.data) ||
3556 (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) );
3557 planar_input = map1.channels() == 1;
3560 RemapInvoker invoker(src, dst, m1, m2,
3561 borderType, borderValue, planar_input, nnfunc, ifunc,
3563 parallel_for_(Range(0, dst.rows), invoker, dst.total()/(double)(1<<16));
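/* Editorial sketch (not part of the original source): convertMaps turns float
   maps into the fixed-point representation that remap consumes fastest: a
   CV_16SC2 map holding the integer parts of (x, y) plus a CV_16UC1 map whose
   entries pack the fractional parts as (fy*INTER_TAB_SIZE + fx), each
   quantized to INTER_BITS bits. Converting once and reusing the result pays
   off when the same warp is applied to many images. Assuming float maps
   mapx/mapy as in the earlier sketch:

       cv::Mat fixedXY, fixedA;
       cv::convertMaps(mapx, mapy, fixedXY, fixedA, CV_16SC2);
       cv::remap(img, out, fixedXY, fixedA, cv::INTER_LINEAR);
*/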
3567 void cv::convertMaps( InputArray _map1, InputArray _map2,
3568 OutputArray _dstmap1, OutputArray _dstmap2,
3569 int dstm1type, bool nninterpolate )
3571 Mat map1 = _map1.getMat(), map2 = _map2.getMat(), dstmap1, dstmap2;
3572 Size size = map1.size();
3573 const Mat *m1 = &map1, *m2 = &map2;
3574 int m1type = m1->type(), m2type = m2->type();
3576 CV_Assert( (m1type == CV_16SC2 && (nninterpolate || m2type == CV_16UC1 || m2type == CV_16SC1)) ||
3577 (m2type == CV_16SC2 && (nninterpolate || m1type == CV_16UC1 || m1type == CV_16SC1)) ||
3578 (m1type == CV_32FC1 && m2type == CV_32FC1) ||
3579 (m1type == CV_32FC2 && !m2->data) );
3581 if( m2type == CV_16SC2 )
3583 std::swap( m1, m2 );
3584 std::swap( m1type, m2type );
3587 if( dstm1type <= 0 )
3588 dstm1type = m1type == CV_16SC2 ? CV_32FC2 : CV_16SC2;
3589 CV_Assert( dstm1type == CV_16SC2 || dstm1type == CV_32FC1 || dstm1type == CV_32FC2 );
3590 _dstmap1.create( size, dstm1type );
3591 dstmap1 = _dstmap1.getMat();
3593 if( !nninterpolate && dstm1type != CV_32FC2 )
3595 _dstmap2.create( size, dstm1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
3596 dstmap2 = _dstmap2.getMat();
3601 if( m1type == dstm1type || (nninterpolate &&
3602 ((m1type == CV_16SC2 && dstm1type == CV_32FC2) ||
3603 (m1type == CV_32FC2 && dstm1type == CV_16SC2))) )
3605 m1->convertTo( dstmap1, dstmap1.type() );
3606 if( dstmap2.data && dstmap2.type() == m2->type() )
3607 m2->copyTo( dstmap2 );
3611 if( m1type == CV_32FC1 && dstm1type == CV_32FC2 )
3613 Mat vdata[] = { *m1, *m2 };
3614 merge( vdata, 2, dstmap1 );
3618 if( m1type == CV_32FC2 && dstm1type == CV_32FC1 )
3620 Mat mv[] = { dstmap1, dstmap2 };
3625 if( m1->isContinuous() && (!m2->data || m2->isContinuous()) &&
3626 dstmap1.isContinuous() && (!dstmap2.data || dstmap2.isContinuous()) )
3628 size.width *= size.height;
3632 const float scale = 1.f/INTER_TAB_SIZE;
3634 for( y = 0; y < size.height; y++ )
3636 const float* src1f = (const float*)(m1->data + m1->step*y);
3637 const float* src2f = (const float*)(m2->data + m2->step*y);
3638 const short* src1 = (const short*)src1f;
3639 const ushort* src2 = (const ushort*)src2f;
3641 float* dst1f = (float*)(dstmap1.data + dstmap1.step*y);
3642 float* dst2f = (float*)(dstmap2.data + dstmap2.step*y);
3643 short* dst1 = (short*)dst1f;
3644 ushort* dst2 = (ushort*)dst2f;
3646 if( m1type == CV_32FC1 && dstm1type == CV_16SC2 )
3649 for( x = 0; x < size.width; x++ )
3651 dst1[x*2] = saturate_cast<short>(src1f[x]);
3652 dst1[x*2+1] = saturate_cast<short>(src2f[x]);
3655 for( x = 0; x < size.width; x++ )
3657 int ix = saturate_cast<int>(src1f[x]*INTER_TAB_SIZE);
3658 int iy = saturate_cast<int>(src2f[x]*INTER_TAB_SIZE);
3659 dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
3660 dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
3661 dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
3664 else if( m1type == CV_32FC2 && dstm1type == CV_16SC2 )
3667 for( x = 0; x < size.width; x++ )
3669 dst1[x*2] = saturate_cast<short>(src1f[x*2]);
3670 dst1[x*2+1] = saturate_cast<short>(src1f[x*2+1]);
3673 for( x = 0; x < size.width; x++ )
3675 int ix = saturate_cast<int>(src1f[x*2]*INTER_TAB_SIZE);
3676 int iy = saturate_cast<int>(src1f[x*2+1]*INTER_TAB_SIZE);
3677 dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
3678 dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
3679 dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
3682 else if( m1type == CV_16SC2 && dstm1type == CV_32FC1 )
3684 for( x = 0; x < size.width; x++ )
3686 int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1) : 0;
3687 dst1f[x] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
3688 dst2f[x] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
3691 else if( m1type == CV_16SC2 && dstm1type == CV_32FC2 )
3693 for( x = 0; x < size.width; x++ )
3695 int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1): 0;
3696 dst1f[x*2] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
3697 dst1f[x*2+1] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
3701 CV_Error( CV_StsNotImplemented, "Unsupported combination of input/output matrices" );
3709 class warpAffineInvoker :
3710 public ParallelLoopBody
3713 warpAffineInvoker(const Mat &_src, Mat &_dst, int _interpolation, int _borderType,
3714 const Scalar &_borderValue, int *_adelta, int *_bdelta, double *_M) :
3715 ParallelLoopBody(), src(_src), dst(_dst), interpolation(_interpolation),
3716 borderType(_borderType), borderValue(_borderValue), adelta(_adelta), bdelta(_bdelta),
3721 virtual void operator() (const Range& range) const
3723 const int BLOCK_SZ = 64;
3724 short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
3725 const int AB_BITS = MAX(10, (int)INTER_BITS);
3726 const int AB_SCALE = 1 << AB_BITS;
3727 int round_delta = interpolation == INTER_NEAREST ? AB_SCALE/2 : AB_SCALE/INTER_TAB_SIZE/2, x, y, x1, y1;
3729 bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
3732 int bh0 = std::min(BLOCK_SZ/2, dst.rows);
3733 int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, dst.cols);
3734 bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, dst.rows);
3736 for( y = range.start; y < range.end; y += bh0 )
3738 for( x = 0; x < dst.cols; x += bw0 )
3740 int bw = std::min( bw0, dst.cols - x);
3741 int bh = std::min( bh0, range.end - y);
3743 Mat _XY(bh, bw, CV_16SC2, XY), matA;
3744 Mat dpart(dst, Rect(x, y, bw, bh));
3746 for( y1 = 0; y1 < bh; y1++ )
3748 short* xy = XY + y1*bw*2;
3749 int X0 = saturate_cast<int>((M[1]*(y + y1) + M[2])*AB_SCALE) + round_delta;
3750 int Y0 = saturate_cast<int>((M[4]*(y + y1) + M[5])*AB_SCALE) + round_delta;
3752 if( interpolation == INTER_NEAREST )
3753 for( x1 = 0; x1 < bw; x1++ )
3755 int X = (X0 + adelta[x+x1]) >> AB_BITS;
3756 int Y = (Y0 + bdelta[x+x1]) >> AB_BITS;
3757 xy[x1*2] = saturate_cast<short>(X);
3758 xy[x1*2+1] = saturate_cast<short>(Y);
3762 short* alpha = A + y1*bw;
3767 __m128i fxy_mask = _mm_set1_epi32(INTER_TAB_SIZE - 1);
3768 __m128i XX = _mm_set1_epi32(X0), YY = _mm_set1_epi32(Y0);
3769 for( ; x1 <= bw - 8; x1 += 8 )
3771 __m128i tx0, tx1, ty0, ty1;
3772 tx0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1)), XX);
3773 ty0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1)), YY);
3774 tx1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1 + 4)), XX);
3775 ty1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1 + 4)), YY);
3777 tx0 = _mm_srai_epi32(tx0, AB_BITS - INTER_BITS);
3778 ty0 = _mm_srai_epi32(ty0, AB_BITS - INTER_BITS);
3779 tx1 = _mm_srai_epi32(tx1, AB_BITS - INTER_BITS);
3780 ty1 = _mm_srai_epi32(ty1, AB_BITS - INTER_BITS);
3782 __m128i fx_ = _mm_packs_epi32(_mm_and_si128(tx0, fxy_mask),
3783 _mm_and_si128(tx1, fxy_mask));
3784 __m128i fy_ = _mm_packs_epi32(_mm_and_si128(ty0, fxy_mask),
3785 _mm_and_si128(ty1, fxy_mask));
3786 tx0 = _mm_packs_epi32(_mm_srai_epi32(tx0, INTER_BITS),
3787 _mm_srai_epi32(tx1, INTER_BITS));
3788 ty0 = _mm_packs_epi32(_mm_srai_epi32(ty0, INTER_BITS),
3789 _mm_srai_epi32(ty1, INTER_BITS));
3790 fx_ = _mm_adds_epi16(fx_, _mm_slli_epi16(fy_, INTER_BITS));
3792 _mm_storeu_si128((__m128i*)(xy + x1*2), _mm_unpacklo_epi16(tx0, ty0));
3793 _mm_storeu_si128((__m128i*)(xy + x1*2 + 8), _mm_unpackhi_epi16(tx0, ty0));
3794 _mm_storeu_si128((__m128i*)(alpha + x1), fx_);
3798 for( ; x1 < bw; x1++ )
3800 int X = (X0 + adelta[x+x1]) >> (AB_BITS - INTER_BITS);
3801 int Y = (Y0 + bdelta[x+x1]) >> (AB_BITS - INTER_BITS);
3802 xy[x1*2] = saturate_cast<short>(X >> INTER_BITS);
3803 xy[x1*2+1] = saturate_cast<short>(Y >> INTER_BITS);
3804 alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
3805 (X & (INTER_TAB_SIZE-1)));
3810 if( interpolation == INTER_NEAREST )
3811 remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
3814 Mat _matA(bh, bw, CV_16U, A);
3815 remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
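                /* Editorial note (not in the original source): warpAffine is
                   implemented as map generation plus remap. Each tile of the
                   destination gets its (x, y) source coordinates written into
                   the stack buffer XY (and fractional parts into A), and the
                   shared remap machinery then performs the interpolation. */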
3824 int interpolation, borderType;
3826 int *adelta, *bdelta;
3830 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
3831 class IPPwarpAffineInvoker :
3832 public ParallelLoopBody
3835 IPPwarpAffineInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[2][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpAffineBackFunc _func, bool *_ok) :
3836 ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
3841 virtual void operator() (const Range& range) const
3843 IppiSize srcsize = { src.cols, src.rows };
3844 IppiRect srcroi = { 0, 0, src.cols, src.rows };
3845 IppiRect dstroi = { 0, range.start, dst.cols, range.end - range.start };
3846 int cnn = src.channels();
3847 if( borderType == BORDER_CONSTANT )
3849 IppiSize setSize = { dst.cols, range.end - range.start };
3850 void *dataPointer = dst.data + dst.step[0] * range.start;
3851 if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
3857         if( func( src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode ) < 0 ) // Aug 2013: problem in IPP 7.1 and 8.0: the function sometimes returns ippStsCoeffErr
3863 double (&coeffs)[2][3];
3867 ippiWarpAffineBackFunc func;
3869 const IPPwarpAffineInvoker& operator= (const IPPwarpAffineInvoker&);
3873 enum { OCL_OP_PERSPECTIVE = 1, OCL_OP_AFFINE = 0 };
3875 static bool ocl_warpTransform(InputArray _src, OutputArray _dst, InputArray _M0,
3876 Size dsize, int flags, int borderType, const Scalar& borderValue,
3879 CV_Assert(op_type == OCL_OP_AFFINE || op_type == OCL_OP_PERSPECTIVE);
3881 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), wdepth = depth;
3882     bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
3884 int interpolation = flags & INTER_MAX;
3885 if( interpolation == INTER_AREA )
3886 interpolation = INTER_LINEAR;
3888 if ( !(borderType == cv::BORDER_CONSTANT &&
3889 (interpolation == cv::INTER_NEAREST || interpolation == cv::INTER_LINEAR || interpolation == cv::INTER_CUBIC)) ||
3890 (!doubleSupport && depth == CV_64F) || cn > 4 || cn == 3)
3893 const char * const interpolationMap[3] = { "NEAREST", "LINEAR", "CUBIC" };
3894 ocl::ProgramSource2 program = op_type == OCL_OP_AFFINE ?
3895 ocl::imgproc::warp_affine_oclsrc : ocl::imgproc::warp_perspective_oclsrc;
3896 const char * const kernelName = op_type == OCL_OP_AFFINE ? "warpAffine" : "warpPerspective";
3899 if (interpolation == INTER_NEAREST)
3901 k.create(kernelName, program,
3902 format("-D INTER_NEAREST -D T=%s%s", ocl::typeToStr(type),
3903 doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
3908 wdepth = std::max(CV_32S, depth);
3909 k.create(kernelName, program,
3910 format("-D INTER_%s -D T=%s -D WT=%s -D depth=%d -D convertToWT=%s -D convertToT=%s%s",
3911 interpolationMap[interpolation], ocl::typeToStr(type),
3912 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)), depth,
3913 ocl::convertTypeStr(depth, wdepth, cn, cvt[0]),
3914 ocl::convertTypeStr(wdepth, depth, cn, cvt[1]),
3915 doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
3920 UMat src = _src.getUMat(), M0;
3921 _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
3922 UMat dst = _dst.getUMat();
3925 int matRows = (op_type == OCL_OP_AFFINE ? 2 : 3);
3926 Mat matM(matRows, 3, CV_64F, M), M1 = _M0.getMat();
3927 CV_Assert( (M1.type() == CV_32F || M1.type() == CV_64F) &&
3928 M1.rows == matRows && M1.cols == 3 );
3929 M1.convertTo(matM, matM.type());
3931 if( !(flags & WARP_INVERSE_MAP) )
3933 if (op_type == OCL_OP_PERSPECTIVE)
3937 double D = M[0]*M[4] - M[1]*M[3];
3938 D = D != 0 ? 1./D : 0;
3939 double A11 = M[4]*D, A22=M[0]*D;
3940 M[0] = A11; M[1] *= -D;
3941 M[3] *= -D; M[4] = A22;
3942 double b1 = -M[0]*M[2] - M[1]*M[5];
3943 double b2 = -M[3]*M[2] - M[4]*M[5];
3944 M[2] = b1; M[5] = b2;
3947 matM.convertTo(M0, doubleSupport ? CV_64F : CV_32F);
3949 k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst), ocl::KernelArg::PtrReadOnly(M0),
3950 ocl::KernelArg::Constant(Mat(1, 1, CV_MAKE_TYPE(wdepth, cn), borderValue)));
3952 size_t globalThreads[2] = { dst.cols, dst.rows };
3953 return k.run(2, globalThreads, NULL, false);
3959 void cv::warpAffine( InputArray _src, OutputArray _dst,
3960 InputArray _M0, Size dsize,
3961 int flags, int borderType, const Scalar& borderValue )
3963 if (ocl::useOpenCL() && _dst.isUMat() &&
3964 ocl_warpTransform(_src, _dst, _M0, dsize, flags, borderType,
3965 borderValue, OCL_OP_AFFINE))
3968 Mat src = _src.getMat(), M0 = _M0.getMat();
3969 _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
3970 Mat dst = _dst.getMat();
3971 CV_Assert( src.cols > 0 && src.rows > 0 );
3972 if( dst.data == src.data )
3976 Mat matM(2, 3, CV_64F, M);
3977 int interpolation = flags & INTER_MAX;
3978 if( interpolation == INTER_AREA )
3979 interpolation = INTER_LINEAR;
3981 CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 );
3982 M0.convertTo(matM, matM.type());
3984 #ifdef HAVE_TEGRA_OPTIMIZATION
3985 if( tegra::warpAffine(src, dst, M, flags, borderType, borderValue) )
3989 if( !(flags & WARP_INVERSE_MAP) )
3991 double D = M[0]*M[4] - M[1]*M[3];
3992 D = D != 0 ? 1./D : 0;
3993 double A11 = M[4]*D, A22=M[0]*D;
3994 M[0] = A11; M[1] *= -D;
3995 M[3] *= -D; M[4] = A22;
3996 double b1 = -M[0]*M[2] - M[1]*M[5];
3997 double b2 = -M[3]*M[2] - M[4]*M[5];
3998 M[2] = b1; M[5] = b2;
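        /* Editorial note (not in the original source): this inverts the forward
           affine map in place. For dst = A*src + b, the backward map used by
           the remap machinery is src = A^-1*dst + (-A^-1*b): A11/A22 and the
           -D terms build A^-1 from the 2x2 adjugate divided by the
           determinant, while b1/b2 compute -A^-1*b. */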
4002 AutoBuffer<int> _abdelta(dst.cols*2);
4003 int* adelta = &_abdelta[0], *bdelta = adelta + dst.cols;
4004 const int AB_BITS = MAX(10, (int)INTER_BITS);
4005 const int AB_SCALE = 1 << AB_BITS;
4007 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
4008 int depth = src.depth();
4009 int channels = src.channels();
4010 if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
4011 ( channels == 1 || channels == 3 || channels == 4 ) &&
4012         ( borderType == cv::BORDER_TRANSPARENT || borderType == cv::BORDER_CONSTANT ) )
4014 int type = src.type();
4015 ippiWarpAffineBackFunc ippFunc =
4016 type == CV_8UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C1R :
4017 type == CV_8UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C3R :
4018 type == CV_8UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C4R :
4019 type == CV_16UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C1R :
4020 type == CV_16UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C3R :
4021 type == CV_16UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C4R :
4022 type == CV_32FC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C1R :
4023 type == CV_32FC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C3R :
4024 type == CV_32FC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C4R :
4027 flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
4028 flags == INTER_NEAREST ? IPPI_INTER_NN :
4029 flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
4031 if( mode && ippFunc )
4033 double coeffs[2][3];
4034 for( int i = 0; i < 2; i++ )
4036 for( int j = 0; j < 3; j++ )
4038 coeffs[i][j] = matM.at<double>(i, j);
4042 Range range(0, dst.rows);
4043 IPPwarpAffineInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
4044 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
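    // Fixed-point per-column increments of the inverse map: adelta[x] ~ M[0]*x and
    // bdelta[x] ~ M[3]*x, both scaled by AB_SCALE; warpAffineInvoker adds the
    // per-row terms and performs the actual remapping.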
    for( x = 0; x < dst.cols; x++ )
    {
        adelta[x] = saturate_cast<int>(M[0]*x*AB_SCALE);
        bdelta[x] = saturate_cast<int>(M[3]*x*AB_SCALE);
    }

    Range range(0, dst.rows);
    warpAffineInvoker invoker(src, dst, interpolation, borderType,
                              borderValue, adelta, bdelta, M);
    parallel_for_(range, invoker, dst.total()/(double)(1<<16));
}
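/* warpPerspectiveInvoker walks the destination in tiles of at most
   BLOCK_SZ x BLOCK_SZ pixels: for each tile it evaluates the projective
   backward map into a CV_16SC2 coordinate grid (plus CV_16U interpolation-table
   indices for the fractional part) and lets remap() do the sampling. */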
namespace cv
{

class warpPerspectiveInvoker :
    public ParallelLoopBody
{
public:
    warpPerspectiveInvoker(const Mat &_src, Mat &_dst, double *_M, int _interpolation,
                           int _borderType, const Scalar &_borderValue) :
        ParallelLoopBody(), src(_src), dst(_dst), M(_M), interpolation(_interpolation),
        borderType(_borderType), borderValue(_borderValue)
    {
    }

    virtual void operator() (const Range& range) const
    {
        const int BLOCK_SZ = 32;
        short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
        int x, y, x1, y1, width = dst.cols, height = dst.rows;

        int bh0 = std::min(BLOCK_SZ/2, height);
        int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
        bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);

        for( y = range.start; y < range.end; y += bh0 )
        {
            for( x = 0; x < width; x += bw0 )
            {
                int bw = std::min( bw0, width - x);
                int bh = std::min( bh0, range.end - y); // height

                Mat _XY(bh, bw, CV_16SC2, XY), matA;
                Mat dpart(dst, Rect(x, y, bw, bh));

                for( y1 = 0; y1 < bh; y1++ )
                {
                    short* xy = XY + y1*bw*2;
                    double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                    double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                    double W0 = M[6]*x + M[7]*(y + y1) + M[8];

                    if( interpolation == INTER_NEAREST )
                        for( x1 = 0; x1 < bw; x1++ )
                        {
                            double W = W0 + M[6]*x1;
                            W = W ? 1./W : 0;
                            double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W));
                            double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W));
                            int X = saturate_cast<int>(fX);
                            int Y = saturate_cast<int>(fY);

                            xy[x1*2] = saturate_cast<short>(X);
                            xy[x1*2+1] = saturate_cast<short>(Y);
                        }
                    else
                    {
                        short* alpha = A + y1*bw;
                        for( x1 = 0; x1 < bw; x1++ )
                        {
                            double W = W0 + M[6]*x1;
                            W = W ? INTER_TAB_SIZE/W : 0;
                            double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W));
                            double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W));
                            int X = saturate_cast<int>(fX);
                            int Y = saturate_cast<int>(fY);

                            xy[x1*2] = saturate_cast<short>(X >> INTER_BITS);
                            xy[x1*2+1] = saturate_cast<short>(Y >> INTER_BITS);
                            alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                                (X & (INTER_TAB_SIZE-1)));
                        }
                    }
                }

                if( interpolation == INTER_NEAREST )
                    remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
                else
                {
                    Mat _matA(bh, bw, CV_16U, A);
                    remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
                }
            }
        }
    }

private:
    Mat src;
    Mat dst;
    double* M;
    int interpolation, borderType;
    Scalar borderValue;
};
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
class IPPwarpPerspectiveInvoker :
    public ParallelLoopBody
{
public:
    IPPwarpPerspectiveInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[3][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpPerspectiveBackFunc _func, bool *_ok) :
        ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
    {
        *ok = true;
    }

    virtual void operator() (const Range& range) const
    {
        IppiSize srcsize = {src.cols, src.rows};
        IppiRect srcroi = {0, 0, src.cols, src.rows};
        IppiRect dstroi = {0, range.start, dst.cols, range.end - range.start};
        int cnn = src.channels();

        if( borderType == BORDER_CONSTANT )
        {
            IppiSize setSize = {dst.cols, range.end - range.start};
            void *dataPointer = dst.data + dst.step[0] * range.start;
            if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
            {
                *ok = false;
                return;
            }
        }

        if( func(src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode) < 0)
            *ok = false;
    }
private:
    Mat &src;
    Mat &dst;
    int mode;
    double (&coeffs)[3][3];
    int borderType;
    const Scalar borderValue;
    ippiWarpPerspectiveBackFunc func;
    bool *ok;

    const IPPwarpPerspectiveInvoker& operator= (const IPPwarpPerspectiveInvoker&);
};
#endif

}
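/* A minimal usage sketch for cv::warpPerspective defined below (illustrative
   only; the corner coordinates are made up):

       cv::Point2f srcQuad[4] = { cv::Point2f(0,0),    cv::Point2f(639,0),
                                  cv::Point2f(639,479), cv::Point2f(0,479) };
       cv::Point2f dstQuad[4] = { cv::Point2f(50,30),  cv::Point2f(600,10),
                                  cv::Point2f(620,460), cv::Point2f(30,470) };
       cv::Mat H = cv::getPerspectiveTransform(srcQuad, dstQuad);
       cv::warpPerspective(src, dst, H, src.size());
*/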
void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
                          Size dsize, int flags, int borderType, const Scalar& borderValue )
{
    CV_Assert( _src.total() > 0 );

    if (ocl::useOpenCL() && _dst.isUMat() &&
        ocl_warpTransform(_src, _dst, _M0, dsize, flags, borderType, borderValue,
                          OCL_OP_PERSPECTIVE))
        return;

    Mat src = _src.getMat(), M0 = _M0.getMat();
    _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
    Mat dst = _dst.getMat();

    if( dst.data == src.data )
        src = src.clone();

    double M[9];
    Mat matM(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;

    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(matM, matM.type());

#ifdef HAVE_TEGRA_OPTIMIZATION
    if( tegra::warpPerspective(src, dst, M, flags, borderType, borderValue) )
        return;
#endif

    if( !(flags & WARP_INVERSE_MAP) )
        invert(matM, matM);

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    int depth = src.depth();
    int channels = src.channels();
    if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
        ( channels == 1 || channels == 3 || channels == 4 ) &&
        ( borderType == cv::BORDER_TRANSPARENT || borderType == cv::BORDER_CONSTANT ) )
    {
        int type = src.type();
        ippiWarpPerspectiveBackFunc ippFunc =
            type == CV_8UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C1R :
            type == CV_8UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C3R :
            type == CV_8UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C4R :
            type == CV_16UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C1R :
            type == CV_16UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C3R :
            type == CV_16UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C4R :
            type == CV_32FC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C1R :
            type == CV_32FC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C3R :
            type == CV_32FC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C4R :
            0;
        int mode =
            flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
            flags == INTER_NEAREST ? IPPI_INTER_NN :
            flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
            0;
        if( mode && ippFunc )
        {
            double coeffs[3][3];
            for( int i = 0; i < 3; i++ )
            {
                for( int j = 0; j < 3; j++ )
                {
                    coeffs[i][j] = matM.at<double>(i, j);
                }
            }

            bool ok;
            Range range(0, dst.rows);
            IPPwarpPerspectiveInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
            parallel_for_(range, invoker, dst.total()/(double)(1<<16));
            if( ok )
                return;
        }
    }
#endif

    Range range(0, dst.rows);
    warpPerspectiveInvoker invoker(src, dst, M, interpolation, borderType, borderValue);
    parallel_for_(range, invoker, dst.total()/(double)(1<<16));
}
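/* The function below returns the 2x3 matrix
 *
 *     [  alpha  beta   (1-alpha)*center.x - beta*center.y ]
 *     [ -beta   alpha   beta*center.x + (1-alpha)*center.y ]
 *
 * with alpha = scale*cos(angle) and beta = scale*sin(angle), so that `center`
 * maps to itself. `angle` is in degrees; positive values mean a
 * counter-clockwise rotation in the usual image coordinate system.
 */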
cv::Mat cv::getRotationMatrix2D( Point2f center, double angle, double scale )
{
    angle *= CV_PI/180;
    double alpha = cos(angle)*scale;
    double beta = sin(angle)*scale;

    Mat M(2, 3, CV_64F);
    double* m = (double*)M.data;

    m[0] = alpha;
    m[1] = beta;
    m[2] = (1-alpha)*center.x - beta*center.y;
    m[3] = -beta;
    m[4] = alpha;
    m[5] = beta*center.x + (1-alpha)*center.y;

    return M;
}
/* Calculates coefficients of perspective transformation
 * which maps (xi,yi) to (ui,vi), (i=1,2,3,4):
 *
 *      c00*xi + c01*yi + c02
 * ui = ---------------------
 *      c20*xi + c21*yi + c22
 *
 *      c10*xi + c11*yi + c12
 * vi = ---------------------
 *      c20*xi + c21*yi + c22
 *
 * Coefficients are calculated by solving linear system:
 * / x0 y0  1  0  0  0 -x0*u0 -y0*u0 \ /c00\ /u0\
 * | x1 y1  1  0  0  0 -x1*u1 -y1*u1 | |c01| |u1|
 * | x2 y2  1  0  0  0 -x2*u2 -y2*u2 | |c02| |u2|
 * | x3 y3  1  0  0  0 -x3*u3 -y3*u3 |.|c10|=|u3|,
 * |  0  0  0 x0 y0  1 -x0*v0 -y0*v0 | |c11| |v0|
 * |  0  0  0 x1 y1  1 -x1*v1 -y1*v1 | |c12| |v1|
 * |  0  0  0 x2 y2  1 -x2*v2 -y2*v2 | |c20| |v2|
 * \  0  0  0 x3 y3  1 -x3*v3 -y3*v3 / \c21/ \v3/
 *
 * where:
 *   cij - matrix coefficients, c22 = 1
 */
cv::Mat cv::getPerspectiveTransform( const Point2f src[], const Point2f dst[] )
{
    Mat M(3, 3, CV_64F), X(8, 1, CV_64F, M.data);
    double a[8][8], b[8];
    Mat A(8, 8, CV_64F, a), B(8, 1, CV_64F, b);

    for( int i = 0; i < 4; ++i )
    {
        a[i][0] = a[i+4][3] = src[i].x;
        a[i][1] = a[i+4][4] = src[i].y;
        a[i][2] = a[i+4][5] = 1;
        a[i][3] = a[i][4] = a[i][5] =
        a[i+4][0] = a[i+4][1] = a[i+4][2] = 0;
        a[i][6] = -src[i].x*dst[i].x;
        a[i][7] = -src[i].y*dst[i].x;
        a[i+4][6] = -src[i].x*dst[i].y;
        a[i+4][7] = -src[i].y*dst[i].y;
        b[i] = dst[i].x;
        b[i+4] = dst[i].y;
    }

    solve( A, B, X, DECOMP_SVD );
    ((double*)M.data)[8] = 1.;

    return M;
}
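/* Note: the 8x8 system above is solved with DECOMP_SVD, which tolerates
   degenerate (e.g. collinear) point configurations by producing a
   least-squares solution instead of failing outright. */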
/* Calculates coefficients of affine transformation
 * which maps (xi,yi) to (ui,vi), (i=1,2,3):
 *
 * ui = c00*xi + c01*yi + c02
 *
 * vi = c10*xi + c11*yi + c12
 *
 * Coefficients are calculated by solving linear system:
 * / x0 y0  1  0  0  0 \ /c00\ /u0\
 * | x1 y1  1  0  0  0 | |c01| |u1|
 * | x2 y2  1  0  0  0 | |c02| |u2|
 * |  0  0  0 x0 y0  1 | |c10| |v0|
 * |  0  0  0 x1 y1  1 | |c11| |v1|
 * \  0  0  0 x2 y2  1 / |c12| |v2|
 *
 * where:
 *   cij - matrix coefficients
 */
cv::Mat cv::getAffineTransform( const Point2f src[], const Point2f dst[] )
{
    Mat M(2, 3, CV_64F), X(6, 1, CV_64F, M.data);
    double a[6*6], b[6];
    Mat A(6, 6, CV_64F, a), B(6, 1, CV_64F, b);

    for( int i = 0; i < 3; i++ )
    {
        int j = i*12;
        int k = i*12+6;
        a[j] = a[k+3] = src[i].x;
        a[j+1] = a[k+4] = src[i].y;
        a[j+2] = a[k+5] = 1;
        a[j+3] = a[j+4] = a[j+5] = 0;
        a[k] = a[k+1] = a[k+2] = 0;
        b[i*2] = dst[i].x;
        b[i*2+1] = dst[i].y;
    }

    solve( A, B, X );
    return M;
}
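/* A minimal usage sketch for the function above (illustrative only; the point
   values are made up):

       cv::Point2f srcTri[3] = { cv::Point2f(0,0),   cv::Point2f(1,0),   cv::Point2f(0,1) };
       cv::Point2f dstTri[3] = { cv::Point2f(10,10), cv::Point2f(30,12), cv::Point2f(12,40) };
       cv::Mat A = cv::getAffineTransform(srcTri, dstTri);   // 2x3, CV_64F
*/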
void cv::invertAffineTransform(InputArray _matM, OutputArray __iM)
{
    Mat matM = _matM.getMat();
    CV_Assert(matM.rows == 2 && matM.cols == 3);
    __iM.create(2, 3, matM.type());
    Mat _iM = __iM.getMat();

    if( matM.type() == CV_32F )
    {
        const float* M = (const float*)matM.data;
        float* iM = (float*)_iM.data;
        int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));

        double D = M[0]*M[step+1] - M[1]*M[step];
        D = D != 0 ? 1./D : 0;
        double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
        double b1 = -A11*M[2] - A12*M[step+2];
        double b2 = -A21*M[2] - A22*M[step+2];

        iM[0] = (float)A11; iM[1] = (float)A12; iM[2] = (float)b1;
        iM[istep] = (float)A21; iM[istep+1] = (float)A22; iM[istep+2] = (float)b2;
    }
    else if( matM.type() == CV_64F )
    {
        const double* M = (const double*)matM.data;
        double* iM = (double*)_iM.data;
        int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));

        double D = M[0]*M[step+1] - M[1]*M[step];
        D = D != 0 ? 1./D : 0;
        double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
        double b1 = -A11*M[2] - A12*M[step+2];
        double b2 = -A21*M[2] - A22*M[step+2];

        iM[0] = A11; iM[1] = A12; iM[2] = b1;
        iM[istep] = A21; iM[istep+1] = A22; iM[istep+2] = b2;
    }
    else
        CV_Error( CV_StsUnsupportedFormat, "" );
}
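/* Sanity sketch for the function above (illustrative only): inverting twice
   should reproduce the original matrix up to round-off.

       cv::Mat R = cv::getRotationMatrix2D(cv::Point2f(50, 50), 45.0, 0.5), iR, RR;
       cv::invertAffineTransform(R, iR);
       cv::invertAffineTransform(iR, RR);   // RR ~ R
*/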
cv::Mat cv::getPerspectiveTransform(InputArray _src, InputArray _dst)
{
    Mat src = _src.getMat(), dst = _dst.getMat();
    CV_Assert(src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4);
    return getPerspectiveTransform((const Point2f*)src.data, (const Point2f*)dst.data);
}

cv::Mat cv::getAffineTransform(InputArray _src, InputArray _dst)
{
    Mat src = _src.getMat(), dst = _dst.getMat();
    CV_Assert(src.checkVector(2, CV_32F) == 3 && dst.checkVector(2, CV_32F) == 3);
    return getAffineTransform((const Point2f*)src.data, (const Point2f*)dst.data);
}
CV_IMPL void
cvResize( const CvArr* srcarr, CvArr* dstarr, int method )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.type() == dst.type() );
    cv::resize( src, dst, dst.size(), (double)dst.cols/src.cols,
        (double)dst.rows/src.rows, method );
}


CV_IMPL void
cvWarpAffine( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr,
              int flags, CvScalar fillval )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    cv::Mat matrix = cv::cvarrToMat(marr);
    CV_Assert( src.type() == dst.type() );
    cv::warpAffine( src, dst, matrix, dst.size(), flags,
        (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
        fillval );
}


CV_IMPL void
cvWarpPerspective( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr,
                   int flags, CvScalar fillval )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    cv::Mat matrix = cv::cvarrToMat(marr);
    CV_Assert( src.type() == dst.type() );
    cv::warpPerspective( src, dst, matrix, dst.size(), flags,
        (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
        fillval );
}


CV_IMPL void
cvRemap( const CvArr* srcarr, CvArr* dstarr,
         const CvArr* _mapx, const CvArr* _mapy,
         int flags, CvScalar fillval )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
    cv::Mat mapx = cv::cvarrToMat(_mapx), mapy = cv::cvarrToMat(_mapy);
    CV_Assert( src.type() == dst.type() && dst.size() == mapx.size() );
    cv::remap( src, dst, mapx, mapy, flags & cv::INTER_MAX,
        (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
        fillval );
    CV_Assert( dst0.data == dst.data );
}


CV_IMPL CvMat*
cv2DRotationMatrix( CvPoint2D32f center, double angle,
                    double scale, CvMat* matrix )
{
    cv::Mat M0 = cv::cvarrToMat(matrix), M = cv::getRotationMatrix2D(center, angle, scale);
    CV_Assert( M.size() == M0.size() );
    M.convertTo(M0, M0.type());
    return matrix;
}


CV_IMPL CvMat*
cvGetPerspectiveTransform( const CvPoint2D32f* src,
                           const CvPoint2D32f* dst,
                           CvMat* matrix )
{
    cv::Mat M0 = cv::cvarrToMat(matrix),
        M = cv::getPerspectiveTransform((const cv::Point2f*)src, (const cv::Point2f*)dst);
    CV_Assert( M.size() == M0.size() );
    M.convertTo(M0, M0.type());
    return matrix;
}


CV_IMPL CvMat*
cvGetAffineTransform( const CvPoint2D32f* src,
                      const CvPoint2D32f* dst,
                      CvMat* matrix )
{
    cv::Mat M0 = cv::cvarrToMat(matrix),
        M = cv::getAffineTransform((const cv::Point2f*)src, (const cv::Point2f*)dst);
    CV_Assert( M.size() == M0.size() );
    M.convertTo(M0, M0.type());
    return matrix;
}


CV_IMPL void
cvConvertMaps( const CvArr* arr1, const CvArr* arr2, CvArr* dstarr1, CvArr* dstarr2 )
{
    cv::Mat map1 = cv::cvarrToMat(arr1), map2;
    cv::Mat dstmap1 = cv::cvarrToMat(dstarr1), dstmap2;

    if( arr2 )
        map2 = cv::cvarrToMat(arr2);
    if( dstarr2 )
    {
        dstmap2 = cv::cvarrToMat(dstarr2);
        if( dstmap2.type() == CV_16SC1 )
            dstmap2 = cv::Mat(dstmap2.size(), CV_16UC1, dstmap2.data, dstmap2.step);
    }

    cv::convertMaps( map1, map2, dstmap1, dstmap2, dstmap1.type(), false );
}
/****************************************************************************************\
*                                   Log-Polar Transform                                  *
\****************************************************************************************/
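/* Forward mapping built below (when CV_WARP_INVERSE_MAP is not set): a
   destination pixel (rho, phi) samples the source at
       x = exp(rho/M)*cos(phi*2*CV_PI/dsize.height) + center.x,
       y = exp(rho/M)*sin(phi*2*CV_PI/dsize.height) + center.y. */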
/* now it is done via Remap; more correct implementation should use
   some super-sampling technique outside of the "fovea" circle */
CV_IMPL void
cvLogPolar( const CvArr* srcarr, CvArr* dstarr,
            CvPoint2D32f center, double M, int flags )
{
    cv::Ptr<CvMat> mapx, mapy;

    CvMat srcstub, *src = cvGetMat(srcarr, &srcstub);
    CvMat dststub, *dst = cvGetMat(dstarr, &dststub);
    CvSize ssize, dsize;

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_Error( CV_StsUnmatchedFormats, "" );

    if( M <= 0 )
        CV_Error( CV_StsOutOfRange, "M should be >0" );

    ssize = cvGetMatSize(src);
    dsize = cvGetMatSize(dst);

    mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
    mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));

    if( !(flags & CV_WARP_INVERSE_MAP) )
    {
        int phi, rho;
        cv::AutoBuffer<double> _exp_tab(dsize.width);
        double* exp_tab = _exp_tab;

        for( rho = 0; rho < dst->width; rho++ )
            exp_tab[rho] = std::exp(rho/M);

        for( phi = 0; phi < dsize.height; phi++ )
        {
            double cp = cos(phi*2*CV_PI/dsize.height);
            double sp = sin(phi*2*CV_PI/dsize.height);
            float* mx = (float*)(mapx->data.ptr + phi*mapx->step);
            float* my = (float*)(mapy->data.ptr + phi*mapy->step);

            for( rho = 0; rho < dsize.width; rho++ )
            {
                double r = exp_tab[rho];
                double x = r*cp + center.x;
                double y = r*sp + center.y;

                mx[rho] = (float)x;
                my[rho] = (float)y;
            }
        }
    }
    else
    {
        int x, y;
        CvMat bufx, bufy, bufp, bufa;
        double ascale = ssize.height/(2*CV_PI);
        cv::AutoBuffer<float> _buf(4*dsize.width);
        float* buf = _buf;

        bufx = cvMat( 1, dsize.width, CV_32F, buf );
        bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width );
        bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 );
        bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 );

        for( x = 0; x < dsize.width; x++ )
            bufx.data.fl[x] = (float)x - center.x;

        for( y = 0; y < dsize.height; y++ )
        {
            float* mx = (float*)(mapx->data.ptr + y*mapx->step);
            float* my = (float*)(mapy->data.ptr + y*mapy->step);

            for( x = 0; x < dsize.width; x++ )
                bufy.data.fl[x] = (float)y - center.y;

#if 1
            cvCartToPolar( &bufx, &bufy, &bufp, &bufa );

            for( x = 0; x < dsize.width; x++ )
                bufp.data.fl[x] += 1.f;

            cvLog( &bufp, &bufp );

            for( x = 0; x < dsize.width; x++ )
            {
                double rho = bufp.data.fl[x]*M;
                double phi = bufa.data.fl[x]*ascale;

                mx[x] = (float)rho;
                my[x] = (float)phi;
            }
#else
            for( x = 0; x < dsize.width; x++ )
            {
                double xx = bufx.data.fl[x];
                double yy = bufy.data.fl[x];

                double p = log(std::sqrt(xx*xx + yy*yy) + 1.)*M;
                double a = atan2(yy,xx);
                if( a < 0 )
                    a = 2*CV_PI + a;
                a *= ascale;

                mx[x] = (float)p;
                my[x] = (float)a;
            }
#endif
        }
    }

    cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) );
}
void cv::logPolar( InputArray _src, OutputArray _dst,
                   Point2f center, double M, int flags )
{
    Mat src = _src.getMat();
    _dst.create( src.size(), src.type() );
    CvMat c_src = src, c_dst = _dst.getMat();
    cvLogPolar( &c_src, &c_dst, center, M, flags );
}
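/* A minimal usage sketch for the wrapper above (illustrative only; the
   magnitude scale M = 40 is an arbitrary choice):

       cv::logPolar(src, dst, cv::Point2f(src.cols*0.5f, src.rows*0.5f),
                    40, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);
*/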
/****************************************************************************************
                                   Linear-Polar Transform
                                   J.L. Blanco, Apr 2009
****************************************************************************************/
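/* Forward mapping built below (when CV_WARP_INVERSE_MAP is not set): a
   destination pixel (rho, phi) samples the source at
       x = r*cos(phi*2*CV_PI/dsize.height) + center.x,
       y = r*sin(phi*2*CV_PI/dsize.height) + center.y,
   with r = maxRadius*(rho+1)/dsize.width. */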
CV_IMPL
void cvLinearPolar( const CvArr* srcarr, CvArr* dstarr,
                    CvPoint2D32f center, double maxRadius, int flags )
{
    cv::Ptr<CvMat> mapx, mapy;

    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize ssize, dsize;

    src = cvGetMat( srcarr, &srcstub,0,0 );
    dst = cvGetMat( dstarr, &dststub,0,0 );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_Error( CV_StsUnmatchedFormats, "" );

    ssize.width = src->cols;
    ssize.height = src->rows;
    dsize.width = dst->cols;
    dsize.height = dst->rows;

    mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
    mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));

    if( !(flags & CV_WARP_INVERSE_MAP) )
    {
        int phi, rho;

        for( phi = 0; phi < dsize.height; phi++ )
        {
            double cp = cos(phi*2*CV_PI/dsize.height);
            double sp = sin(phi*2*CV_PI/dsize.height);
            float* mx = (float*)(mapx->data.ptr + phi*mapx->step);
            float* my = (float*)(mapy->data.ptr + phi*mapy->step);

            for( rho = 0; rho < dsize.width; rho++ )
            {
                double r = maxRadius*(rho+1)/dsize.width;
                double x = r*cp + center.x;
                double y = r*sp + center.y;

                mx[rho] = (float)x;
                my[rho] = (float)y;
            }
        }
    }
    else
    {
        int x, y;
        CvMat bufx, bufy, bufp, bufa;
        const double ascale = ssize.height/(2*CV_PI);
        const double pscale = ssize.width/maxRadius;

        cv::AutoBuffer<float> _buf(4*dsize.width);
        float* buf = _buf;

        bufx = cvMat( 1, dsize.width, CV_32F, buf );
        bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width );
        bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 );
        bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 );

        for( x = 0; x < dsize.width; x++ )
            bufx.data.fl[x] = (float)x - center.x;

        for( y = 0; y < dsize.height; y++ )
        {
            float* mx = (float*)(mapx->data.ptr + y*mapx->step);
            float* my = (float*)(mapy->data.ptr + y*mapy->step);

            for( x = 0; x < dsize.width; x++ )
                bufy.data.fl[x] = (float)y - center.y;

            cvCartToPolar( &bufx, &bufy, &bufp, &bufa, 0 );

            for( x = 0; x < dsize.width; x++ )
                bufp.data.fl[x] += 1.f;

            for( x = 0; x < dsize.width; x++ )
            {
                double rho = bufp.data.fl[x]*pscale;
                double phi = bufa.data.fl[x]*ascale;
                mx[x] = (float)rho;
                my[x] = (float)phi;
            }
        }
    }

    cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) );
}
void cv::linearPolar( InputArray _src, OutputArray _dst,
                      Point2f center, double maxRadius, int flags )
{
    Mat src = _src.getMat();
    _dst.create( src.size(), src.type() );
    CvMat c_src = src, c_dst = _dst.getMat();
    cvLinearPolar( &c_src, &c_dst, center, maxRadius, flags );
}

/* End of file. */