1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistributions of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistributions in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 /* ////////////////////////////////////////////////////////////////////
45 // Geometrical transforms on images and matrices: rotation, zoom, etc.
49 #include "precomp.hpp"
50 #include "opencl_kernels.hpp"
52 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
53 static IppStatus sts = ippInit();
59 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
60 typedef IppStatus (CV_STDCALL* ippiSetFunc)(const void*, void *, int, IppiSize);
61 typedef IppStatus (CV_STDCALL* ippiWarpPerspectiveBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [3][3], int);
62 typedef IppStatus (CV_STDCALL* ippiWarpAffineBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [2][3], int);
63 typedef IppStatus (CV_STDCALL* ippiResizeSqrPixelFunc)(const void*, IppiSize, int, IppiRect, void*, int, IppiRect, double, double, double, double, int, Ipp8u *);
65 template <int channels, typename Type>
66 bool IPPSetSimple(cv::Scalar value, void *dataPointer, int step, IppiSize &size, ippiSetFunc func)
68 Type values[channels];
69 for( int i = 0; i < channels; i++ )
70 values[i] = (Type)value[i];
71 return func(values, dataPointer, step, size) >= 0;
74 bool IPPSet(const cv::Scalar &value, void *dataPointer, int step, IppiSize &size, int channels, int depth)
81 return ippiSet_8u_C1R((Ipp8u)value[0], (Ipp8u *)dataPointer, step, size) >= 0;
83 return ippiSet_16u_C1R((Ipp16u)value[0], (Ipp16u *)dataPointer, step, size) >= 0;
85 return ippiSet_32f_C1R((Ipp32f)value[0], (Ipp32f *)dataPointer, step, size) >= 0;
95 return IPPSetSimple<3, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C3R);
97 return IPPSetSimple<3, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C3R);
99 return IPPSetSimple<3, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C3R);
102 else if( channels == 4 )
107 return IPPSetSimple<4, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C4R);
109 return IPPSetSimple<4, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C4R);
111 return IPPSetSimple<4, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C4R);
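// Illustrative call (a sketch, not part of the build; `ptr` and `stepBytes`
// are placeholder names): filling a 16x16 CV_8UC3 block with solid red
// before an IPP warp writes around it:
//
//     IppiSize roi = { 16, 16 };
//     bool ok = IPPSet(cv::Scalar(0, 0, 255), ptr, stepBytes, roi, 3, CV_8U);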
119 /************** interpolation formulas and tables ***************/
121 const int INTER_RESIZE_COEF_BITS=11;
122 const int INTER_RESIZE_COEF_SCALE=1 << INTER_RESIZE_COEF_BITS;
124 const int INTER_REMAP_COEF_BITS=15;
125 const int INTER_REMAP_COEF_SCALE=1 << INTER_REMAP_COEF_BITS;
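// Interpolation weights are quantized to fixed point: resize uses 2^11 steps
// and remap 2^15. E.g. a bilinear weight of 0.3 is stored as
// cvRound(0.3*INTER_RESIZE_COEF_SCALE) == 614; initInterTab2D below
// re-balances the integer remap kernels so each one still sums to exactly
// INTER_REMAP_COEF_SCALE after rounding.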
127 static uchar NNDeltaTab_i[INTER_TAB_SIZE2][2];
129 static float BilinearTab_f[INTER_TAB_SIZE2][2][2];
130 static short BilinearTab_i[INTER_TAB_SIZE2][2][2];
133 static short BilinearTab_iC4_buf[INTER_TAB_SIZE2+2][2][8];
134 static short (*BilinearTab_iC4)[2][8] = (short (*)[2][8])alignPtr(BilinearTab_iC4_buf, 16);
137 static float BicubicTab_f[INTER_TAB_SIZE2][4][4];
138 static short BicubicTab_i[INTER_TAB_SIZE2][4][4];
140 static float Lanczos4Tab_f[INTER_TAB_SIZE2][8][8];
141 static short Lanczos4Tab_i[INTER_TAB_SIZE2][8][8];
143 static inline void interpolateLinear( float x, float* coeffs )
149 static inline void interpolateCubic( float x, float* coeffs )
151 const float A = -0.75f;
153 coeffs[0] = ((A*(x + 1) - 5*A)*(x + 1) + 8*A)*(x + 1) - 4*A;
154 coeffs[1] = ((A + 2)*x - (A + 3))*x*x + 1;
155 coeffs[2] = ((A + 2)*(1 - x) - (A + 3))*(1 - x)*(1 - x) + 1;
156 coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];
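// Worked example for the A = -0.75 kernel above: interpolateCubic(0.5f, c)
// yields c == { -0.09375f, 0.59375f, 0.59375f, -0.09375f }; the coeffs[3]
// formula forces the four taps to sum to exactly 1.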
159 static inline void interpolateLanczos4( float x, float* coeffs )
161 static const double s45 = 0.70710678118654752440084436210485;
162 static const double cs[][2]=
163 {{1, 0}, {-s45, -s45}, {0, 1}, {s45, -s45}, {-1, 0}, {s45, s45}, {0, -1}, {-s45, s45}};
165 if( x < FLT_EPSILON )
167 for( int i = 0; i < 8; i++ )
174 double y0=-(x+3)*CV_PI*0.25, s0 = sin(y0), c0=cos(y0);
175 for(int i = 0; i < 8; i++ )
177 double y = -(x+3-i)*CV_PI*0.25;
178 coeffs[i] = (float)((cs[i][0]*s0 + cs[i][1]*c0)/(y*y));
183 for(int i = 0; i < 8; i++ )
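// The cs[] table stores (-1)^i * (cos(i*pi/4), sin(i*pi/4)), so by the
// angle-addition formula cs[i][0]*s0 + cs[i][1]*c0 == (-1)^i * sin(y):
// one sin/cos pair at y0 replaces eight sin() calls. The i-independent
// factor sin(4*y0) from the Lanczos product sinc(t)*sinc(t/4) cancels in
// the normalization loop above.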
187 static void initInterTab1D(int method, float* tab, int tabsz)
189 float scale = 1.f/tabsz;
190 if( method == INTER_LINEAR )
192 for( int i = 0; i < tabsz; i++, tab += 2 )
193 interpolateLinear( i*scale, tab );
195 else if( method == INTER_CUBIC )
197 for( int i = 0; i < tabsz; i++, tab += 4 )
198 interpolateCubic( i*scale, tab );
200 else if( method == INTER_LANCZOS4 )
202 for( int i = 0; i < tabsz; i++, tab += 8 )
203 interpolateLanczos4( i*scale, tab );
206 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
210 static const void* initInterTab2D( int method, bool fixpt )
212 static bool inittab[INTER_MAX+1] = {false};
216 if( method == INTER_LINEAR )
217 tab = BilinearTab_f[0][0], itab = BilinearTab_i[0][0], ksize=2;
218 else if( method == INTER_CUBIC )
219 tab = BicubicTab_f[0][0], itab = BicubicTab_i[0][0], ksize=4;
220 else if( method == INTER_LANCZOS4 )
221 tab = Lanczos4Tab_f[0][0], itab = Lanczos4Tab_i[0][0], ksize=8;
223 CV_Error( CV_StsBadArg, "Unknown/unsupported interpolation type" );
225 if( !inittab[method] )
227 AutoBuffer<float> _tab(8*INTER_TAB_SIZE);
229 initInterTab1D(method, _tab, INTER_TAB_SIZE);
230 for( i = 0; i < INTER_TAB_SIZE; i++ )
231 for( j = 0; j < INTER_TAB_SIZE; j++, tab += ksize*ksize, itab += ksize*ksize )
234 NNDeltaTab_i[i*INTER_TAB_SIZE+j][0] = j < INTER_TAB_SIZE/2;
235 NNDeltaTab_i[i*INTER_TAB_SIZE+j][1] = i < INTER_TAB_SIZE/2;
237 for( k1 = 0; k1 < ksize; k1++ )
239 float vy = _tab[i*ksize + k1];
240 for( k2 = 0; k2 < ksize; k2++ )
242 float v = vy*_tab[j*ksize + k2];
243 tab[k1*ksize + k2] = v;
244 isum += itab[k1*ksize + k2] = saturate_cast<short>(v*INTER_REMAP_COEF_SCALE);
248 if( isum != INTER_REMAP_COEF_SCALE )
250 int diff = isum - INTER_REMAP_COEF_SCALE;
251 int ksize2 = ksize/2, Mk1=ksize2, Mk2=ksize2, mk1=ksize2, mk2=ksize2;
252 for( k1 = ksize2; k1 < ksize2+2; k1++ )
253 for( k2 = ksize2; k2 < ksize2+2; k2++ )
255 if( itab[k1*ksize+k2] < itab[mk1*ksize+mk2] )
257 else if( itab[k1*ksize+k2] > itab[Mk1*ksize+Mk2] )
261 itab[Mk1*ksize + Mk2] = (short)(itab[Mk1*ksize + Mk2] - diff);
263 itab[mk1*ksize + mk2] = (short)(itab[mk1*ksize + mk2] - diff);
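// The rounded integer weights must still sum to exactly
// INTER_REMAP_COEF_SCALE so that remapping a constant image reproduces it
// bit-exactly; the residue is folded into one of the four central taps
// (the largest when the sum falls short, the smallest when it overshoots).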
266 tab -= INTER_TAB_SIZE2*ksize*ksize;
267 itab -= INTER_TAB_SIZE2*ksize*ksize;
269 if( method == INTER_LINEAR )
271 for( i = 0; i < INTER_TAB_SIZE2; i++ )
272 for( j = 0; j < 4; j++ )
274 BilinearTab_iC4[i][0][j*2] = BilinearTab_i[i][0][0];
275 BilinearTab_iC4[i][0][j*2+1] = BilinearTab_i[i][0][1];
276 BilinearTab_iC4[i][1][j*2] = BilinearTab_i[i][1][0];
277 BilinearTab_iC4[i][1][j*2+1] = BilinearTab_i[i][1][1];
281 inittab[method] = true;
283 return fixpt ? (const void*)itab : (const void*)tab;
287 static bool initAllInterTab2D()
289 return initInterTab2D( INTER_LINEAR, false ) &&
290 initInterTab2D( INTER_LINEAR, true ) &&
291 initInterTab2D( INTER_CUBIC, false ) &&
292 initInterTab2D( INTER_CUBIC, true ) &&
293 initInterTab2D( INTER_LANCZOS4, false ) &&
294 initInterTab2D( INTER_LANCZOS4, true );
297 static volatile bool doInitAllInterTab2D = initAllInterTab2D();
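// Evaluating initAllInterTab2D() in a static initializer builds all six
// tables once at library load, so resize/remap worker threads later read
// them without any synchronization.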
300 template<typename ST, typename DT> struct Cast
305 DT operator()(ST val) const { return saturate_cast<DT>(val); }
308 template<typename ST, typename DT, int bits> struct FixedPtCast
312 enum { SHIFT = bits, DELTA = 1 << (bits-1) };
314 DT operator()(ST val) const { return saturate_cast<DT>((val + DELTA)>>SHIFT); }
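// Example: the 8-bit resize path uses
// FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>, i.e. SHIFT == 22 and
// DELTA == 1 << 21, so an accumulator is converted back to a pixel by the
// round-half-up (val + (1<<21)) >> 22, saturated to [0, 255].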
317 /****************************************************************************************\
319 \****************************************************************************************/
321 class resizeNNInvoker :
322 public ParallelLoopBody
325 resizeNNInvoker(const Mat& _src, Mat &_dst, int *_x_ofs, int _pix_size4, double _ify) :
326 ParallelLoopBody(), src(_src), dst(_dst), x_ofs(_x_ofs), pix_size4(_pix_size4),
331 virtual void operator() (const Range& range) const
333 Size ssize = src.size(), dsize = dst.size();
334 int y, x, pix_size = (int)src.elemSize();
336 for( y = range.start; y < range.end; y++ )
338 uchar* D = dst.data + dst.step*y;
339 int sy = std::min(cvFloor(y*ify), ssize.height-1);
340 const uchar* S = src.data + src.step*sy;
345 for( x = 0; x <= dsize.width - 2; x += 2 )
347 uchar t0 = S[x_ofs[x]];
348 uchar t1 = S[x_ofs[x+1]];
353 for( ; x < dsize.width; x++ )
357 for( x = 0; x < dsize.width; x++ )
358 *(ushort*)(D + x*2) = *(ushort*)(S + x_ofs[x]);
361 for( x = 0; x < dsize.width; x++, D += 3 )
363 const uchar* _tS = S + x_ofs[x];
364 D[0] = _tS[0]; D[1] = _tS[1]; D[2] = _tS[2];
368 for( x = 0; x < dsize.width; x++ )
369 *(int*)(D + x*4) = *(int*)(S + x_ofs[x]);
372 for( x = 0; x < dsize.width; x++, D += 6 )
374 const ushort* _tS = (const ushort*)(S + x_ofs[x]);
375 ushort* _tD = (ushort*)D;
376 _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
380 for( x = 0; x < dsize.width; x++, D += 8 )
382 const int* _tS = (const int*)(S + x_ofs[x]);
384 _tD[0] = _tS[0]; _tD[1] = _tS[1];
388 for( x = 0; x < dsize.width; x++, D += 12 )
390 const int* _tS = (const int*)(S + x_ofs[x]);
392 _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2];
396 for( x = 0; x < dsize.width; x++, D += pix_size )
398 const int* _tS = (const int*)(S + x_ofs[x]);
400 for( int k = 0; k < pix_size4; k++ )
410 int* x_ofs, pix_size4;
413 resizeNNInvoker(const resizeNNInvoker&);
414 resizeNNInvoker& operator=(const resizeNNInvoker&);
418 resizeNN( const Mat& src, Mat& dst, double fx, double fy )
420 Size ssize = src.size(), dsize = dst.size();
421 AutoBuffer<int> _x_ofs(dsize.width);
423 int pix_size = (int)src.elemSize();
424 int pix_size4 = (int)(pix_size / sizeof(int));
425 double ifx = 1./fx, ify = 1./fy;
428 for( x = 0; x < dsize.width; x++ )
430 int sx = cvFloor(x*ifx);
431 x_ofs[x] = std::min(sx, ssize.width-1)*pix_size;
434 Range range(0, dsize.height);
435 resizeNNInvoker invoker(src, dst, x_ofs, pix_size4, ify);
436 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
442 int operator()(const uchar**, uchar*, const uchar*, int ) const { return 0; }
447 int operator()(const uchar**, uchar**, int, const int*,
448 const uchar*, int, int, int, int, int) const { return 0; }
453 struct VResizeLinearVec_32s8u
455 int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
457 if( !checkHardwareSupport(CV_CPU_SSE2) )
460 const int** src = (const int**)_src;
461 const short* beta = (const short*)_beta;
462 const int *S0 = src[0], *S1 = src[1];
464 __m128i b0 = _mm_set1_epi16(beta[0]), b1 = _mm_set1_epi16(beta[1]);
465 __m128i delta = _mm_set1_epi16(2);
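// OR-ing both row pointers and masking with 15 checks 16-byte alignment of
// S0 and S1 in one test: the _mm_load_si128 branch runs only when both are
// aligned, otherwise the identical _mm_loadu_si128 copy below is used
// (stores go through _mm_storeu_si128 either way, since dst may be
// unaligned).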
467 if( (((size_t)S0|(size_t)S1)&15) == 0 )
468 for( ; x <= width - 16; x += 16 )
470 __m128i x0, x1, x2, y0, y1, y2;
471 x0 = _mm_load_si128((const __m128i*)(S0 + x));
472 x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
473 y0 = _mm_load_si128((const __m128i*)(S1 + x));
474 y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));
475 x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
476 y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
478 x1 = _mm_load_si128((const __m128i*)(S0 + x + 8));
479 x2 = _mm_load_si128((const __m128i*)(S0 + x + 12));
480 y1 = _mm_load_si128((const __m128i*)(S1 + x + 8));
481 y2 = _mm_load_si128((const __m128i*)(S1 + x + 12));
482 x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
483 y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
485 x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
486 x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
488 x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
489 x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
490 _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
493 for( ; x <= width - 16; x += 16 )
495 __m128i x0, x1, x2, y0, y1, y2;
496 x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
497 x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
498 y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
499 y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));
500 x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
501 y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
503 x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 8));
504 x2 = _mm_loadu_si128((const __m128i*)(S0 + x + 12));
505 y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 8));
506 y2 = _mm_loadu_si128((const __m128i*)(S1 + x + 12));
507 x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
508 y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
510 x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
511 x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
513 x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
514 x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
515 _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
518 for( ; x < width - 4; x += 4 )
521 x0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S0 + x)), 4);
522 y0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S1 + x)), 4);
523 x0 = _mm_packs_epi32(x0, x0);
524 y0 = _mm_packs_epi32(y0, y0);
525 x0 = _mm_adds_epi16(_mm_mulhi_epi16(x0, b0), _mm_mulhi_epi16(y0, b1));
526 x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
527 x0 = _mm_packus_epi16(x0, x0);
528 *(int*)(dst + x) = _mm_cvtsi128_si32(x0);
536 template<int shiftval> struct VResizeLinearVec_32f16
538 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
540 if( !checkHardwareSupport(CV_CPU_SSE2) )
543 const float** src = (const float**)_src;
544 const float* beta = (const float*)_beta;
545 const float *S0 = src[0], *S1 = src[1];
546 ushort* dst = (ushort*)_dst;
549 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
550 __m128i preshift = _mm_set1_epi32(shiftval);
551 __m128i postshift = _mm_set1_epi16((short)shiftval);
553 if( (((size_t)S0|(size_t)S1)&15) == 0 )
554 for( ; x <= width - 16; x += 16 )
556 __m128 x0, x1, y0, y1;
558 x0 = _mm_load_ps(S0 + x);
559 x1 = _mm_load_ps(S0 + x + 4);
560 y0 = _mm_load_ps(S1 + x);
561 y1 = _mm_load_ps(S1 + x + 4);
563 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
564 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
565 t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
566 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
567 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
569 x0 = _mm_load_ps(S0 + x + 8);
570 x1 = _mm_load_ps(S0 + x + 12);
571 y0 = _mm_load_ps(S1 + x + 8);
572 y1 = _mm_load_ps(S1 + x + 12);
574 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
575 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
576 t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
577 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
578 t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
580 _mm_storeu_si128( (__m128i*)(dst + x), t0);
581 _mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
584 for( ; x <= width - 16; x += 16 )
586 __m128 x0, x1, y0, y1;
588 x0 = _mm_loadu_ps(S0 + x);
589 x1 = _mm_loadu_ps(S0 + x + 4);
590 y0 = _mm_loadu_ps(S1 + x);
591 y1 = _mm_loadu_ps(S1 + x + 4);
593 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
594 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
595 t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
596 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
597 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
599 x0 = _mm_loadu_ps(S0 + x + 8);
600 x1 = _mm_loadu_ps(S0 + x + 12);
601 y0 = _mm_loadu_ps(S1 + x + 8);
602 y1 = _mm_loadu_ps(S1 + x + 12);
604 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
605 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
606 t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
607 t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
608 t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
610 _mm_storeu_si128( (__m128i*)(dst + x), t0);
611 _mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
614 for( ; x < width - 4; x += 4 )
618 x0 = _mm_loadu_ps(S0 + x);
619 y0 = _mm_loadu_ps(S1 + x);
621 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
622 t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
623 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t0), postshift);
624 _mm_storel_epi64( (__m128i*)(dst + x), t0);
631 typedef VResizeLinearVec_32f16<SHRT_MIN> VResizeLinearVec_32f16u;
632 typedef VResizeLinearVec_32f16<0> VResizeLinearVec_32f16s;
634 struct VResizeLinearVec_32f
636 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
638 if( !checkHardwareSupport(CV_CPU_SSE) )
641 const float** src = (const float**)_src;
642 const float* beta = (const float*)_beta;
643 const float *S0 = src[0], *S1 = src[1];
644 float* dst = (float*)_dst;
647 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
649 if( (((size_t)S0|(size_t)S1)&15) == 0 )
650 for( ; x <= width - 8; x += 8 )
652 __m128 x0, x1, y0, y1;
653 x0 = _mm_load_ps(S0 + x);
654 x1 = _mm_load_ps(S0 + x + 4);
655 y0 = _mm_load_ps(S1 + x);
656 y1 = _mm_load_ps(S1 + x + 4);
658 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
659 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
661 _mm_storeu_ps( dst + x, x0);
662 _mm_storeu_ps( dst + x + 4, x1);
665 for( ; x <= width - 8; x += 8 )
667 __m128 x0, x1, y0, y1;
668 x0 = _mm_loadu_ps(S0 + x);
669 x1 = _mm_loadu_ps(S0 + x + 4);
670 y0 = _mm_loadu_ps(S1 + x);
671 y1 = _mm_loadu_ps(S1 + x + 4);
673 x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
674 x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
676 _mm_storeu_ps( dst + x, x0);
677 _mm_storeu_ps( dst + x + 4, x1);
685 struct VResizeCubicVec_32s8u
687 int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
689 if( !checkHardwareSupport(CV_CPU_SSE2) )
692 const int** src = (const int**)_src;
693 const short* beta = (const short*)_beta;
694 const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
696 float scale = 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE);
697 __m128 b0 = _mm_set1_ps(beta[0]*scale), b1 = _mm_set1_ps(beta[1]*scale),
698 b2 = _mm_set1_ps(beta[2]*scale), b3 = _mm_set1_ps(beta[3]*scale);
700 if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&15) == 0 )
701 for( ; x <= width - 8; x += 8 )
703 __m128i x0, x1, y0, y1;
704 __m128 s0, s1, f0, f1;
705 x0 = _mm_load_si128((const __m128i*)(S0 + x));
706 x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
707 y0 = _mm_load_si128((const __m128i*)(S1 + x));
708 y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));
710 s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
711 s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
712 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
713 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
714 s0 = _mm_add_ps(s0, f0);
715 s1 = _mm_add_ps(s1, f1);
717 x0 = _mm_load_si128((const __m128i*)(S2 + x));
718 x1 = _mm_load_si128((const __m128i*)(S2 + x + 4));
719 y0 = _mm_load_si128((const __m128i*)(S3 + x));
720 y1 = _mm_load_si128((const __m128i*)(S3 + x + 4));
722 f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
723 f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
724 s0 = _mm_add_ps(s0, f0);
725 s1 = _mm_add_ps(s1, f1);
726 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
727 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
728 s0 = _mm_add_ps(s0, f0);
729 s1 = _mm_add_ps(s1, f1);
731 x0 = _mm_cvtps_epi32(s0);
732 x1 = _mm_cvtps_epi32(s1);
734 x0 = _mm_packs_epi32(x0, x1);
735 _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
738 for( ; x <= width - 8; x += 8 )
740 __m128i x0, x1, y0, y1;
741 __m128 s0, s1, f0, f1;
742 x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
743 x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
744 y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
745 y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));
747 s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
748 s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
749 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
750 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
751 s0 = _mm_add_ps(s0, f0);
752 s1 = _mm_add_ps(s1, f1);
754 x0 = _mm_loadu_si128((const __m128i*)(S2 + x));
755 x1 = _mm_loadu_si128((const __m128i*)(S2 + x + 4));
756 y0 = _mm_loadu_si128((const __m128i*)(S3 + x));
757 y1 = _mm_loadu_si128((const __m128i*)(S3 + x + 4));
759 f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
760 f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
761 s0 = _mm_add_ps(s0, f0);
762 s1 = _mm_add_ps(s1, f1);
763 f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
764 f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
765 s0 = _mm_add_ps(s0, f0);
766 s1 = _mm_add_ps(s1, f1);
768 x0 = _mm_cvtps_epi32(s0);
769 x1 = _mm_cvtps_epi32(s1);
771 x0 = _mm_packs_epi32(x0, x1);
772 _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
780 template<int shiftval> struct VResizeCubicVec_32f16
782 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
784 if( !checkHardwareSupport(CV_CPU_SSE2) )
787 const float** src = (const float**)_src;
788 const float* beta = (const float*)_beta;
789 const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
790 ushort* dst = (ushort*)_dst;
792 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
793 b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);
794 __m128i preshift = _mm_set1_epi32(shiftval);
795 __m128i postshift = _mm_set1_epi16((short)shiftval);
797 for( ; x <= width - 8; x += 8 )
799 __m128 x0, x1, y0, y1, s0, s1;
801 x0 = _mm_loadu_ps(S0 + x);
802 x1 = _mm_loadu_ps(S0 + x + 4);
803 y0 = _mm_loadu_ps(S1 + x);
804 y1 = _mm_loadu_ps(S1 + x + 4);
806 s0 = _mm_mul_ps(x0, b0);
807 s1 = _mm_mul_ps(x1, b0);
808 y0 = _mm_mul_ps(y0, b1);
809 y1 = _mm_mul_ps(y1, b1);
810 s0 = _mm_add_ps(s0, y0);
811 s1 = _mm_add_ps(s1, y1);
813 x0 = _mm_loadu_ps(S2 + x);
814 x1 = _mm_loadu_ps(S2 + x + 4);
815 y0 = _mm_loadu_ps(S3 + x);
816 y1 = _mm_loadu_ps(S3 + x + 4);
818 x0 = _mm_mul_ps(x0, b2);
819 x1 = _mm_mul_ps(x1, b2);
820 y0 = _mm_mul_ps(y0, b3);
821 y1 = _mm_mul_ps(y1, b3);
822 s0 = _mm_add_ps(s0, x0);
823 s1 = _mm_add_ps(s1, x1);
824 s0 = _mm_add_ps(s0, y0);
825 s1 = _mm_add_ps(s1, y1);
827 t0 = _mm_add_epi32(_mm_cvtps_epi32(s0), preshift);
828 t1 = _mm_add_epi32(_mm_cvtps_epi32(s1), preshift);
830 t0 = _mm_add_epi16(_mm_packs_epi32(t0, t1), postshift);
831 _mm_storeu_si128( (__m128i*)(dst + x), t0);
838 typedef VResizeCubicVec_32f16<SHRT_MIN> VResizeCubicVec_32f16u;
839 typedef VResizeCubicVec_32f16<0> VResizeCubicVec_32f16s;
841 struct VResizeCubicVec_32f
843 int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
845 if( !checkHardwareSupport(CV_CPU_SSE) )
848 const float** src = (const float**)_src;
849 const float* beta = (const float*)_beta;
850 const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
851 float* dst = (float*)_dst;
853 __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
854 b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);
856 for( ; x <= width - 8; x += 8 )
858 __m128 x0, x1, y0, y1, s0, s1;
859 x0 = _mm_loadu_ps(S0 + x);
860 x1 = _mm_loadu_ps(S0 + x + 4);
861 y0 = _mm_loadu_ps(S1 + x);
862 y1 = _mm_loadu_ps(S1 + x + 4);
864 s0 = _mm_mul_ps(x0, b0);
865 s1 = _mm_mul_ps(x1, b0);
866 y0 = _mm_mul_ps(y0, b1);
867 y1 = _mm_mul_ps(y1, b1);
868 s0 = _mm_add_ps(s0, y0);
869 s1 = _mm_add_ps(s1, y1);
871 x0 = _mm_loadu_ps(S2 + x);
872 x1 = _mm_loadu_ps(S2 + x + 4);
873 y0 = _mm_loadu_ps(S3 + x);
874 y1 = _mm_loadu_ps(S3 + x + 4);
876 x0 = _mm_mul_ps(x0, b2);
877 x1 = _mm_mul_ps(x1, b2);
878 y0 = _mm_mul_ps(y0, b3);
879 y1 = _mm_mul_ps(y1, b3);
880 s0 = _mm_add_ps(s0, x0);
881 s1 = _mm_add_ps(s1, x1);
882 s0 = _mm_add_ps(s0, y0);
883 s1 = _mm_add_ps(s1, y1);
885 _mm_storeu_ps( dst + x, s0);
886 _mm_storeu_ps( dst + x + 4, s1);
895 typedef VResizeNoVec VResizeLinearVec_32s8u;
896 typedef VResizeNoVec VResizeLinearVec_32f16u;
897 typedef VResizeNoVec VResizeLinearVec_32f16s;
898 typedef VResizeNoVec VResizeLinearVec_32f;
900 typedef VResizeNoVec VResizeCubicVec_32s8u;
901 typedef VResizeNoVec VResizeCubicVec_32f16u;
902 typedef VResizeNoVec VResizeCubicVec_32f16s;
903 typedef VResizeNoVec VResizeCubicVec_32f;
907 typedef HResizeNoVec HResizeLinearVec_8u32s;
908 typedef HResizeNoVec HResizeLinearVec_16u32f;
909 typedef HResizeNoVec HResizeLinearVec_16s32f;
910 typedef HResizeNoVec HResizeLinearVec_32f;
911 typedef HResizeNoVec HResizeLinearVec_64f;
914 template<typename T, typename WT, typename AT, int ONE, class VecOp>
917 typedef T value_type;
919 typedef AT alpha_type;
921 void operator()(const T** src, WT** dst, int count,
922 const int* xofs, const AT* alpha,
923 int swidth, int dwidth, int cn, int xmin, int xmax ) const
928 int dx0 = vecOp((const uchar**)src, (uchar**)dst, count,
929 xofs, (const uchar*)alpha, swidth, dwidth, cn, xmin, xmax );
931 for( k = 0; k <= count - 2; k++ )
933 const T *S0 = src[k], *S1 = src[k+1];
934 WT *D0 = dst[k], *D1 = dst[k+1];
935 for( dx = dx0; dx < xmax; dx++ )
938 WT a0 = alpha[dx*2], a1 = alpha[dx*2+1];
939 WT t0 = S0[sx]*a0 + S0[sx + cn]*a1;
940 WT t1 = S1[sx]*a0 + S1[sx + cn]*a1;
941 D0[dx] = t0; D1[dx] = t1;
944 for( ; dx < dwidth; dx++ )
947 D0[dx] = WT(S0[sx]*ONE); D1[dx] = WT(S1[sx]*ONE);
951 for( ; k < count; k++ )
955 for( dx = 0; dx < xmax; dx++ )
958 D[dx] = S[sx]*alpha[dx*2] + S[sx+cn]*alpha[dx*2+1];
961 for( ; dx < dwidth; dx++ )
962 D[dx] = WT(S[xofs[dx]]*ONE);
968 template<typename T, typename WT, typename AT, class CastOp, class VecOp>
971 typedef T value_type;
973 typedef AT alpha_type;
975 void operator()(const WT** src, T* dst, const AT* beta, int width ) const
977 WT b0 = beta[0], b1 = beta[1];
978 const WT *S0 = src[0], *S1 = src[1];
982 int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
983 #if CV_ENABLE_UNROLLED
984 for( ; x <= width - 4; x += 4 )
987 t0 = S0[x]*b0 + S1[x]*b1;
988 t1 = S0[x+1]*b0 + S1[x+1]*b1;
989 dst[x] = castOp(t0); dst[x+1] = castOp(t1);
990 t0 = S0[x+2]*b0 + S1[x+2]*b1;
991 t1 = S0[x+3]*b0 + S1[x+3]*b1;
992 dst[x+2] = castOp(t0); dst[x+3] = castOp(t1);
995 for( ; x < width; x++ )
996 dst[x] = castOp(S0[x]*b0 + S1[x]*b1);
1001 struct VResizeLinear<uchar, int, short, FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>, VResizeLinearVec_32s8u>
1003 typedef uchar value_type;
1004 typedef int buf_type;
1005 typedef short alpha_type;
1007 void operator()(const buf_type** src, value_type* dst, const alpha_type* beta, int width ) const
1009 alpha_type b0 = beta[0], b1 = beta[1];
1010 const buf_type *S0 = src[0], *S1 = src[1];
1011 VResizeLinearVec_32s8u vecOp;
1013 int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
1014 #if CV_ENABLE_UNROLLED
1015 for( ; x <= width - 4; x += 4 )
1017 dst[x+0] = uchar(( ((b0 * (S0[x+0] >> 4)) >> 16) + ((b1 * (S1[x+0] >> 4)) >> 16) + 2)>>2);
1018 dst[x+1] = uchar(( ((b0 * (S0[x+1] >> 4)) >> 16) + ((b1 * (S1[x+1] >> 4)) >> 16) + 2)>>2);
1019 dst[x+2] = uchar(( ((b0 * (S0[x+2] >> 4)) >> 16) + ((b1 * (S1[x+2] >> 4)) >> 16) + 2)>>2);
1020 dst[x+3] = uchar(( ((b0 * (S0[x+3] >> 4)) >> 16) + ((b1 * (S1[x+3] >> 4)) >> 16) + 2)>>2);
1023 for( ; x < width; x++ )
1024 dst[x] = uchar(( ((b0 * (S0[x] >> 4)) >> 16) + ((b1 * (S1[x] >> 4)) >> 16) + 2)>>2);
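// This scalar tail mirrors VResizeLinearVec_32s8u step for step: pre-shift
// the accumulators by 4, keep the high 16 bits of the product with the
// fixed-point betas (the >> 16, matching _mm_mulhi_epi16), then round with
// (+2) >> 2 -- so SIMD and scalar pixels come out identical.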
1029 template<typename T, typename WT, typename AT>
1032 typedef T value_type;
1033 typedef WT buf_type;
1034 typedef AT alpha_type;
1036 void operator()(const T** src, WT** dst, int count,
1037 const int* xofs, const AT* alpha,
1038 int swidth, int dwidth, int cn, int xmin, int xmax ) const
1040 for( int k = 0; k < count; k++ )
1042 const T *S = src[k];
1044 int dx = 0, limit = xmin;
1047 for( ; dx < limit; dx++, alpha += 4 )
1049 int j, sx = xofs[dx] - cn;
1051 for( j = 0; j < 4; j++ )
1053 int sxj = sx + j*cn;
1054 if( (unsigned)sxj >= (unsigned)swidth )
1058 while( sxj >= swidth )
1061 v += S[sxj]*alpha[j];
1065 if( limit == dwidth )
1067 for( ; dx < xmax; dx++, alpha += 4 )
1070 D[dx] = S[sx-cn]*alpha[0] + S[sx]*alpha[1] +
1071 S[sx+cn]*alpha[2] + S[sx+cn*2]*alpha[3];
1081 template<typename T, typename WT, typename AT, class CastOp, class VecOp>
1084 typedef T value_type;
1085 typedef WT buf_type;
1086 typedef AT alpha_type;
1088 void operator()(const WT** src, T* dst, const AT* beta, int width ) const
1090 WT b0 = beta[0], b1 = beta[1], b2 = beta[2], b3 = beta[3];
1091 const WT *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
1095 int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
1096 for( ; x < width; x++ )
1097 dst[x] = castOp(S0[x]*b0 + S1[x]*b1 + S2[x]*b2 + S3[x]*b3);
1102 template<typename T, typename WT, typename AT>
1103 struct HResizeLanczos4
1105 typedef T value_type;
1106 typedef WT buf_type;
1107 typedef AT alpha_type;
1109 void operator()(const T** src, WT** dst, int count,
1110 const int* xofs, const AT* alpha,
1111 int swidth, int dwidth, int cn, int xmin, int xmax ) const
1113 for( int k = 0; k < count; k++ )
1115 const T *S = src[k];
1117 int dx = 0, limit = xmin;
1120 for( ; dx < limit; dx++, alpha += 8 )
1122 int j, sx = xofs[dx] - cn*3;
1124 for( j = 0; j < 8; j++ )
1126 int sxj = sx + j*cn;
1127 if( (unsigned)sxj >= (unsigned)swidth )
1131 while( sxj >= swidth )
1134 v += S[sxj]*alpha[j];
1138 if( limit == dwidth )
1140 for( ; dx < xmax; dx++, alpha += 8 )
1143 D[dx] = S[sx-cn*3]*alpha[0] + S[sx-cn*2]*alpha[1] +
1144 S[sx-cn]*alpha[2] + S[sx]*alpha[3] +
1145 S[sx+cn]*alpha[4] + S[sx+cn*2]*alpha[5] +
1146 S[sx+cn*3]*alpha[6] + S[sx+cn*4]*alpha[7];
1156 template<typename T, typename WT, typename AT, class CastOp, class VecOp>
1157 struct VResizeLanczos4
1159 typedef T value_type;
1160 typedef WT buf_type;
1161 typedef AT alpha_type;
1163 void operator()(const WT** src, T* dst, const AT* beta, int width ) const
1167 int k, x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);
1168 #if CV_ENABLE_UNROLLED
1169 for( ; x <= width - 4; x += 4 )
1172 const WT* S = src[0];
1173 WT s0 = S[x]*b, s1 = S[x+1]*b, s2 = S[x+2]*b, s3 = S[x+3]*b;
1175 for( k = 1; k < 8; k++ )
1177 b = beta[k]; S = src[k];
1178 s0 += S[x]*b; s1 += S[x+1]*b;
1179 s2 += S[x+2]*b; s3 += S[x+3]*b;
1182 dst[x] = castOp(s0); dst[x+1] = castOp(s1);
1183 dst[x+2] = castOp(s2); dst[x+3] = castOp(s3);
1186 for( ; x < width; x++ )
1188 dst[x] = castOp(src[0][x]*beta[0] + src[1][x]*beta[1] +
1189 src[2][x]*beta[2] + src[3][x]*beta[3] + src[4][x]*beta[4] +
1190 src[5][x]*beta[5] + src[6][x]*beta[6] + src[7][x]*beta[7]);
1196 static inline int clip(int x, int a, int b)
1198 return x >= a ? (x < b ? x : b-1) : a;
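// Asymmetric clamp to a valid index range: clip(x, 0, h) maps x < 0 to 0
// and x >= h to h - 1. E.g. clip(-3, 0, 480) == 0, clip(480, 0, 480) == 479.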
1201 static const int MAX_ESIZE=16;
1203 template <typename HResize, typename VResize>
1204 class resizeGeneric_Invoker :
1205 public ParallelLoopBody
1208 typedef typename HResize::value_type T;
1209 typedef typename HResize::buf_type WT;
1210 typedef typename HResize::alpha_type AT;
1212 resizeGeneric_Invoker(const Mat& _src, Mat &_dst, const int *_xofs, const int *_yofs,
1213 const AT* _alpha, const AT* __beta, const Size& _ssize, const Size &_dsize,
1214 int _ksize, int _xmin, int _xmax) :
1215 ParallelLoopBody(), src(_src), dst(_dst), xofs(_xofs), yofs(_yofs),
1216 alpha(_alpha), _beta(__beta), ssize(_ssize), dsize(_dsize),
1217 ksize(_ksize), xmin(_xmin), xmax(_xmax)
1221 virtual void operator() (const Range& range) const
1223 int dy, cn = src.channels();
1227 int bufstep = (int)alignSize(dsize.width, 16);
1228 AutoBuffer<WT> _buffer(bufstep*ksize);
1229 const T* srows[MAX_ESIZE]={0};
1230 WT* rows[MAX_ESIZE]={0};
1231 int prev_sy[MAX_ESIZE];
1233 for(int k = 0; k < ksize; k++ )
1236 rows[k] = (WT*)_buffer + bufstep*k;
1239 const AT* beta = _beta + ksize * range.start;
1241 for( dy = range.start; dy < range.end; dy++, beta += ksize )
1243 int sy0 = yofs[dy], k0=ksize, k1=0, ksize2 = ksize/2;
1245 for(int k = 0; k < ksize; k++ )
1247 int sy = clip(sy0 - ksize2 + 1 + k, 0, ssize.height);
1248 for( k1 = std::max(k1, k); k1 < ksize; k1++ )
1250 if( sy == prev_sy[k1] ) // if the sy-th row has been computed already, reuse it.
1253 memcpy( rows[k], rows[k1], bufstep*sizeof(rows[0][0]) );
1258 k0 = std::min(k0, k); // remember the first row that needs to be computed
1259 srows[k] = (T*)(src.data + src.step*sy);
1264 hresize( (const T**)(srows + k0), (WT**)(rows + k0), ksize - k0, xofs, (const AT*)(alpha),
1265 ssize.width, dsize.width, cn, xmin, xmax );
1266 vresize( (const WT**)rows, (T*)(dst.data + dst.step*dy), beta, dsize.width );
1273 const int* xofs, *yofs;
1274 const AT* alpha, *_beta;
1276 int ksize, xmin, xmax;
1279 template<class HResize, class VResize>
1280 static void resizeGeneric_( const Mat& src, Mat& dst,
1281 const int* xofs, const void* _alpha,
1282 const int* yofs, const void* _beta,
1283 int xmin, int xmax, int ksize )
1285 typedef typename HResize::alpha_type AT;
1287 const AT* beta = (const AT*)_beta;
1288 Size ssize = src.size(), dsize = dst.size();
1289 int cn = src.channels();
1294 // image resize is a separable operation: a horizontal pass resamples each needed source row, and a vertical pass blends the cached intermediate rows into every destination row.
1296 Range range(0, dsize.height);
1297 resizeGeneric_Invoker<HResize, VResize> invoker(src, dst, xofs, yofs, (const AT*)_alpha, beta,
1298 ssize, dsize, ksize, xmin, xmax);
1299 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
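// A minimal sketch of the dataflow above (illustrative; gatherSourceRows and
// dstRow are hypothetical helpers, not functions in this file):
//
//     for (int dy = 0; dy < dsize.height; dy++) {
//         gatherSourceRows(dy, srows);           // reuses rows cached via prev_sy
//         hresize(srows, rows, ...);             // horizontal pass -> WT rows
//         vresize(rows, dstRow(dy), beta, ...);  // vertical pass   -> T pixels
//     }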
1302 template <typename T, typename WT>
1303 struct ResizeAreaFastNoVec
1305 ResizeAreaFastNoVec(int, int) { }
1306 ResizeAreaFastNoVec(int, int, int, int) { }
1307 int operator() (const T*, T*, int) const
1312 class ResizeAreaFastVec_SIMD_8u
1315 ResizeAreaFastVec_SIMD_8u(int _cn, int _step) :
1316 cn(_cn), step(_step)
1318 use_simd = checkHardwareSupport(CV_CPU_SSE2);
1321 int operator() (const uchar* S, uchar* D, int w) const
1327 const uchar* S0 = S;
1328 const uchar* S1 = S0 + step;
1329 __m128i zero = _mm_setzero_si128();
1330 __m128i delta2 = _mm_set1_epi16(2);
1334 __m128i masklow = _mm_set1_epi16(0x00ff);
1335 for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
1337 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1338 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1340 __m128i s0 = _mm_add_epi16(_mm_srli_epi16(r0, 8), _mm_and_si128(r0, masklow));
1341 __m128i s1 = _mm_add_epi16(_mm_srli_epi16(r1, 8), _mm_and_si128(r1, masklow));
1342 s0 = _mm_add_epi16(_mm_add_epi16(s0, s1), delta2);
1343 s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
1345 _mm_storel_epi64((__m128i*)D, s0);
1349 for ( ; dx <= w - 11; dx += 6, S0 += 12, S1 += 12, D += 6)
1351 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1352 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1354 __m128i r0_16l = _mm_unpacklo_epi8(r0, zero);
1355 __m128i r0_16h = _mm_unpacklo_epi8(_mm_srli_si128(r0, 6), zero);
1356 __m128i r1_16l = _mm_unpacklo_epi8(r1, zero);
1357 __m128i r1_16h = _mm_unpacklo_epi8(_mm_srli_si128(r1, 6), zero);
1359 __m128i s0 = _mm_add_epi16(r0_16l, _mm_srli_si128(r0_16l, 6));
1360 __m128i s1 = _mm_add_epi16(r1_16l, _mm_srli_si128(r1_16l, 6));
1361 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1362 s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
1363 _mm_storel_epi64((__m128i*)D, s0);
1365 s0 = _mm_add_epi16(r0_16h, _mm_srli_si128(r0_16h, 6));
1366 s1 = _mm_add_epi16(r1_16h, _mm_srli_si128(r1_16h, 6));
1367 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1368 s0 = _mm_packus_epi16(_mm_srli_epi16(s0, 2), zero);
1369 _mm_storel_epi64((__m128i*)(D+3), s0);
1374 int v[] = { 0, 0, -1, -1 };
1375 __m128i mask = _mm_loadu_si128((const __m128i*)v);
1377 for ( ; dx <= w - 8; dx += 8, S0 += 16, S1 += 16, D += 8)
1379 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1380 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1382 __m128i r0_16l = _mm_unpacklo_epi8(r0, zero);
1383 __m128i r0_16h = _mm_unpackhi_epi8(r0, zero);
1384 __m128i r1_16l = _mm_unpacklo_epi8(r1, zero);
1385 __m128i r1_16h = _mm_unpackhi_epi8(r1, zero);
1387 __m128i s0 = _mm_add_epi16(r0_16l, _mm_srli_si128(r0_16l, 8));
1388 __m128i s1 = _mm_add_epi16(r1_16l, _mm_srli_si128(r1_16l, 8));
1389 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1390 __m128i res0 = _mm_srli_epi16(s0, 2);
1392 s0 = _mm_add_epi16(r0_16h, _mm_srli_si128(r0_16h, 8));
1393 s1 = _mm_add_epi16(r1_16h, _mm_srli_si128(r1_16h, 8));
1394 s0 = _mm_add_epi16(s1, _mm_add_epi16(s0, delta2));
1395 __m128i res1 = _mm_srli_epi16(s0, 2);
1396 s0 = _mm_packus_epi16(_mm_or_si128(_mm_andnot_si128(mask, res0),
1397 _mm_and_si128(mask, _mm_slli_si128(res1, 8))), zero);
1398 _mm_storel_epi64((__m128i*)(D), s0);
1411 class ResizeAreaFastVec_SIMD_16u
1414 ResizeAreaFastVec_SIMD_16u(int _cn, int _step) :
1415 cn(_cn), step(_step)
1417 use_simd = checkHardwareSupport(CV_CPU_SSE2);
1420 int operator() (const ushort* S, ushort* D, int w) const
1426 const ushort* S0 = (const ushort*)S;
1427 const ushort* S1 = (const ushort*)((const uchar*)(S) + step);
1428 __m128i masklow = _mm_set1_epi32(0x0000ffff);
1429 __m128i zero = _mm_setzero_si128();
1430 __m128i delta2 = _mm_set1_epi32(2);
1432 #define _mm_packus_epi32(a, zero) _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(a, 16), 16), zero)
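// SSE2 lacks an unsigned 32->16 pack (_mm_packus_epi32 is SSE4.1). For
// values already known to fit in 16 bits, shifting left then arithmetically
// right by 16 sign-extends the low halfword, so every 16-bit pattern is
// representable as int16 and the signed _mm_packs_epi32 preserves the bits
// without saturating.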
1436 for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4)
1438 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1439 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1441 __m128i s0 = _mm_add_epi32(_mm_srli_epi32(r0, 16), _mm_and_si128(r0, masklow));
1442 __m128i s1 = _mm_add_epi32(_mm_srli_epi32(r1, 16), _mm_and_si128(r1, masklow));
1443 s0 = _mm_add_epi32(_mm_add_epi32(s0, s1), delta2);
1444 s0 = _mm_srli_epi32(s0, 2);
1445 s0 = _mm_packus_epi32(s0, zero);
1447 _mm_storel_epi64((__m128i*)D, s0);
1451 for ( ; dx <= w - 4; dx += 3, S0 += 6, S1 += 6, D += 3)
1453 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1454 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1456 __m128i r0_16l = _mm_unpacklo_epi16(r0, zero);
1457 __m128i r0_16h = _mm_unpacklo_epi16(_mm_srli_si128(r0, 6), zero);
1458 __m128i r1_16l = _mm_unpacklo_epi16(r1, zero);
1459 __m128i r1_16h = _mm_unpacklo_epi16(_mm_srli_si128(r1, 6), zero);
1461 __m128i s0 = _mm_add_epi32(r0_16l, r0_16h);
1462 __m128i s1 = _mm_add_epi32(r1_16l, r1_16h);
1463 s0 = _mm_add_epi32(delta2, _mm_add_epi32(s0, s1));
1464 s0 = _mm_packus_epi32(_mm_srli_epi32(s0, 2), zero);
1465 _mm_storel_epi64((__m128i*)D, s0);
1470 for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4)
1472 __m128i r0 = _mm_loadu_si128((const __m128i*)S0);
1473 __m128i r1 = _mm_loadu_si128((const __m128i*)S1);
1475 __m128i r0_32l = _mm_unpacklo_epi16(r0, zero);
1476 __m128i r0_32h = _mm_unpackhi_epi16(r0, zero);
1477 __m128i r1_32l = _mm_unpacklo_epi16(r1, zero);
1478 __m128i r1_32h = _mm_unpackhi_epi16(r1, zero);
1480 __m128i s0 = _mm_add_epi32(r0_32l, r0_32h);
1481 __m128i s1 = _mm_add_epi32(r1_32l, r1_32h);
1482 s0 = _mm_add_epi32(s1, _mm_add_epi32(s0, delta2));
1483 s0 = _mm_packus_epi32(_mm_srli_epi32(s0, 2), zero);
1484 _mm_storel_epi64((__m128i*)D, s0);
1488 #undef _mm_packus_epi32
1500 typedef ResizeAreaFastNoVec<uchar, uchar> ResizeAreaFastVec_SIMD_8u;
1501 typedef ResizeAreaFastNoVec<ushort, ushort> ResizeAreaFastVec_SIMD_16u;
1504 template<typename T, typename SIMDVecOp>
1505 struct ResizeAreaFastVec
1507 ResizeAreaFastVec(int _scale_x, int _scale_y, int _cn, int _step) :
1508 scale_x(_scale_x), scale_y(_scale_y), cn(_cn), step(_step), vecOp(_cn, _step)
1510 fast_mode = scale_x == 2 && scale_y == 2 && (cn == 1 || cn == 3 || cn == 4);
1513 int operator() (const T* S, T* D, int w) const
1518 const T* nextS = (const T*)((const uchar*)S + step);
1519 int dx = vecOp(S, D, w);
1522 for( ; dx < w; ++dx )
1525 D[dx] = (T)((S[index] + S[index+1] + nextS[index] + nextS[index+1] + 2) >> 2);
1528 for( ; dx < w; dx += 3 )
1531 D[dx] = (T)((S[index] + S[index+3] + nextS[index] + nextS[index+3] + 2) >> 2);
1532 D[dx+1] = (T)((S[index+1] + S[index+4] + nextS[index+1] + nextS[index+4] + 2) >> 2);
1533 D[dx+2] = (T)((S[index+2] + S[index+5] + nextS[index+2] + nextS[index+5] + 2) >> 2);
1538 for( ; dx < w; dx += 4 )
1541 D[dx] = (T)((S[index] + S[index+4] + nextS[index] + nextS[index+4] + 2) >> 2);
1542 D[dx+1] = (T)((S[index+1] + S[index+5] + nextS[index+1] + nextS[index+5] + 2) >> 2);
1543 D[dx+2] = (T)((S[index+2] + S[index+6] + nextS[index+2] + nextS[index+6] + 2) >> 2);
1544 D[dx+3] = (T)((S[index+3] + S[index+7] + nextS[index+3] + nextS[index+7] + 2) >> 2);
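// Every output pixel is the rounding average of a 2x2 source block:
// (a + b + c + d + 2) >> 2 == floor((a + b + c + d) / 4 + 0.5), the same
// delta2 / shift-by-2 sequence used in the SIMD kernels above.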
1552 int scale_x, scale_y;
1559 template <typename T, typename WT, typename VecOp>
1560 class resizeAreaFast_Invoker :
1561 public ParallelLoopBody
1564 resizeAreaFast_Invoker(const Mat &_src, Mat &_dst,
1565 int _scale_x, int _scale_y, const int* _ofs, const int* _xofs) :
1566 ParallelLoopBody(), src(_src), dst(_dst), scale_x(_scale_x),
1567 scale_y(_scale_y), ofs(_ofs), xofs(_xofs)
1571 virtual void operator() (const Range& range) const
1573 Size ssize = src.size(), dsize = dst.size();
1574 int cn = src.channels();
1575 int area = scale_x*scale_y;
1576 float scale = 1.f/(area);
1577 int dwidth1 = (ssize.width/scale_x)*cn;
1582 VecOp vop(scale_x, scale_y, src.channels(), (int)src.step/*, area_ofs*/);
1584 for( dy = range.start; dy < range.end; dy++ )
1586 T* D = (T*)(dst.data + dst.step*dy);
1587 int sy0 = dy*scale_y;
1588 int w = sy0 + scale_y <= ssize.height ? dwidth1 : 0;
1590 if( sy0 >= ssize.height )
1592 for( dx = 0; dx < dsize.width; dx++ )
1597 dx = vop((const T*)(src.data + src.step * sy0), D, w);
1598 for( ; dx < w; dx++ )
1600 const T* S = (const T*)(src.data + src.step * sy0) + xofs[dx];
1603 #if CV_ENABLE_UNROLLED
1604 for( ; k <= area - 4; k += 4 )
1605 sum += S[ofs[k]] + S[ofs[k+1]] + S[ofs[k+2]] + S[ofs[k+3]];
1607 for( ; k < area; k++ )
1610 D[dx] = saturate_cast<T>(sum * scale);
1613 for( ; dx < dsize.width; dx++ )
1616 int count = 0, sx0 = xofs[dx];
1617 if( sx0 >= ssize.width )
1620 for( int sy = 0; sy < scale_y; sy++ )
1622 if( sy0 + sy >= ssize.height )
1624 const T* S = (const T*)(src.data + src.step*(sy0 + sy)) + sx0;
1625 for( int sx = 0; sx < scale_x*cn; sx += cn )
1627 if( sx0 + sx >= ssize.width )
1634 D[dx] = saturate_cast<T>((float)sum/count);
1642 int scale_x, scale_y;
1643 const int *ofs, *xofs;
1646 template<typename T, typename WT, typename VecOp>
1647 static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int* xofs,
1648 int scale_x, int scale_y )
1650 Range range(0, dst.rows);
1651 resizeAreaFast_Invoker<T, WT, VecOp> invoker(src, dst, scale_x,
1652 scale_y, ofs, xofs);
1653 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
1656 struct DecimateAlpha
1663 template<typename T, typename WT> class ResizeArea_Invoker :
1664 public ParallelLoopBody
1667 ResizeArea_Invoker( const Mat& _src, Mat& _dst,
1668 const DecimateAlpha* _xtab, int _xtab_size,
1669 const DecimateAlpha* _ytab, int _ytab_size,
1670 const int* _tabofs )
1675 xtab_size0 = _xtab_size;
1677 ytab_size = _ytab_size;
1681 virtual void operator() (const Range& range) const
1683 Size dsize = dst->size();
1684 int cn = dst->channels();
1686 AutoBuffer<WT> _buffer(dsize.width*2);
1687 const DecimateAlpha* xtab = xtab0;
1688 int xtab_size = xtab_size0;
1689 WT *buf = _buffer, *sum = buf + dsize.width;
1690 int j_start = tabofs[range.start], j_end = tabofs[range.end], j, k, dx, prev_dy = ytab[j_start].di;
1692 for( dx = 0; dx < dsize.width; dx++ )
1695 for( j = j_start; j < j_end; j++ )
1697 WT beta = ytab[j].alpha;
1698 int dy = ytab[j].di;
1699 int sy = ytab[j].si;
1702 const T* S = (const T*)(src->data + src->step*sy);
1703 for( dx = 0; dx < dsize.width; dx++ )
1707 for( k = 0; k < xtab_size; k++ )
1709 int dxn = xtab[k].di;
1710 WT alpha = xtab[k].alpha;
1711 buf[dxn] += S[xtab[k].si]*alpha;
1714 for( k = 0; k < xtab_size; k++ )
1716 int sxn = xtab[k].si;
1717 int dxn = xtab[k].di;
1718 WT alpha = xtab[k].alpha;
1719 WT t0 = buf[dxn] + S[sxn]*alpha;
1720 WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
1721 buf[dxn] = t0; buf[dxn+1] = t1;
1724 for( k = 0; k < xtab_size; k++ )
1726 int sxn = xtab[k].si;
1727 int dxn = xtab[k].di;
1728 WT alpha = xtab[k].alpha;
1729 WT t0 = buf[dxn] + S[sxn]*alpha;
1730 WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
1731 WT t2 = buf[dxn+2] + S[sxn+2]*alpha;
1732 buf[dxn] = t0; buf[dxn+1] = t1; buf[dxn+2] = t2;
1736 for( k = 0; k < xtab_size; k++ )
1738 int sxn = xtab[k].si;
1739 int dxn = xtab[k].di;
1740 WT alpha = xtab[k].alpha;
1741 WT t0 = buf[dxn] + S[sxn]*alpha;
1742 WT t1 = buf[dxn+1] + S[sxn+1]*alpha;
1743 buf[dxn] = t0; buf[dxn+1] = t1;
1744 t0 = buf[dxn+2] + S[sxn+2]*alpha;
1745 t1 = buf[dxn+3] + S[sxn+3]*alpha;
1746 buf[dxn+2] = t0; buf[dxn+3] = t1;
1751 for( k = 0; k < xtab_size; k++ )
1753 int sxn = xtab[k].si;
1754 int dxn = xtab[k].di;
1755 WT alpha = xtab[k].alpha;
1756 for( int c = 0; c < cn; c++ )
1757 buf[dxn + c] += S[sxn + c]*alpha;
1764 T* D = (T*)(dst->data + dst->step*prev_dy);
1766 for( dx = 0; dx < dsize.width; dx++ )
1768 D[dx] = saturate_cast<T>(sum[dx]);
1769 sum[dx] = beta*buf[dx];
1775 for( dx = 0; dx < dsize.width; dx++ )
1776 sum[dx] += beta*buf[dx];
1781 T* D = (T*)(dst->data + dst->step*prev_dy);
1782 for( dx = 0; dx < dsize.width; dx++ )
1783 D[dx] = saturate_cast<T>(sum[dx]);
1790 const DecimateAlpha* xtab0;
1791 const DecimateAlpha* ytab;
1792 int xtab_size0, ytab_size;
1797 template <typename T, typename WT>
1798 static void resizeArea_( const Mat& src, Mat& dst,
1799 const DecimateAlpha* xtab, int xtab_size,
1800 const DecimateAlpha* ytab, int ytab_size,
1803 parallel_for_(Range(0, dst.rows),
1804 ResizeArea_Invoker<T, WT>(src, dst, xtab, xtab_size, ytab, ytab_size, tabofs),
1805 dst.total()/((double)(1 << 16)));
1809 typedef void (*ResizeFunc)( const Mat& src, Mat& dst,
1810 const int* xofs, const void* alpha,
1811 const int* yofs, const void* beta,
1812 int xmin, int xmax, int ksize );
1814 typedef void (*ResizeAreaFastFunc)( const Mat& src, Mat& dst,
1815 const int* ofs, const int *xofs,
1816 int scale_x, int scale_y );
1818 typedef void (*ResizeAreaFunc)( const Mat& src, Mat& dst,
1819 const DecimateAlpha* xtab, int xtab_size,
1820 const DecimateAlpha* ytab, int ytab_size,
1824 static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, DecimateAlpha* tab )
1827 for(int dx = 0; dx < dsize; dx++ )
1829 double fsx1 = dx * scale;
1830 double fsx2 = fsx1 + scale;
1831 double cellWidth = std::min(scale, ssize - fsx1);
1833 int sx1 = cvCeil(fsx1), sx2 = cvFloor(fsx2);
1835 sx2 = std::min(sx2, ssize - 1);
1836 sx1 = std::min(sx1, sx2);
1838 if( sx1 - fsx1 > 1e-3 )
1840 assert( k < ssize*2 );
1841 tab[k].di = dx * cn;
1842 tab[k].si = (sx1 - 1) * cn;
1843 tab[k++].alpha = (float)((sx1 - fsx1) / cellWidth);
1846 for(int sx = sx1; sx < sx2; sx++ )
1848 assert( k < ssize*2 );
1849 tab[k].di = dx * cn;
1850 tab[k].si = sx * cn;
1851 tab[k++].alpha = float(1.0 / cellWidth);
1854 if( fsx2 - sx2 > 1e-3 )
1856 assert( k < ssize*2 );
1857 tab[k].di = dx * cn;
1858 tab[k].si = sx2 * cn;
1859 tab[k++].alpha = (float)(std::min(std::min(fsx2 - sx2, 1.), cellWidth) / cellWidth);
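// Worked example: ssize = 5, dsize = 2, scale = 2.5, cn = 1. For dx = 0 the
// cell [0, 2.5) takes pixels 0 and 1 with alpha = 1/2.5 = 0.4 each and the
// fractional tail of pixel 2 with alpha = 0.5/2.5 = 0.2 (sum == 1); dx = 1
// mirrors this: 0.2, 0.4, 0.4 over pixels 2..4.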
1865 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
1866 class IPPresizeInvoker :
1867 public ParallelLoopBody
1870 IPPresizeInvoker(Mat &_src, Mat &_dst, double &_inv_scale_x, double &_inv_scale_y, int _mode, ippiResizeSqrPixelFunc _func, bool *_ok) :
1871 ParallelLoopBody(), src(_src), dst(_dst), inv_scale_x(_inv_scale_x), inv_scale_y(_inv_scale_y), mode(_mode), func(_func), ok(_ok)
1876 virtual void operator() (const Range& range) const
1878 int cn = src.channels();
1879 IppiRect srcroi = { 0, range.start, src.cols, range.end - range.start };
1880 int dsty = CV_IMIN(cvRound(range.start * inv_scale_y), dst.rows);
1881 int dstwidth = CV_IMIN(cvRound(src.cols * inv_scale_x), dst.cols);
1882 int dstheight = CV_IMIN(cvRound(range.end * inv_scale_y), dst.rows);
1883 IppiRect dstroi = { 0, dsty, dstwidth, dstheight - dsty };
1885 ippiResizeGetBufSize( srcroi, dstroi, cn, mode, &bufsize );
1886 AutoBuffer<uchar> buf(bufsize + 64);
1887 uchar* bufptr = alignPtr((uchar*)buf, 32);
1888 if( func( src.data, ippiSize(src.cols, src.rows), (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, inv_scale_x, inv_scale_y, 0, 0, mode, bufptr ) < 0 )
1897 ippiResizeSqrPixelFunc func;
1899 const IPPresizeInvoker& operator= (const IPPresizeInvoker&);
1905 static void ocl_computeResizeAreaTabs(int ssize, int dsize, double scale, int * const map_tab,
1906 float * const alpha_tab, int * const ofs_tab)
1909 for ( ; dx < dsize; dx++)
1913 double fsx1 = dx * scale;
1914 double fsx2 = fsx1 + scale;
1915 double cellWidth = std::min(scale, ssize - fsx1);
1917 int sx1 = cvCeil(fsx1), sx2 = cvFloor(fsx2);
1919 sx2 = std::min(sx2, ssize - 1);
1920 sx1 = std::min(sx1, sx2);
1922 if (sx1 - fsx1 > 1e-3)
1924 map_tab[k] = sx1 - 1;
1925 alpha_tab[k++] = (float)((sx1 - fsx1) / cellWidth);
1928 for (int sx = sx1; sx < sx2; sx++)
1931 alpha_tab[k++] = float(1.0 / cellWidth);
1934 if (fsx2 - sx2 > 1e-3)
1937 alpha_tab[k++] = (float)(std::min(std::min(fsx2 - sx2, 1.), cellWidth) / cellWidth);
1943 static void ocl_computeResizeAreaFastTabs(int * dmap_tab, int * smap_tab, int scale, int dcols, int scol)
1945 for (int i = 0; i < dcols; ++i)
1946 dmap_tab[i] = scale * i;
1948 for (int i = 0, size = dcols * scale; i < size; ++i)
1949 smap_tab[i] = std::min(scol - 1, i);
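// Example: scale = 2, dcols = 3, scol = 6 gives dmap_tab = {0, 2, 4} (first
// source column of each 2-wide run) and smap_tab = {0, 1, 2, 3, 4, 5};
// the std::min clamp only matters when the last run sticks out past scol.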
1952 static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
1953 double fx, double fy, int interpolation)
1955 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
1957 double inv_fx = 1. / fx, inv_fy = 1. / fy;
1958 float inv_fxf = (float)inv_fx, inv_fyf = (float)inv_fy;
1960 if( cn == 3 || !(cn <= 4 &&
1961 (interpolation == INTER_NEAREST || interpolation == INTER_LINEAR ||
1962 (interpolation == INTER_AREA && inv_fx >= 1 && inv_fy >= 1) )) )
1965 UMat src = _src.getUMat();
1966 _dst.create(dsize, type);
1967 UMat dst = _dst.getUMat();
1970 size_t globalsize[] = { dst.cols, dst.rows };
1972 if (interpolation == INTER_LINEAR)
1974 int wdepth = std::max(depth, CV_32S);
1975 int wtype = CV_MAKETYPE(wdepth, cn);
1977 k.create("resizeLN", ocl::imgproc::resize_oclsrc,
1978 format("-D INTER_LINEAR -D depth=%d -D PIXTYPE=%s -D WORKTYPE=%s -D convertToWT=%s -D convertToDT=%s",
1979 depth, ocl::typeToStr(type), ocl::typeToStr(wtype),
1980 ocl::convertTypeStr(depth, wdepth, cn, buf[0]),
1981 ocl::convertTypeStr(wdepth, depth, cn, buf[1])));
1983 else if (interpolation == INTER_NEAREST)
1985 k.create("resizeNN", ocl::imgproc::resize_oclsrc,
1986 format("-D INTER_NEAREST -D PIXTYPE=%s -D cn", ocl::memopTypeToStr(type), cn));
1988 else if (interpolation == INTER_AREA)
1990 int iscale_x = saturate_cast<int>(inv_fx);
1991 int iscale_y = saturate_cast<int>(inv_fy);
1992 bool is_area_fast = std::abs(inv_fx - iscale_x) < DBL_EPSILON &&
1993 std::abs(inv_fy - iscale_y) < DBL_EPSILON;
1994 int wdepth = std::max(depth, is_area_fast ? CV_32S : CV_32F);
1995 int wtype = CV_MAKE_TYPE(wdepth, cn);
1998 String buildOption = format("-D INTER_AREA -D T=%s -D WTV=%s -D convertToWTV=%s",
1999 ocl::typeToStr(type), ocl::typeToStr(wtype),
2000 ocl::convertTypeStr(depth, wdepth, cn, cvt[0]));
2002 UMat alphaOcl, tabofsOcl, mapOcl;
2007 int wdepth2 = std::max(CV_32F, depth), wtype2 = CV_MAKE_TYPE(wdepth2, cn);
2008 buildOption = buildOption + format(" -D convertToT=%s -D WT2V=%s -D convertToWT2V=%s -D INTER_AREA_FAST"
2009 " -D XSCALE=%d -D YSCALE=%d -D SCALE=%ff",
2010 ocl::convertTypeStr(wdepth2, depth, cn, cvt[0]),
2011 ocl::typeToStr(wtype2), ocl::convertTypeStr(wdepth, wdepth2, cn, cvt[1]),
2012 iscale_x, iscale_y, 1.0f / (iscale_x * iscale_y));
2014 k.create("resizeAREA_FAST", ocl::imgproc::resize_oclsrc, buildOption);
2018 int smap_tab_size = dst.cols * iscale_x + dst.rows * iscale_y;
2019 AutoBuffer<int> dmap_tab(dst.cols + dst.rows), smap_tab(smap_tab_size);
2020 int * dxmap_tab = dmap_tab, * dymap_tab = dxmap_tab + dst.cols;
2021 int * sxmap_tab = smap_tab, * symap_tab = smap_tab + dst.cols * iscale_y;
2023 ocl_computeResizeAreaFastTabs(dxmap_tab, sxmap_tab, iscale_x, dst.cols, src.cols);
2024 ocl_computeResizeAreaFastTabs(dymap_tab, symap_tab, iscale_y, dst.rows, src.rows);
2026 Mat(1, dst.cols + dst.rows, CV_32SC1, (void *)dmap_tab).copyTo(dmap);
2027 Mat(1, smap_tab_size, CV_32SC1, (void *)smap_tab).copyTo(smap);
2031 buildOption = buildOption + format(" -D convertToT=%s", ocl::convertTypeStr(wdepth, depth, cn, cvt[0]));
2032 k.create("resizeAREA", ocl::imgproc::resize_oclsrc, buildOption);
2036 Size ssize = src.size();
2037 int xytab_size = (ssize.width + ssize.height) << 1;
2038 int tabofs_size = dsize.height + dsize.width + 2;
2040 AutoBuffer<int> _xymap_tab(xytab_size), _xyofs_tab(tabofs_size);
2041 AutoBuffer<float> _xyalpha_tab(xytab_size);
2042 int * xmap_tab = _xymap_tab, * ymap_tab = _xymap_tab + (ssize.width << 1);
2043 float * xalpha_tab = _xyalpha_tab, * yalpha_tab = _xyalpha_tab + (ssize.width << 1);
2044 int * xofs_tab = _xyofs_tab, * yofs_tab = _xyofs_tab + dsize.width + 1;
2046 ocl_computeResizeAreaTabs(ssize.width, dsize.width, inv_fx, xmap_tab, xalpha_tab, xofs_tab);
2047 ocl_computeResizeAreaTabs(ssize.height, dsize.height, inv_fy, ymap_tab, yalpha_tab, yofs_tab);
2049 // loading precomputed arrays to GPU
2050 Mat(1, xytab_size, CV_32FC1, (void *)_xyalpha_tab).copyTo(alphaOcl);
2051 Mat(1, xytab_size, CV_32SC1, (void *)_xymap_tab).copyTo(mapOcl);
2052 Mat(1, tabofs_size, CV_32SC1, (void *)_xyofs_tab).copyTo(tabofsOcl);
2055 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), dstarg = ocl::KernelArg::WriteOnly(dst);
2058 k.args(srcarg, dstarg, ocl::KernelArg::PtrReadOnly(dmap), ocl::KernelArg::PtrReadOnly(smap));
2060 k.args(srcarg, dstarg, inv_fxf, inv_fyf, ocl::KernelArg::PtrReadOnly(tabofsOcl),
2061 ocl::KernelArg::PtrReadOnly(mapOcl), ocl::KernelArg::PtrReadOnly(alphaOcl));
2063 return k.run(2, globalsize, NULL, false);
2068 k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
2069 (float)inv_fx, (float)inv_fy);
2071 return k.run(2, globalsize, 0, false);
2078 //////////////////////////////////////////////////////////////////////////////////////////
2080 void cv::resize( InputArray _src, OutputArray _dst, Size dsize,
2081 double inv_scale_x, double inv_scale_y, int interpolation )
2083 static ResizeFunc linear_tab[] =
2086 HResizeLinear<uchar, int, short,
2087 INTER_RESIZE_COEF_SCALE,
2088 HResizeLinearVec_8u32s>,
2089 VResizeLinear<uchar, int, short,
2090 FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
2091 VResizeLinearVec_32s8u> >,
2094 HResizeLinear<ushort, float, float, 1,
2095 HResizeLinearVec_16u32f>,
2096 VResizeLinear<ushort, float, float, Cast<float, ushort>,
2097 VResizeLinearVec_32f16u> >,
2099 HResizeLinear<short, float, float, 1,
2100 HResizeLinearVec_16s32f>,
2101 VResizeLinear<short, float, float, Cast<float, short>,
2102 VResizeLinearVec_32f16s> >,
2105 HResizeLinear<float, float, float, 1,
2106 HResizeLinearVec_32f>,
2107 VResizeLinear<float, float, float, Cast<float, float>,
2108 VResizeLinearVec_32f> >,
2110 HResizeLinear<double, double, float, 1,
2112 VResizeLinear<double, double, float, Cast<double, double>,
2117 static ResizeFunc cubic_tab[] =
2120 HResizeCubic<uchar, int, short>,
2121 VResizeCubic<uchar, int, short,
2122 FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
2123 VResizeCubicVec_32s8u> >,
2126 HResizeCubic<ushort, float, float>,
2127 VResizeCubic<ushort, float, float, Cast<float, ushort>,
2128 VResizeCubicVec_32f16u> >,
2130 HResizeCubic<short, float, float>,
2131 VResizeCubic<short, float, float, Cast<float, short>,
2132 VResizeCubicVec_32f16s> >,
2135 HResizeCubic<float, float, float>,
2136 VResizeCubic<float, float, float, Cast<float, float>,
2137 VResizeCubicVec_32f> >,
2139 HResizeCubic<double, double, float>,
2140 VResizeCubic<double, double, float, Cast<double, double>,
2145 static ResizeFunc lanczos4_tab[] =
2147 resizeGeneric_<HResizeLanczos4<uchar, int, short>,
2148 VResizeLanczos4<uchar, int, short,
2149 FixedPtCast<int, uchar, INTER_RESIZE_COEF_BITS*2>,
2152 resizeGeneric_<HResizeLanczos4<ushort, float, float>,
2153 VResizeLanczos4<ushort, float, float, Cast<float, ushort>,
2155 resizeGeneric_<HResizeLanczos4<short, float, float>,
2156 VResizeLanczos4<short, float, float, Cast<float, short>,
2159 resizeGeneric_<HResizeLanczos4<float, float, float>,
2160 VResizeLanczos4<float, float, float, Cast<float, float>,
2162 resizeGeneric_<HResizeLanczos4<double, double, float>,
2163 VResizeLanczos4<double, double, float, Cast<double, double>,
2168 static ResizeAreaFastFunc areafast_tab[] =
2170 resizeAreaFast_<uchar, int, ResizeAreaFastVec<uchar, ResizeAreaFastVec_SIMD_8u> >,
2172 resizeAreaFast_<ushort, float, ResizeAreaFastVec<ushort, ResizeAreaFastVec_SIMD_16u> >,
2173 resizeAreaFast_<short, float, ResizeAreaFastVec<short, ResizeAreaFastNoVec<short, float> > >,
2175 resizeAreaFast_<float, float, ResizeAreaFastNoVec<float, float> >,
2176 resizeAreaFast_<double, double, ResizeAreaFastNoVec<double, double> >,
2180 static ResizeAreaFunc area_tab[] =
2182 resizeArea_<uchar, float>, 0, resizeArea_<ushort, float>,
2183 resizeArea_<short, float>, 0, resizeArea_<float, float>,
2184 resizeArea_<double, double>, 0
2187 Size ssize = _src.size();
2189 CV_Assert( ssize.area() > 0 );
2190 CV_Assert( dsize.area() > 0 || (inv_scale_x > 0 && inv_scale_y > 0) );
2191 if( dsize.area() == 0 )
2193 dsize = Size(saturate_cast<int>(ssize.width*inv_scale_x),
2194 saturate_cast<int>(ssize.height*inv_scale_y));
2195 CV_Assert( dsize.area() > 0 );
2199 inv_scale_x = (double)dsize.width/ssize.width;
2200 inv_scale_y = (double)dsize.height/ssize.height;
2203 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
2204 ocl_resize(_src, _dst, dsize, inv_scale_x, inv_scale_y, interpolation))
2206 Mat src = _src.getMat();
2207 _dst.create(dsize, src.type());
2208 Mat dst = _dst.getMat();
2210 #ifdef HAVE_TEGRA_OPTIMIZATION
2211 if (tegra::resize(src, dst, (float)inv_scale_x, (float)inv_scale_y, interpolation))
2215 int depth = src.depth(), cn = src.channels();
2216 double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y;
2217 int k, sx, sy, dx, dy;
2219 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
2220 int mode = interpolation == INTER_LINEAR ? IPPI_INTER_LINEAR : 0;
2221 int type = src.type();
2222 ippiResizeSqrPixelFunc ippFunc =
2223 type == CV_8UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C1R :
2224 type == CV_8UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C3R :
2225 type == CV_8UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C4R :
2226 type == CV_16UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C1R :
2227 type == CV_16UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C3R :
2228 type == CV_16UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C4R :
2229 type == CV_16SC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C1R :
2230 type == CV_16SC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C3R :
2231 type == CV_16SC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C4R :
2232 type == CV_32FC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C1R :
2233 type == CV_32FC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C3R :
2234 type == CV_32FC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C4R :
2236 if( ippFunc && mode != 0 )
2239 Range range(0, src.rows);
2240 IPPresizeInvoker invoker(src, dst, inv_scale_x, inv_scale_y, mode, ippFunc, &ok);
2241 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
2247 if( interpolation == INTER_NEAREST )
2249 resizeNN( src, dst, inv_scale_x, inv_scale_y );
2254 int iscale_x = saturate_cast<int>(scale_x);
2255 int iscale_y = saturate_cast<int>(scale_y);
2257 bool is_area_fast = std::abs(scale_x - iscale_x) < DBL_EPSILON &&
2258 std::abs(scale_y - iscale_y) < DBL_EPSILON;
2260 // when scale_x and scale_y are both equal to 2,
2261 // the fast INTER_AREA path produces the same result as INTER_LINEAR
2262 if( interpolation == INTER_LINEAR && is_area_fast && iscale_x == 2 && iscale_y == 2 )
2263 interpolation = INTER_AREA;
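// (illustration, added for clarity: for an exact 2x decimation each destination pixel
// maps to the centre of a 2x2 source block, so the bilinear weights become 1/4 for
// all four taps -- exactly the 2x2 box average that INTER_AREA computes)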
2265 // true "area" interpolation is only implemented for shrinking, i.e. scale_x >= 1 && scale_y >= 1
2266 // (inv_scale_x <= 1 && inv_scale_y <= 1); enlarging is emulated using a variant of bilinear interpolation
2267 if( interpolation == INTER_AREA && scale_x >= 1 && scale_y >= 1 )
2271 int area = iscale_x*iscale_y;
2272 size_t srcstep = src.step / src.elemSize1();
2273 AutoBuffer<int> _ofs(area + dsize.width*cn);
2275 int* xofs = ofs + area;
2276 ResizeAreaFastFunc func = areafast_tab[depth];
2277 CV_Assert( func != 0 );
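// Note (added for clarity): ofs[] enumerates the offsets of all iscale_x*iscale_y
// source samples that contribute to one destination pixel, and xofs[] maps each
// destination column (per channel) to its first source column. For example, with
// iscale_x == iscale_y == 2 and cn == 1, ofs = { 0, 1, srcstep, srcstep+1 }.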
2279 for( sy = 0, k = 0; sy < iscale_y; sy++ )
2280 for( sx = 0; sx < iscale_x; sx++ )
2281 ofs[k++] = (int)(sy*srcstep + sx*cn);
2283 for( dx = 0; dx < dsize.width; dx++ )
2287 for( k = 0; k < cn; k++ )
2288 xofs[j + k] = sx + k;
2291 func( src, dst, ofs, xofs, iscale_x, iscale_y );
2295 ResizeAreaFunc func = area_tab[depth];
2296 CV_Assert( func != 0 && cn <= 4 );
2298 AutoBuffer<DecimateAlpha> _xytab((ssize.width + ssize.height)*2);
2299 DecimateAlpha* xtab = _xytab, *ytab = xtab + ssize.width*2;
2301 int xtab_size = computeResizeAreaTab(ssize.width, dsize.width, cn, scale_x, xtab);
2302 int ytab_size = computeResizeAreaTab(ssize.height, dsize.height, 1, scale_y, ytab);
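// Note (added for clarity): each DecimateAlpha entry is a (source index, destination
// index, weight) triple; xtab/ytab list, for every destination column/row, the source
// columns/rows it overlaps, with the fractional overlap area used as the weight.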
2304 AutoBuffer<int> _tabofs(dsize.height + 1);
2305 int* tabofs = _tabofs;
2306 for( k = 0, dy = 0; k < ytab_size; k++ )
2308 if( k == 0 || ytab[k].di != ytab[k-1].di )
2310 assert( ytab[k].di == dy );
2314 tabofs[dy] = ytab_size;
2316 func( src, dst, xtab, xtab_size, ytab, ytab_size, tabofs );
2321 int xmin = 0, xmax = dsize.width, width = dsize.width*cn;
2322 bool area_mode = interpolation == INTER_AREA;
2323 bool fixpt = depth == CV_8U;
2326 int ksize=0, ksize2;
2327 if( interpolation == INTER_CUBIC )
2328 ksize = 4, func = cubic_tab[depth];
2329 else if( interpolation == INTER_LANCZOS4 )
2330 ksize = 8, func = lanczos4_tab[depth];
2331 else if( interpolation == INTER_LINEAR || interpolation == INTER_AREA )
2332 ksize = 2, func = linear_tab[depth];
2334 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
2337 CV_Assert( func != 0 );
2339 AutoBuffer<uchar> _buffer((width + dsize.height)*(sizeof(int) + sizeof(float)*ksize));
2340 int* xofs = (int*)(uchar*)_buffer;
2341 int* yofs = xofs + width;
2342 float* alpha = (float*)(yofs + dsize.height);
2343 short* ialpha = (short*)alpha;
2344 float* beta = alpha + width*ksize;
2345 short* ibeta = ialpha + width*ksize;
2346 float cbuf[MAX_ESIZE];
2348 for( dx = 0; dx < dsize.width; dx++ )
2352 fx = (float)((dx+0.5)*scale_x - 0.5);
2358 sx = cvFloor(dx*scale_x);
2359 fx = (float)((dx+1) - (sx+1)*inv_scale_x);
2360 fx = fx <= 0 ? 0.f : fx - cvFloor(fx);
2370 if( sx + ksize2 >= ssize.width )
2372 xmax = std::min( xmax, dx );
2373 if( sx >= ssize.width-1 )
2374 fx = 0, sx = ssize.width-1;
2377 for( k = 0, sx *= cn; k < cn; k++ )
2378 xofs[dx*cn + k] = sx + k;
2380 if( interpolation == INTER_CUBIC )
2381 interpolateCubic( fx, cbuf );
2382 else if( interpolation == INTER_LANCZOS4 )
2383 interpolateLanczos4( fx, cbuf );
2391 for( k = 0; k < ksize; k++ )
2392 ialpha[dx*cn*ksize + k] = saturate_cast<short>(cbuf[k]*INTER_RESIZE_COEF_SCALE);
2393 for( ; k < cn*ksize; k++ )
2394 ialpha[dx*cn*ksize + k] = ialpha[dx*cn*ksize + k - ksize];
2398 for( k = 0; k < ksize; k++ )
2399 alpha[dx*cn*ksize + k] = cbuf[k];
2400 for( ; k < cn*ksize; k++ )
2401 alpha[dx*cn*ksize + k] = alpha[dx*cn*ksize + k - ksize];
2405 for( dy = 0; dy < dsize.height; dy++ )
2409 fy = (float)((dy+0.5)*scale_y - 0.5);
2415 sy = cvFloor(dy*scale_y);
2416 fy = (float)((dy+1) - (sy+1)*inv_scale_y);
2417 fy = fy <= 0 ? 0.f : fy - cvFloor(fy);
2421 if( interpolation == INTER_CUBIC )
2422 interpolateCubic( fy, cbuf );
2423 else if( interpolation == INTER_LANCZOS4 )
2424 interpolateLanczos4( fy, cbuf );
2433 for( k = 0; k < ksize; k++ )
2434 ibeta[dy*ksize + k] = saturate_cast<short>(cbuf[k]*INTER_RESIZE_COEF_SCALE);
2438 for( k = 0; k < ksize; k++ )
2439 beta[dy*ksize + k] = cbuf[k];
2443 func( src, dst, xofs, fixpt ? (void*)ialpha : (void*)alpha, yofs,
2444 fixpt ? (void*)ibeta : (void*)beta, xmin, xmax, ksize );
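/* Usage sketch (illustrative only; "input.png" is a placeholder file name).
   Shrinking generally prefers INTER_AREA; enlarging, INTER_LINEAR or INTER_CUBIC:

       #include <opencv2/imgproc/imgproc.hpp>
       #include <opencv2/highgui/highgui.hpp>

       cv::Mat src = cv::imread("input.png"), small, big;
       cv::resize(src, small, cv::Size(), 0.5, 0.5, cv::INTER_AREA); // 2x decimation
       cv::resize(small, big, src.size(), 0, 0, cv::INTER_CUBIC);    // back to original size
*/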
2448 /****************************************************************************************\
2449 * General warping (affine, perspective, remap) *
2450 \****************************************************************************************/
2455 template<typename T>
2456 static void remapNearest( const Mat& _src, Mat& _dst, const Mat& _xy,
2457 int borderType, const Scalar& _borderValue )
2459 Size ssize = _src.size(), dsize = _dst.size();
2460 int cn = _src.channels();
2461 const T* S0 = (const T*)_src.data;
2462 size_t sstep = _src.step/sizeof(S0[0]);
2463 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2464 saturate_cast<T>(_borderValue[1]),
2465 saturate_cast<T>(_borderValue[2]),
2466 saturate_cast<T>(_borderValue[3]));
2469 unsigned width1 = ssize.width, height1 = ssize.height;
2471 if( _dst.isContinuous() && _xy.isContinuous() )
2473 dsize.width *= dsize.height;
2477 for( dy = 0; dy < dsize.height; dy++ )
2479 T* D = (T*)(_dst.data + _dst.step*dy);
2480 const short* XY = (const short*)(_xy.data + _xy.step*dy);
2484 for( dx = 0; dx < dsize.width; dx++ )
2486 int sx = XY[dx*2], sy = XY[dx*2+1];
2487 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
2488 D[dx] = S0[sy*sstep + sx];
2491 if( borderType == BORDER_REPLICATE )
2493 sx = clip(sx, 0, ssize.width);
2494 sy = clip(sy, 0, ssize.height);
2495 D[dx] = S0[sy*sstep + sx];
2497 else if( borderType == BORDER_CONSTANT )
2499 else if( borderType != BORDER_TRANSPARENT )
2501 sx = borderInterpolate(sx, ssize.width, borderType);
2502 sy = borderInterpolate(sy, ssize.height, borderType);
2503 D[dx] = S0[sy*sstep + sx];
2510 for( dx = 0; dx < dsize.width; dx++, D += cn )
2512 int sx = XY[dx*2], sy = XY[dx*2+1], k;
2514 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
2518 S = S0 + sy*sstep + sx*3;
2519 D[0] = S[0], D[1] = S[1], D[2] = S[2];
2523 S = S0 + sy*sstep + sx*4;
2524 D[0] = S[0], D[1] = S[1], D[2] = S[2], D[3] = S[3];
2528 S = S0 + sy*sstep + sx*cn;
2529 for( k = 0; k < cn; k++ )
2533 else if( borderType != BORDER_TRANSPARENT )
2535 if( borderType == BORDER_REPLICATE )
2537 sx = clip(sx, 0, ssize.width);
2538 sy = clip(sy, 0, ssize.height);
2539 S = S0 + sy*sstep + sx*cn;
2541 else if( borderType == BORDER_CONSTANT )
2545 sx = borderInterpolate(sx, ssize.width, borderType);
2546 sy = borderInterpolate(sy, ssize.height, borderType);
2547 S = S0 + sy*sstep + sx*cn;
2549 for( k = 0; k < cn; k++ )
2560 int operator()( const Mat&, void*, const short*, const ushort*,
2561 const void*, int ) const { return 0; }
2568 int operator()( const Mat& _src, void* _dst, const short* XY,
2569 const ushort* FXY, const void* _wtab, int width ) const
2571 int cn = _src.channels(), x = 0, sstep = (int)_src.step;
2573 if( (cn != 1 && cn != 3 && cn != 4) || !checkHardwareSupport(CV_CPU_SSE2) ||
2577 const uchar *S0 = _src.data, *S1 = _src.data + _src.step;
2578 const short* wtab = cn == 1 ? (const short*)_wtab : &BilinearTab_iC4[0][0][0];
2579 uchar* D = (uchar*)_dst;
2580 __m128i delta = _mm_set1_epi32(INTER_REMAP_COEF_SCALE/2);
2581 __m128i xy2ofs = _mm_set1_epi32(cn + (sstep << 16));
2582 __m128i z = _mm_setzero_si128();
2583 int CV_DECL_ALIGNED(16) iofs0[4], iofs1[4];
2587 for( ; x <= width - 8; x += 8 )
2589 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2590 __m128i xy1 = _mm_loadu_si128( (const __m128i*)(XY + x*2 + 8));
2591 __m128i v0, v1, v2, v3, a0, a1, b0, b1;
2594 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2595 xy1 = _mm_madd_epi16( xy1, xy2ofs );
2596 _mm_store_si128( (__m128i*)iofs0, xy0 );
2597 _mm_store_si128( (__m128i*)iofs1, xy1 );
2599 i0 = *(ushort*)(S0 + iofs0[0]) + (*(ushort*)(S0 + iofs0[1]) << 16);
2600 i1 = *(ushort*)(S0 + iofs0[2]) + (*(ushort*)(S0 + iofs0[3]) << 16);
2601 v0 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2602 i0 = *(ushort*)(S1 + iofs0[0]) + (*(ushort*)(S1 + iofs0[1]) << 16);
2603 i1 = *(ushort*)(S1 + iofs0[2]) + (*(ushort*)(S1 + iofs0[3]) << 16);
2604 v1 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2605 v0 = _mm_unpacklo_epi8(v0, z);
2606 v1 = _mm_unpacklo_epi8(v1, z);
2608 a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x]*4)),
2609 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+1]*4)));
2610 a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+2]*4)),
2611 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+3]*4)));
2612 b0 = _mm_unpacklo_epi64(a0, a1);
2613 b1 = _mm_unpackhi_epi64(a0, a1);
2614 v0 = _mm_madd_epi16(v0, b0);
2615 v1 = _mm_madd_epi16(v1, b1);
2616 v0 = _mm_add_epi32(_mm_add_epi32(v0, v1), delta);
2618 i0 = *(ushort*)(S0 + iofs1[0]) + (*(ushort*)(S0 + iofs1[1]) << 16);
2619 i1 = *(ushort*)(S0 + iofs1[2]) + (*(ushort*)(S0 + iofs1[3]) << 16);
2620 v2 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2621 i0 = *(ushort*)(S1 + iofs1[0]) + (*(ushort*)(S1 + iofs1[1]) << 16);
2622 i1 = *(ushort*)(S1 + iofs1[2]) + (*(ushort*)(S1 + iofs1[3]) << 16);
2623 v3 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1));
2624 v2 = _mm_unpacklo_epi8(v2, z);
2625 v3 = _mm_unpacklo_epi8(v3, z);
2627 a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+4]*4)),
2628 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+5]*4)));
2629 a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+6]*4)),
2630 _mm_loadl_epi64((__m128i*)(wtab+FXY[x+7]*4)));
2631 b0 = _mm_unpacklo_epi64(a0, a1);
2632 b1 = _mm_unpackhi_epi64(a0, a1);
2633 v2 = _mm_madd_epi16(v2, b0);
2634 v3 = _mm_madd_epi16(v3, b1);
2635 v2 = _mm_add_epi32(_mm_add_epi32(v2, v3), delta);
2637 v0 = _mm_srai_epi32(v0, INTER_REMAP_COEF_BITS);
2638 v2 = _mm_srai_epi32(v2, INTER_REMAP_COEF_BITS);
2639 v0 = _mm_packus_epi16(_mm_packs_epi32(v0, v2), z);
2640 _mm_storel_epi64( (__m128i*)(D + x), v0 );
2645 for( ; x <= width - 5; x += 4, D += 12 )
2647 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2648 __m128i u0, v0, u1, v1;
2650 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2651 _mm_store_si128( (__m128i*)iofs0, xy0 );
2652 const __m128i *w0, *w1;
2653 w0 = (const __m128i*)(wtab + FXY[x]*16);
2654 w1 = (const __m128i*)(wtab + FXY[x+1]*16);
2656 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])),
2657 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 3)));
2658 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])),
2659 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 3)));
2660 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])),
2661 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 3)));
2662 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])),
2663 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 3)));
2664 u0 = _mm_unpacklo_epi8(u0, z);
2665 v0 = _mm_unpacklo_epi8(v0, z);
2666 u1 = _mm_unpacklo_epi8(u1, z);
2667 v1 = _mm_unpacklo_epi8(v1, z);
2668 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2669 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2670 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2671 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2672 u0 = _mm_slli_si128(u0, 4);
2673 u0 = _mm_packs_epi32(u0, u1);
2674 u0 = _mm_packus_epi16(u0, u0);
2675 _mm_storel_epi64((__m128i*)D, _mm_srli_si128(u0,1));
2677 w0 = (const __m128i*)(wtab + FXY[x+2]*16);
2678 w1 = (const __m128i*)(wtab + FXY[x+3]*16);
2680 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])),
2681 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 3)));
2682 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])),
2683 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 3)));
2684 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])),
2685 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 3)));
2686 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])),
2687 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 3)));
2688 u0 = _mm_unpacklo_epi8(u0, z);
2689 v0 = _mm_unpacklo_epi8(v0, z);
2690 u1 = _mm_unpacklo_epi8(u1, z);
2691 v1 = _mm_unpacklo_epi8(v1, z);
2692 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2693 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2694 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2695 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2696 u0 = _mm_slli_si128(u0, 4);
2697 u0 = _mm_packs_epi32(u0, u1);
2698 u0 = _mm_packus_epi16(u0, u0);
2699 _mm_storel_epi64((__m128i*)(D + 6), _mm_srli_si128(u0,1));
2704 for( ; x <= width - 4; x += 4, D += 16 )
2706 __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2));
2707 __m128i u0, v0, u1, v1;
2709 xy0 = _mm_madd_epi16( xy0, xy2ofs );
2710 _mm_store_si128( (__m128i*)iofs0, xy0 );
2711 const __m128i *w0, *w1;
2712 w0 = (const __m128i*)(wtab + FXY[x]*16);
2713 w1 = (const __m128i*)(wtab + FXY[x+1]*16);
2715 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])),
2716 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 4)));
2717 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])),
2718 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 4)));
2719 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])),
2720 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 4)));
2721 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])),
2722 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 4)));
2723 u0 = _mm_unpacklo_epi8(u0, z);
2724 v0 = _mm_unpacklo_epi8(v0, z);
2725 u1 = _mm_unpacklo_epi8(u1, z);
2726 v1 = _mm_unpacklo_epi8(v1, z);
2727 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2728 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2729 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2730 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2731 u0 = _mm_packs_epi32(u0, u1);
2732 u0 = _mm_packus_epi16(u0, u0);
2733 _mm_storel_epi64((__m128i*)D, u0);
2735 w0 = (const __m128i*)(wtab + FXY[x+2]*16);
2736 w1 = (const __m128i*)(wtab + FXY[x+3]*16);
2738 u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])),
2739 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 4)));
2740 v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])),
2741 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 4)));
2742 u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])),
2743 _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 4)));
2744 v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])),
2745 _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 4)));
2746 u0 = _mm_unpacklo_epi8(u0, z);
2747 v0 = _mm_unpacklo_epi8(v0, z);
2748 u1 = _mm_unpacklo_epi8(u1, z);
2749 v1 = _mm_unpacklo_epi8(v1, z);
2750 u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1]));
2751 u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1]));
2752 u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS);
2753 u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS);
2754 u0 = _mm_packs_epi32(u0, u1);
2755 u0 = _mm_packus_epi16(u0, u0);
2756 _mm_storel_epi64((__m128i*)(D + 8), u0);
2766 typedef RemapNoVec RemapVec_8u;
2771 template<class CastOp, class VecOp, typename AT>
2772 static void remapBilinear( const Mat& _src, Mat& _dst, const Mat& _xy,
2773 const Mat& _fxy, const void* _wtab,
2774 int borderType, const Scalar& _borderValue )
2776 typedef typename CastOp::rtype T;
2777 typedef typename CastOp::type1 WT;
2778 Size ssize = _src.size(), dsize = _dst.size();
2779 int cn = _src.channels();
2780 const AT* wtab = (const AT*)_wtab;
2781 const T* S0 = (const T*)_src.data;
2782 size_t sstep = _src.step/sizeof(S0[0]);
2783 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2784 saturate_cast<T>(_borderValue[1]),
2785 saturate_cast<T>(_borderValue[2]),
2786 saturate_cast<T>(_borderValue[3]));
2791 unsigned width1 = std::max(ssize.width-1, 0), height1 = std::max(ssize.height-1, 0);
2792 CV_Assert( cn <= 4 && ssize.area() > 0 );
2794 if( _src.type() == CV_8UC3 )
2795 width1 = std::max(ssize.width-2, 0);
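// Note (added for clarity): the extra column of slack for CV_8UC3 is presumably needed
// because the SSE2 3-channel path loads 4 bytes per pixel (*(int*) reads), i.e. it may
// read one byte past the last channel of the rightmost tap.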
2798 for( dy = 0; dy < dsize.height; dy++ )
2800 T* D = (T*)(_dst.data + _dst.step*dy);
2801 const short* XY = (const short*)(_xy.data + _xy.step*dy);
2802 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
2804 bool prevInlier = false;
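// Note (added for clarity): the scan below splits each row into maximal runs of
// "inlier" pixels (whole 2x2 neighbourhood inside the image), handled by the
// vectorized/unrolled fast path, and runs of border pixels, handled per pixel below.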
2806 for( dx = 0; dx <= dsize.width; dx++ )
2808 bool curInlier = dx < dsize.width ?
2809 (unsigned)XY[dx*2] < width1 &&
2810 (unsigned)XY[dx*2+1] < height1 : !prevInlier;
2811 if( curInlier == prevInlier )
2817 prevInlier = curInlier;
2821 int len = vecOp( _src, D, XY + dx*2, FXY + dx, wtab, X1 - dx );
2827 for( ; dx < X1; dx++, D++ )
2829 int sx = XY[dx*2], sy = XY[dx*2+1];
2830 const AT* w = wtab + FXY[dx]*4;
2831 const T* S = S0 + sy*sstep + sx;
2832 *D = castOp(WT(S[0]*w[0] + S[1]*w[1] + S[sstep]*w[2] + S[sstep+1]*w[3]));
2836 for( ; dx < X1; dx++, D += 2 )
2838 int sx = XY[dx*2], sy = XY[dx*2+1];
2839 const AT* w = wtab + FXY[dx]*4;
2840 const T* S = S0 + sy*sstep + sx*2;
2841 WT t0 = S[0]*w[0] + S[2]*w[1] + S[sstep]*w[2] + S[sstep+2]*w[3];
2842 WT t1 = S[1]*w[0] + S[3]*w[1] + S[sstep+1]*w[2] + S[sstep+3]*w[3];
2843 D[0] = castOp(t0); D[1] = castOp(t1);
2846 for( ; dx < X1; dx++, D += 3 )
2848 int sx = XY[dx*2], sy = XY[dx*2+1];
2849 const AT* w = wtab + FXY[dx]*4;
2850 const T* S = S0 + sy*sstep + sx*3;
2851 WT t0 = S[0]*w[0] + S[3]*w[1] + S[sstep]*w[2] + S[sstep+3]*w[3];
2852 WT t1 = S[1]*w[0] + S[4]*w[1] + S[sstep+1]*w[2] + S[sstep+4]*w[3];
2853 WT t2 = S[2]*w[0] + S[5]*w[1] + S[sstep+2]*w[2] + S[sstep+5]*w[3];
2854 D[0] = castOp(t0); D[1] = castOp(t1); D[2] = castOp(t2);
2857 for( ; dx < X1; dx++, D += 4 )
2859 int sx = XY[dx*2], sy = XY[dx*2+1];
2860 const AT* w = wtab + FXY[dx]*4;
2861 const T* S = S0 + sy*sstep + sx*4;
2862 WT t0 = S[0]*w[0] + S[4]*w[1] + S[sstep]*w[2] + S[sstep+4]*w[3];
2863 WT t1 = S[1]*w[0] + S[5]*w[1] + S[sstep+1]*w[2] + S[sstep+5]*w[3];
2864 D[0] = castOp(t0); D[1] = castOp(t1);
2865 t0 = S[2]*w[0] + S[6]*w[1] + S[sstep+2]*w[2] + S[sstep+6]*w[3];
2866 t1 = S[3]*w[0] + S[7]*w[1] + S[sstep+3]*w[2] + S[sstep+7]*w[3];
2867 D[2] = castOp(t0); D[3] = castOp(t1);
2872 if( borderType == BORDER_TRANSPARENT && cn != 3 )
2880 for( ; dx < X1; dx++, D++ )
2882 int sx = XY[dx*2], sy = XY[dx*2+1];
2883 if( borderType == BORDER_CONSTANT &&
2884 (sx >= ssize.width || sx+1 < 0 ||
2885 sy >= ssize.height || sy+1 < 0) )
2891 int sx0, sx1, sy0, sy1;
2893 const AT* w = wtab + FXY[dx]*4;
2894 if( borderType == BORDER_REPLICATE )
2896 sx0 = clip(sx, 0, ssize.width);
2897 sx1 = clip(sx+1, 0, ssize.width);
2898 sy0 = clip(sy, 0, ssize.height);
2899 sy1 = clip(sy+1, 0, ssize.height);
2900 v0 = S0[sy0*sstep + sx0];
2901 v1 = S0[sy0*sstep + sx1];
2902 v2 = S0[sy1*sstep + sx0];
2903 v3 = S0[sy1*sstep + sx1];
2907 sx0 = borderInterpolate(sx, ssize.width, borderType);
2908 sx1 = borderInterpolate(sx+1, ssize.width, borderType);
2909 sy0 = borderInterpolate(sy, ssize.height, borderType);
2910 sy1 = borderInterpolate(sy+1, ssize.height, borderType);
2911 v0 = sx0 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx0] : cval[0];
2912 v1 = sx1 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx1] : cval[0];
2913 v2 = sx0 >= 0 && sy1 >= 0 ? S0[sy1*sstep + sx0] : cval[0];
2914 v3 = sx1 >= 0 && sy1 >= 0 ? S0[sy1*sstep + sx1] : cval[0];
2916 D[0] = castOp(WT(v0*w[0] + v1*w[1] + v2*w[2] + v3*w[3]));
2920 for( ; dx < X1; dx++, D += cn )
2922 int sx = XY[dx*2], sy = XY[dx*2+1], k;
2923 if( borderType == BORDER_CONSTANT &&
2924 (sx >= ssize.width || sx+1 < 0 ||
2925 sy >= ssize.height || sy+1 < 0) )
2927 for( k = 0; k < cn; k++ )
2932 int sx0, sx1, sy0, sy1;
2933 const T *v0, *v1, *v2, *v3;
2934 const AT* w = wtab + FXY[dx]*4;
2935 if( borderType == BORDER_REPLICATE )
2937 sx0 = clip(sx, 0, ssize.width);
2938 sx1 = clip(sx+1, 0, ssize.width);
2939 sy0 = clip(sy, 0, ssize.height);
2940 sy1 = clip(sy+1, 0, ssize.height);
2941 v0 = S0 + sy0*sstep + sx0*cn;
2942 v1 = S0 + sy0*sstep + sx1*cn;
2943 v2 = S0 + sy1*sstep + sx0*cn;
2944 v3 = S0 + sy1*sstep + sx1*cn;
2946 else if( borderType == BORDER_TRANSPARENT &&
2947 ((unsigned)sx >= (unsigned)(ssize.width-1) ||
2948 (unsigned)sy >= (unsigned)(ssize.height-1)))
2952 sx0 = borderInterpolate(sx, ssize.width, borderType);
2953 sx1 = borderInterpolate(sx+1, ssize.width, borderType);
2954 sy0 = borderInterpolate(sy, ssize.height, borderType);
2955 sy1 = borderInterpolate(sy+1, ssize.height, borderType);
2956 v0 = sx0 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx0*cn : &cval[0];
2957 v1 = sx1 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx1*cn : &cval[0];
2958 v2 = sx0 >= 0 && sy1 >= 0 ? S0 + sy1*sstep + sx0*cn : &cval[0];
2959 v3 = sx1 >= 0 && sy1 >= 0 ? S0 + sy1*sstep + sx1*cn : &cval[0];
2961 for( k = 0; k < cn; k++ )
2962 D[k] = castOp(WT(v0[k]*w[0] + v1[k]*w[1] + v2[k]*w[2] + v3[k]*w[3]));
2971 template<class CastOp, typename AT, int ONE>
2972 static void remapBicubic( const Mat& _src, Mat& _dst, const Mat& _xy,
2973 const Mat& _fxy, const void* _wtab,
2974 int borderType, const Scalar& _borderValue )
2976 typedef typename CastOp::rtype T;
2977 typedef typename CastOp::type1 WT;
2978 Size ssize = _src.size(), dsize = _dst.size();
2979 int cn = _src.channels();
2980 const AT* wtab = (const AT*)_wtab;
2981 const T* S0 = (const T*)_src.data;
2982 size_t sstep = _src.step/sizeof(S0[0]);
2983 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
2984 saturate_cast<T>(_borderValue[1]),
2985 saturate_cast<T>(_borderValue[2]),
2986 saturate_cast<T>(_borderValue[3]));
2989 int borderType1 = borderType != BORDER_TRANSPARENT ? borderType : BORDER_REFLECT_101;
2991 unsigned width1 = std::max(ssize.width-3, 0), height1 = std::max(ssize.height-3, 0);
2993 if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() )
2995 dsize.width *= dsize.height;
2999 for( dy = 0; dy < dsize.height; dy++ )
3001 T* D = (T*)(_dst.data + _dst.step*dy);
3002 const short* XY = (const short*)(_xy.data + _xy.step*dy);
3003 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
3005 for( dx = 0; dx < dsize.width; dx++, D += cn )
3007 int sx = XY[dx*2]-1, sy = XY[dx*2+1]-1;
3008 const AT* w = wtab + FXY[dx]*16;
3010 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
3012 const T* S = S0 + sy*sstep + sx*cn;
3013 for( k = 0; k < cn; k++ )
3015 WT sum = S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3];
3017 sum += S[0]*w[4] + S[cn]*w[5] + S[cn*2]*w[6] + S[cn*3]*w[7];
3019 sum += S[0]*w[8] + S[cn]*w[9] + S[cn*2]*w[10] + S[cn*3]*w[11];
3021 sum += S[0]*w[12] + S[cn]*w[13] + S[cn*2]*w[14] + S[cn*3]*w[15];
3029 if( borderType == BORDER_TRANSPARENT &&
3030 ((unsigned)(sx+1) >= (unsigned)ssize.width ||
3031 (unsigned)(sy+1) >= (unsigned)ssize.height) )
3034 if( borderType1 == BORDER_CONSTANT &&
3035 (sx >= ssize.width || sx+4 <= 0 ||
3036 sy >= ssize.height || sy+4 <= 0))
3038 for( k = 0; k < cn; k++ )
3043 for( i = 0; i < 4; i++ )
3045 x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn;
3046 y[i] = borderInterpolate(sy + i, ssize.height, borderType1);
3049 for( k = 0; k < cn; k++, S0++, w -= 16 )
3051 WT cv = cval[k], sum = cv*ONE;
3052 for( i = 0; i < 4; i++, w += 4 )
3055 const T* S = S0 + yi*sstep;
3059 sum += (S[x[0]] - cv)*w[0];
3061 sum += (S[x[1]] - cv)*w[1];
3063 sum += (S[x[2]] - cv)*w[2];
3065 sum += (S[x[3]] - cv)*w[3];
3076 template<class CastOp, typename AT, int ONE>
3077 static void remapLanczos4( const Mat& _src, Mat& _dst, const Mat& _xy,
3078 const Mat& _fxy, const void* _wtab,
3079 int borderType, const Scalar& _borderValue )
3081 typedef typename CastOp::rtype T;
3082 typedef typename CastOp::type1 WT;
3083 Size ssize = _src.size(), dsize = _dst.size();
3084 int cn = _src.channels();
3085 const AT* wtab = (const AT*)_wtab;
3086 const T* S0 = (const T*)_src.data;
3087 size_t sstep = _src.step/sizeof(S0[0]);
3088 Scalar_<T> cval(saturate_cast<T>(_borderValue[0]),
3089 saturate_cast<T>(_borderValue[1]),
3090 saturate_cast<T>(_borderValue[2]),
3091 saturate_cast<T>(_borderValue[3]));
3094 int borderType1 = borderType != BORDER_TRANSPARENT ? borderType : BORDER_REFLECT_101;
3096 unsigned width1 = std::max(ssize.width-7, 0), height1 = std::max(ssize.height-7, 0);
3098 if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() )
3100 dsize.width *= dsize.height;
3104 for( dy = 0; dy < dsize.height; dy++ )
3106 T* D = (T*)(_dst.data + _dst.step*dy);
3107 const short* XY = (const short*)(_xy.data + _xy.step*dy);
3108 const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy);
3110 for( dx = 0; dx < dsize.width; dx++, D += cn )
3112 int sx = XY[dx*2]-3, sy = XY[dx*2+1]-3;
3113 const AT* w = wtab + FXY[dx]*64;
3114 const T* S = S0 + sy*sstep + sx*cn;
3116 if( (unsigned)sx < width1 && (unsigned)sy < height1 )
3118 for( k = 0; k < cn; k++ )
3121 for( int r = 0; r < 8; r++, S += sstep, w += 8 )
3122 sum += S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3] +
3123 S[cn*4]*w[4] + S[cn*5]*w[5] + S[cn*6]*w[6] + S[cn*7]*w[7];
3132 if( borderType == BORDER_TRANSPARENT &&
3133 ((unsigned)(sx+3) >= (unsigned)ssize.width ||
3134 (unsigned)(sy+3) >= (unsigned)ssize.height) )
3137 if( borderType1 == BORDER_CONSTANT &&
3138 (sx >= ssize.width || sx+8 <= 0 ||
3139 sy >= ssize.height || sy+8 <= 0))
3141 for( k = 0; k < cn; k++ )
3146 for( i = 0; i < 8; i++ )
3148 x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn;
3149 y[i] = borderInterpolate(sy + i, ssize.height, borderType1);
3152 for( k = 0; k < cn; k++, S0++, w -= 64 )
3154 WT cv = cval[k], sum = cv*ONE;
3155 for( i = 0; i < 8; i++, w += 8 )
3158 const T* S1 = S0 + yi*sstep;
3162 sum += (S1[x[0]] - cv)*w[0];
3164 sum += (S1[x[1]] - cv)*w[1];
3166 sum += (S1[x[2]] - cv)*w[2];
3168 sum += (S1[x[3]] - cv)*w[3];
3170 sum += (S1[x[4]] - cv)*w[4];
3172 sum += (S1[x[5]] - cv)*w[5];
3174 sum += (S1[x[6]] - cv)*w[6];
3176 sum += (S1[x[7]] - cv)*w[7];
3187 typedef void (*RemapNNFunc)(const Mat& _src, Mat& _dst, const Mat& _xy,
3188 int borderType, const Scalar& _borderValue );
3190 typedef void (*RemapFunc)(const Mat& _src, Mat& _dst, const Mat& _xy,
3191 const Mat& _fxy, const void* _wtab,
3192 int borderType, const Scalar& _borderValue);
3194 class RemapInvoker :
3195 public ParallelLoopBody
3198 RemapInvoker(const Mat& _src, Mat& _dst, const Mat *_m1,
3199 const Mat *_m2, int _borderType, const Scalar &_borderValue,
3200 int _planar_input, RemapNNFunc _nnfunc, RemapFunc _ifunc, const void *_ctab) :
3201 ParallelLoopBody(), src(&_src), dst(&_dst), m1(_m1), m2(_m2),
3202 borderType(_borderType), borderValue(_borderValue),
3203 planar_input(_planar_input), nnfunc(_nnfunc), ifunc(_ifunc), ctab(_ctab)
3207 virtual void operator() (const Range& range) const
3210 const int buf_size = 1 << 14;
3211 int brows0 = std::min(128, dst->rows), map_depth = m1->depth();
3212 int bcols0 = std::min(buf_size/brows0, dst->cols);
3213 brows0 = std::min(buf_size/bcols0, dst->rows);
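// Note (added for clarity): buf_size (1 << 14 entries) caps the per-invocation tile so
// the temporary coordinate/fraction buffers stay cache-resident; the destination is
// then processed in independent bcols0 x brows0 tiles.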
3215 bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
3218 Mat _bufxy(brows0, bcols0, CV_16SC2), _bufa;
3220 _bufa.create(brows0, bcols0, CV_16UC1);
3222 for( y = range.start; y < range.end; y += brows0 )
3224 for( x = 0; x < dst->cols; x += bcols0 )
3226 int brows = std::min(brows0, range.end - y);
3227 int bcols = std::min(bcols0, dst->cols - x);
3228 Mat dpart(*dst, Rect(x, y, bcols, brows));
3229 Mat bufxy(_bufxy, Rect(0, 0, bcols, brows));
3233 if( m1->type() == CV_16SC2 && !m2->data ) // the data is already in the right format
3234 bufxy = (*m1)(Rect(x, y, bcols, brows));
3235 else if( map_depth != CV_32F )
3237 for( y1 = 0; y1 < brows; y1++ )
3239 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3240 const short* sXY = (const short*)(m1->data + m1->step*(y+y1)) + x*2;
3241 const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
3243 for( x1 = 0; x1 < bcols; x1++ )
3245 int a = sA[x1] & (INTER_TAB_SIZE2-1);
3246 XY[x1*2] = sXY[x1*2] + NNDeltaTab_i[a][0];
3247 XY[x1*2+1] = sXY[x1*2+1] + NNDeltaTab_i[a][1];
3251 else if( !planar_input )
3252 (*m1)(Rect(x, y, bcols, brows)).convertTo(bufxy, bufxy.depth());
3255 for( y1 = 0; y1 < brows; y1++ )
3257 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3258 const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
3259 const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
3265 for( ; x1 <= bcols - 8; x1 += 8 )
3267 __m128 fx0 = _mm_loadu_ps(sX + x1);
3268 __m128 fx1 = _mm_loadu_ps(sX + x1 + 4);
3269 __m128 fy0 = _mm_loadu_ps(sY + x1);
3270 __m128 fy1 = _mm_loadu_ps(sY + x1 + 4);
3271 __m128i ix0 = _mm_cvtps_epi32(fx0);
3272 __m128i ix1 = _mm_cvtps_epi32(fx1);
3273 __m128i iy0 = _mm_cvtps_epi32(fy0);
3274 __m128i iy1 = _mm_cvtps_epi32(fy1);
3275 ix0 = _mm_packs_epi32(ix0, ix1);
3276 iy0 = _mm_packs_epi32(iy0, iy1);
3277 ix1 = _mm_unpacklo_epi16(ix0, iy0);
3278 iy1 = _mm_unpackhi_epi16(ix0, iy0);
3279 _mm_storeu_si128((__m128i*)(XY + x1*2), ix1);
3280 _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1);
3285 for( ; x1 < bcols; x1++ )
3287 XY[x1*2] = saturate_cast<short>(sX[x1]);
3288 XY[x1*2+1] = saturate_cast<short>(sY[x1]);
3292 nnfunc( *src, dpart, bufxy, borderType, borderValue );
3296 Mat bufa(_bufa, Rect(0, 0, bcols, brows));
3297 for( y1 = 0; y1 < brows; y1++ )
3299 short* XY = (short*)(bufxy.data + bufxy.step*y1);
3300 ushort* A = (ushort*)(bufa.data + bufa.step*y1);
3302 if( m1->type() == CV_16SC2 && (m2->type() == CV_16UC1 || m2->type() == CV_16SC1) )
3304 bufxy = (*m1)(Rect(x, y, bcols, brows));
3306 const ushort* sA = (const ushort*)(m2->data + m2->step*(y+y1)) + x;
3307 for( x1 = 0; x1 < bcols; x1++ )
3308 A[x1] = (ushort)(sA[x1] & (INTER_TAB_SIZE2-1));
3310 else if( planar_input )
3312 const float* sX = (const float*)(m1->data + m1->step*(y+y1)) + x;
3313 const float* sY = (const float*)(m2->data + m2->step*(y+y1)) + x;
3319 __m128 scale = _mm_set1_ps((float)INTER_TAB_SIZE);
3320 __m128i mask = _mm_set1_epi32(INTER_TAB_SIZE-1);
3321 for( ; x1 <= bcols - 8; x1 += 8 )
3323 __m128 fx0 = _mm_loadu_ps(sX + x1);
3324 __m128 fx1 = _mm_loadu_ps(sX + x1 + 4);
3325 __m128 fy0 = _mm_loadu_ps(sY + x1);
3326 __m128 fy1 = _mm_loadu_ps(sY + x1 + 4);
3327 __m128i ix0 = _mm_cvtps_epi32(_mm_mul_ps(fx0, scale));
3328 __m128i ix1 = _mm_cvtps_epi32(_mm_mul_ps(fx1, scale));
3329 __m128i iy0 = _mm_cvtps_epi32(_mm_mul_ps(fy0, scale));
3330 __m128i iy1 = _mm_cvtps_epi32(_mm_mul_ps(fy1, scale));
3331 __m128i mx0 = _mm_and_si128(ix0, mask);
3332 __m128i mx1 = _mm_and_si128(ix1, mask);
3333 __m128i my0 = _mm_and_si128(iy0, mask);
3334 __m128i my1 = _mm_and_si128(iy1, mask);
3335 mx0 = _mm_packs_epi32(mx0, mx1);
3336 my0 = _mm_packs_epi32(my0, my1);
3337 my0 = _mm_slli_epi16(my0, INTER_BITS);
3338 mx0 = _mm_or_si128(mx0, my0);
3339 _mm_storeu_si128((__m128i*)(A + x1), mx0);
3340 ix0 = _mm_srai_epi32(ix0, INTER_BITS);
3341 ix1 = _mm_srai_epi32(ix1, INTER_BITS);
3342 iy0 = _mm_srai_epi32(iy0, INTER_BITS);
3343 iy1 = _mm_srai_epi32(iy1, INTER_BITS);
3344 ix0 = _mm_packs_epi32(ix0, ix1);
3345 iy0 = _mm_packs_epi32(iy0, iy1);
3346 ix1 = _mm_unpacklo_epi16(ix0, iy0);
3347 iy1 = _mm_unpackhi_epi16(ix0, iy0);
3348 _mm_storeu_si128((__m128i*)(XY + x1*2), ix1);
3349 _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1);
3354 for( ; x1 < bcols; x1++ )
3356 int sx = cvRound(sX[x1]*INTER_TAB_SIZE);
3357 int sy = cvRound(sY[x1]*INTER_TAB_SIZE);
3358 int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1));
3359 XY[x1*2] = saturate_cast<short>(sx >> INTER_BITS);
3360 XY[x1*2+1] = saturate_cast<short>(sy >> INTER_BITS);
3366 const float* sXY = (const float*)(m1->data + m1->step*(y+y1)) + x*2;
3368 for( x1 = 0; x1 < bcols; x1++ )
3370 int sx = cvRound(sXY[x1*2]*INTER_TAB_SIZE);
3371 int sy = cvRound(sXY[x1*2+1]*INTER_TAB_SIZE);
3372 int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1));
3373 XY[x1*2] = saturate_cast<short>(sx >> INTER_BITS);
3374 XY[x1*2+1] = saturate_cast<short>(sy >> INTER_BITS);
3379 ifunc(*src, dpart, bufxy, bufa, ctab, borderType, borderValue);
3398 static bool ocl_remap(InputArray _src, OutputArray _dst, InputArray _map1, InputArray _map2,
3399 int interpolation, int borderType, const Scalar& borderValue)
3401 int cn = _src.channels(), type = _src.type(), depth = _src.depth();
3403 if (borderType == BORDER_TRANSPARENT || cn == 3 || !(interpolation == INTER_LINEAR || interpolation == INTER_NEAREST)
3404 || _map1.type() == CV_16SC1 || _map2.type() == CV_16SC1)
3407 UMat src = _src.getUMat(), map1 = _map1.getUMat(), map2 = _map2.getUMat();
3409 if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.empty())) ||
3410 (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.empty())) )
3412 if (map1.type() != CV_16SC2)
3413 std::swap(map1, map2);
3416 CV_Assert( map1.type() == CV_32FC2 || (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) );
3418 _dst.create(map1.size(), type);
3419 UMat dst = _dst.getUMat();
3421 String kernelName = "remap";
3422 if (map1.type() == CV_32FC2 && map2.empty())
3423 kernelName += "_32FC2";
3424 else if (map1.type() == CV_16SC2)
3426 kernelName += "_16SC2";
3428 kernelName += "_16UC1";
3430 else if (map1.type() == CV_32FC1 && map2.type() == CV_32FC1)
3431 kernelName += "_2_32FC1";
3433 CV_Error(Error::StsBadArg, "Unsupported map types");
3435 static const char * const interMap[] = { "INTER_NEAREST", "INTER_LINEAR", "INTER_CUBIC", "INTER_LINEAR", "INTER_LANCZOS" };
3436 static const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
3437 "BORDER_REFLECT_101", "BORDER_TRANSPARENT" };
3438 String buildOptions = format("-D %s -D %s -D T=%s", interMap[interpolation], borderMap[borderType], ocl::typeToStr(type));
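// Note (added for clarity): the OpenCL kernel source is specialized at build time via
// these -D defines, so each (interpolation, border mode, type) combination compiles to
// a dedicated kernel without runtime branching.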
3440 if (interpolation != INTER_NEAREST)
3443 int wdepth = std::max(CV_32F, dst.depth());
3444 buildOptions = buildOptions
3445 + format(" -D WT=%s -D convertToT=%s -D convertToWT=%s"
3446 " -D convertToWT2=%s -D WT2=%s",
3447 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
3448 ocl::convertTypeStr(wdepth, depth, cn, cvt[0]),
3449 ocl::convertTypeStr(depth, wdepth, cn, cvt[1]),
3450 ocl::convertTypeStr(CV_32S, wdepth, 2, cvt[2]),
3451 ocl::typeToStr(CV_MAKE_TYPE(wdepth, 2)));
3454 ocl::Kernel k(kernelName.c_str(), ocl::imgproc::remap_oclsrc, buildOptions);
3456 Mat scalar(1, 1, type, borderValue);
3457 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), dstarg = ocl::KernelArg::WriteOnly(dst),
3458 map1arg = ocl::KernelArg::ReadOnlyNoSize(map1),
3459 scalararg = ocl::KernelArg::Constant((void*)scalar.data, scalar.elemSize());
3462 k.args(srcarg, dstarg, map1arg, scalararg);
3464 k.args(srcarg, dstarg, map1arg, ocl::KernelArg::ReadOnlyNoSize(map2), scalararg);
3466 size_t globalThreads[2] = { dst.cols, dst.rows };
3467 return k.run(2, globalThreads, NULL, false);
3474 void cv::remap( InputArray _src, OutputArray _dst,
3475 InputArray _map1, InputArray _map2,
3476 int interpolation, int borderType, const Scalar& borderValue )
3478 static RemapNNFunc nn_tab[] =
3480 remapNearest<uchar>, remapNearest<schar>, remapNearest<ushort>, remapNearest<short>,
3481 remapNearest<int>, remapNearest<float>, remapNearest<double>, 0
3484 static RemapFunc linear_tab[] =
3486 remapBilinear<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, RemapVec_8u, short>, 0,
3487 remapBilinear<Cast<float, ushort>, RemapNoVec, float>,
3488 remapBilinear<Cast<float, short>, RemapNoVec, float>, 0,
3489 remapBilinear<Cast<float, float>, RemapNoVec, float>,
3490 remapBilinear<Cast<double, double>, RemapNoVec, float>, 0
3493 static RemapFunc cubic_tab[] =
3495 remapBicubic<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, short, INTER_REMAP_COEF_SCALE>, 0,
3496 remapBicubic<Cast<float, ushort>, float, 1>,
3497 remapBicubic<Cast<float, short>, float, 1>, 0,
3498 remapBicubic<Cast<float, float>, float, 1>,
3499 remapBicubic<Cast<double, double>, float, 1>, 0
3502 static RemapFunc lanczos4_tab[] =
3504 remapLanczos4<FixedPtCast<int, uchar, INTER_REMAP_COEF_BITS>, short, INTER_REMAP_COEF_SCALE>, 0,
3505 remapLanczos4<Cast<float, ushort>, float, 1>,
3506 remapLanczos4<Cast<float, short>, float, 1>, 0,
3507 remapLanczos4<Cast<float, float>, float, 1>,
3508 remapLanczos4<Cast<double, double>, float, 1>, 0
3511 CV_Assert( _map1.size().area() > 0 );
3512 CV_Assert( _map2.empty() || (_map2.size() == _map1.size()));
3514 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
3515 ocl_remap(_src, _dst, _map1, _map2, interpolation, borderType, borderValue))
3517 Mat src = _src.getMat(), map1 = _map1.getMat(), map2 = _map2.getMat();
3518 _dst.create( map1.size(), src.type() );
3519 Mat dst = _dst.getMat();
3520 if( dst.data == src.data )
3523 int depth = src.depth();
3524 RemapNNFunc nnfunc = 0;
3525 RemapFunc ifunc = 0;
3526 const void* ctab = 0;
3527 bool fixpt = depth == CV_8U;
3528 bool planar_input = false;
3530 if( interpolation == INTER_NEAREST )
3532 nnfunc = nn_tab[depth];
3533 CV_Assert( nnfunc != 0 );
3537 if( interpolation == INTER_AREA )
3538 interpolation = INTER_LINEAR;
3540 if( interpolation == INTER_LINEAR )
3541 ifunc = linear_tab[depth];
3542 else if( interpolation == INTER_CUBIC )
3543 ifunc = cubic_tab[depth];
3544 else if( interpolation == INTER_LANCZOS4 )
3545 ifunc = lanczos4_tab[depth];
3547 CV_Error( CV_StsBadArg, "Unknown interpolation method" );
3548 CV_Assert( ifunc != 0 );
3549 ctab = initInterTab2D( interpolation, fixpt );
3552 const Mat *m1 = &map1, *m2 = &map2;
3554 if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.type() == CV_16SC1 || !map2.data)) ||
3555 (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.type() == CV_16SC1 || !map1.data)) )
3557 if( map1.type() != CV_16SC2 )
3562 CV_Assert( ((map1.type() == CV_32FC2 || map1.type() == CV_16SC2) && !map2.data) ||
3563 (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) );
3564 planar_input = map1.channels() == 1;
3567 RemapInvoker invoker(src, dst, m1, m2,
3568 borderType, borderValue, planar_input, nnfunc, ifunc,
3570 parallel_for_(Range(0, dst.rows), invoker, dst.total()/(double)(1<<16));
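/* Usage sketch (illustrative only; the flip maps below are an arbitrary example).
   Given a source image src, build absolute float maps that mirror it horizontally:

       #include <opencv2/imgproc/imgproc.hpp>

       cv::Mat dst, mapx(src.size(), CV_32FC1), mapy(src.size(), CV_32FC1);
       for( int y = 0; y < src.rows; y++ )
           for( int x = 0; x < src.cols; x++ )
           {
               mapx.at<float>(y, x) = (float)(src.cols - 1 - x); // mirrored column
               mapy.at<float>(y, x) = (float)y;                  // same row
           }
       cv::remap(src, dst, mapx, mapy, cv::INTER_LINEAR,
                 cv::BORDER_CONSTANT, cv::Scalar());
*/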
3574 void cv::convertMaps( InputArray _map1, InputArray _map2,
3575 OutputArray _dstmap1, OutputArray _dstmap2,
3576 int dstm1type, bool nninterpolate )
3578 Mat map1 = _map1.getMat(), map2 = _map2.getMat(), dstmap1, dstmap2;
3579 Size size = map1.size();
3580 const Mat *m1 = &map1, *m2 = &map2;
3581 int m1type = m1->type(), m2type = m2->type();
3583 CV_Assert( (m1type == CV_16SC2 && (nninterpolate || m2type == CV_16UC1 || m2type == CV_16SC1)) ||
3584 (m2type == CV_16SC2 && (nninterpolate || m1type == CV_16UC1 || m1type == CV_16SC1)) ||
3585 (m1type == CV_32FC1 && m2type == CV_32FC1) ||
3586 (m1type == CV_32FC2 && !m2->data) );
3588 if( m2type == CV_16SC2 )
3590 std::swap( m1, m2 );
3591 std::swap( m1type, m2type );
3594 if( dstm1type <= 0 )
3595 dstm1type = m1type == CV_16SC2 ? CV_32FC2 : CV_16SC2;
3596 CV_Assert( dstm1type == CV_16SC2 || dstm1type == CV_32FC1 || dstm1type == CV_32FC2 );
3597 _dstmap1.create( size, dstm1type );
3598 dstmap1 = _dstmap1.getMat();
3600 if( !nninterpolate && dstm1type != CV_32FC2 )
3602 _dstmap2.create( size, dstm1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
3603 dstmap2 = _dstmap2.getMat();
3608 if( m1type == dstm1type || (nninterpolate &&
3609 ((m1type == CV_16SC2 && dstm1type == CV_32FC2) ||
3610 (m1type == CV_32FC2 && dstm1type == CV_16SC2))) )
3612 m1->convertTo( dstmap1, dstmap1.type() );
3613 if( dstmap2.data && dstmap2.type() == m2->type() )
3614 m2->copyTo( dstmap2 );
3618 if( m1type == CV_32FC1 && dstm1type == CV_32FC2 )
3620 Mat vdata[] = { *m1, *m2 };
3621 merge( vdata, 2, dstmap1 );
3625 if( m1type == CV_32FC2 && dstm1type == CV_32FC1 )
3627 Mat mv[] = { dstmap1, dstmap2 };
3632 if( m1->isContinuous() && (!m2->data || m2->isContinuous()) &&
3633 dstmap1.isContinuous() && (!dstmap2.data || dstmap2.isContinuous()) )
3635 size.width *= size.height;
3639 const float scale = 1.f/INTER_TAB_SIZE;
3641 for( y = 0; y < size.height; y++ )
3643 const float* src1f = (const float*)(m1->data + m1->step*y);
3644 const float* src2f = (const float*)(m2->data + m2->step*y);
3645 const short* src1 = (const short*)src1f;
3646 const ushort* src2 = (const ushort*)src2f;
3648 float* dst1f = (float*)(dstmap1.data + dstmap1.step*y);
3649 float* dst2f = (float*)(dstmap2.data + dstmap2.step*y);
3650 short* dst1 = (short*)dst1f;
3651 ushort* dst2 = (ushort*)dst2f;
3653 if( m1type == CV_32FC1 && dstm1type == CV_16SC2 )
3656 for( x = 0; x < size.width; x++ )
3658 dst1[x*2] = saturate_cast<short>(src1f[x]);
3659 dst1[x*2+1] = saturate_cast<short>(src2f[x]);
3662 for( x = 0; x < size.width; x++ )
3664 int ix = saturate_cast<int>(src1f[x]*INTER_TAB_SIZE);
3665 int iy = saturate_cast<int>(src2f[x]*INTER_TAB_SIZE);
3666 dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
3667 dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
3668 dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
3671 else if( m1type == CV_32FC2 && dstm1type == CV_16SC2 )
3674 for( x = 0; x < size.width; x++ )
3676 dst1[x*2] = saturate_cast<short>(src1f[x*2]);
3677 dst1[x*2+1] = saturate_cast<short>(src1f[x*2+1]);
3680 for( x = 0; x < size.width; x++ )
3682 int ix = saturate_cast<int>(src1f[x*2]*INTER_TAB_SIZE);
3683 int iy = saturate_cast<int>(src1f[x*2+1]*INTER_TAB_SIZE);
3684 dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
3685 dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
3686 dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
3689 else if( m1type == CV_16SC2 && dstm1type == CV_32FC1 )
3691 for( x = 0; x < size.width; x++ )
3693 int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1) : 0;
3694 dst1f[x] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
3695 dst2f[x] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
3698 else if( m1type == CV_16SC2 && dstm1type == CV_32FC2 )
3700 for( x = 0; x < size.width; x++ )
3702 int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1): 0;
3703 dst1f[x*2] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
3704 dst1f[x*2+1] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
3708 CV_Error( CV_StsNotImplemented, "Unsupported combination of input/output matrices" );
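/* Usage sketch (illustrative only): when the same mapping is applied repeatedly,
   converting float maps (e.g. mapx/mapy from the remap sketch above) to the
   fixed-point representation once avoids per-call conversion inside remap():

       cv::Mat map1, map2;   // become CV_16SC2 and CV_16UC1 respectively
       cv::convertMaps(mapx, mapy, map1, map2, CV_16SC2, false);
       cv::remap(src, dst, map1, map2, cv::INTER_LINEAR);

   map1 then stores the integer sample coordinates and map2 the packed fractional
   parts, i.e. an index into the precomputed interpolation-coefficient tables.
*/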
3716 class warpAffineInvoker :
3717 public ParallelLoopBody
3720 warpAffineInvoker(const Mat &_src, Mat &_dst, int _interpolation, int _borderType,
3721 const Scalar &_borderValue, int *_adelta, int *_bdelta, double *_M) :
3722 ParallelLoopBody(), src(_src), dst(_dst), interpolation(_interpolation),
3723 borderType(_borderType), borderValue(_borderValue), adelta(_adelta), bdelta(_bdelta),
3728 virtual void operator() (const Range& range) const
3730 const int BLOCK_SZ = 64;
3731 short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
3732 const int AB_BITS = MAX(10, (int)INTER_BITS);
3733 const int AB_SCALE = 1 << AB_BITS;
3734 int round_delta = interpolation == INTER_NEAREST ? AB_SCALE/2 : AB_SCALE/INTER_TAB_SIZE/2, x, y, x1, y1;
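// Note (added for clarity): the affine coordinates are evaluated in fixed point,
// scaled by AB_SCALE = 2^AB_BITS; round_delta adds half of the final quantization
// step (a whole pixel for INTER_NEAREST, one interpolation-table cell otherwise)
// so that the subsequent right shifts round to the nearest value.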
3736 bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
3739 int bh0 = std::min(BLOCK_SZ/2, dst.rows);
3740 int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, dst.cols);
3741 bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, dst.rows);
3743 for( y = range.start; y < range.end; y += bh0 )
3745 for( x = 0; x < dst.cols; x += bw0 )
3747 int bw = std::min( bw0, dst.cols - x);
3748 int bh = std::min( bh0, range.end - y);
3750 Mat _XY(bh, bw, CV_16SC2, XY), matA;
3751 Mat dpart(dst, Rect(x, y, bw, bh));
3753 for( y1 = 0; y1 < bh; y1++ )
3755 short* xy = XY + y1*bw*2;
3756 int X0 = saturate_cast<int>((M[1]*(y + y1) + M[2])*AB_SCALE) + round_delta;
3757 int Y0 = saturate_cast<int>((M[4]*(y + y1) + M[5])*AB_SCALE) + round_delta;
3759 if( interpolation == INTER_NEAREST )
3760 for( x1 = 0; x1 < bw; x1++ )
3762 int X = (X0 + adelta[x+x1]) >> AB_BITS;
3763 int Y = (Y0 + bdelta[x+x1]) >> AB_BITS;
3764 xy[x1*2] = saturate_cast<short>(X);
3765 xy[x1*2+1] = saturate_cast<short>(Y);
3769 short* alpha = A + y1*bw;
3774 __m128i fxy_mask = _mm_set1_epi32(INTER_TAB_SIZE - 1);
3775 __m128i XX = _mm_set1_epi32(X0), YY = _mm_set1_epi32(Y0);
3776 for( ; x1 <= bw - 8; x1 += 8 )
3778 __m128i tx0, tx1, ty0, ty1;
3779 tx0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1)), XX);
3780 ty0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1)), YY);
3781 tx1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1 + 4)), XX);
3782 ty1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1 + 4)), YY);
3784 tx0 = _mm_srai_epi32(tx0, AB_BITS - INTER_BITS);
3785 ty0 = _mm_srai_epi32(ty0, AB_BITS - INTER_BITS);
3786 tx1 = _mm_srai_epi32(tx1, AB_BITS - INTER_BITS);
3787 ty1 = _mm_srai_epi32(ty1, AB_BITS - INTER_BITS);
3789 __m128i fx_ = _mm_packs_epi32(_mm_and_si128(tx0, fxy_mask),
3790 _mm_and_si128(tx1, fxy_mask));
3791 __m128i fy_ = _mm_packs_epi32(_mm_and_si128(ty0, fxy_mask),
3792 _mm_and_si128(ty1, fxy_mask));
3793 tx0 = _mm_packs_epi32(_mm_srai_epi32(tx0, INTER_BITS),
3794 _mm_srai_epi32(tx1, INTER_BITS));
3795 ty0 = _mm_packs_epi32(_mm_srai_epi32(ty0, INTER_BITS),
3796 _mm_srai_epi32(ty1, INTER_BITS));
3797 fx_ = _mm_adds_epi16(fx_, _mm_slli_epi16(fy_, INTER_BITS));
3799 _mm_storeu_si128((__m128i*)(xy + x1*2), _mm_unpacklo_epi16(tx0, ty0));
3800 _mm_storeu_si128((__m128i*)(xy + x1*2 + 8), _mm_unpackhi_epi16(tx0, ty0));
3801 _mm_storeu_si128((__m128i*)(alpha + x1), fx_);
3805 for( ; x1 < bw; x1++ )
3807 int X = (X0 + adelta[x+x1]) >> (AB_BITS - INTER_BITS);
3808 int Y = (Y0 + bdelta[x+x1]) >> (AB_BITS - INTER_BITS);
3809 xy[x1*2] = saturate_cast<short>(X >> INTER_BITS);
3810 xy[x1*2+1] = saturate_cast<short>(Y >> INTER_BITS);
3811 alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
3812 (X & (INTER_TAB_SIZE-1)));
3817 if( interpolation == INTER_NEAREST )
3818 remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
3821 Mat _matA(bh, bw, CV_16U, A);
3822 remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
3831 int interpolation, borderType;
3833 int *adelta, *bdelta;
3837 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
3838 class IPPwarpAffineInvoker :
3839 public ParallelLoopBody
3842 IPPwarpAffineInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[2][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpAffineBackFunc _func, bool *_ok) :
3843 ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
3848 virtual void operator() (const Range& range) const
3850 IppiSize srcsize = { src.cols, src.rows };
3851 IppiRect srcroi = { 0, 0, src.cols, src.rows };
3852 IppiRect dstroi = { 0, range.start, dst.cols, range.end - range.start };
3853 int cnn = src.channels();
3854 if( borderType == BORDER_CONSTANT )
3856 IppiSize setSize = { dst.cols, range.end - range.start };
3857 void *dataPointer = dst.data + dst.step[0] * range.start;
3858 if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
3864 if( func( src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode ) < 0) // Aug 2013: problem in IPP 7.1 and 8.0: the function sometimes returns ippStsCoeffErr
3870 double (&coeffs)[2][3];
3874 ippiWarpAffineBackFunc func;
3876 const IPPwarpAffineInvoker& operator= (const IPPwarpAffineInvoker&);
3882 enum { OCL_OP_PERSPECTIVE = 1, OCL_OP_AFFINE = 0 };
3884 static bool ocl_warpTransform(InputArray _src, OutputArray _dst, InputArray _M0,
3885 Size dsize, int flags, int borderType, const Scalar& borderValue,
3888 CV_Assert(op_type == OCL_OP_AFFINE || op_type == OCL_OP_PERSPECTIVE);
3890 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), wdepth = depth;
3891 bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
3893 int interpolation = flags & INTER_MAX;
3894 if( interpolation == INTER_AREA )
3895 interpolation = INTER_LINEAR;
3897 if ( !(borderType == cv::BORDER_CONSTANT &&
3898 (interpolation == cv::INTER_NEAREST || interpolation == cv::INTER_LINEAR || interpolation == cv::INTER_CUBIC)) ||
3899 (!doubleSupport && depth == CV_64F) || cn > 4 || cn == 3)
3902 const char * const interpolationMap[3] = { "NEAREST", "LINEAR", "CUBIC" };
3903 ocl::ProgramSource program = op_type == OCL_OP_AFFINE ?
3904 ocl::imgproc::warp_affine_oclsrc : ocl::imgproc::warp_perspective_oclsrc;
3905 const char * const kernelName = op_type == OCL_OP_AFFINE ? "warpAffine" : "warpPerspective";
3908 if (interpolation == INTER_NEAREST)
3910 k.create(kernelName, program,
3911 format("-D INTER_NEAREST -D T=%s%s", ocl::typeToStr(type),
3912 doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
3917 wdepth = std::max(CV_32S, depth);
3918 k.create(kernelName, program,
3919 format("-D INTER_%s -D T=%s -D WT=%s -D depth=%d -D convertToWT=%s -D convertToT=%s%s",
3920 interpolationMap[interpolation], ocl::typeToStr(type),
3921 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)), depth,
3922 ocl::convertTypeStr(depth, wdepth, cn, cvt[0]),
3923 ocl::convertTypeStr(wdepth, depth, cn, cvt[1]),
3924 doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
3929 UMat src = _src.getUMat(), M0;
3930 _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
3931 UMat dst = _dst.getUMat();
3934 int matRows = (op_type == OCL_OP_AFFINE ? 2 : 3);
3935 Mat matM(matRows, 3, CV_64F, M), M1 = _M0.getMat();
3936 CV_Assert( (M1.type() == CV_32F || M1.type() == CV_64F) &&
3937 M1.rows == matRows && M1.cols == 3 );
3938 M1.convertTo(matM, matM.type());
3940 if( !(flags & WARP_INVERSE_MAP) )
3942 if (op_type == OCL_OP_PERSPECTIVE)
3946 double D = M[0]*M[4] - M[1]*M[3];
3947 D = D != 0 ? 1./D : 0;
3948 double A11 = M[4]*D, A22=M[0]*D;
3949 M[0] = A11; M[1] *= -D;
3950 M[3] *= -D; M[4] = A22;
3951 double b1 = -M[0]*M[2] - M[1]*M[5];
3952 double b2 = -M[3]*M[2] - M[4]*M[5];
3953 M[2] = b1; M[5] = b2;
3956 matM.convertTo(M0, doubleSupport ? CV_64F : CV_32F);
3958 k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst), ocl::KernelArg::PtrReadOnly(M0),
3959 ocl::KernelArg::Constant(Mat(1, 1, CV_MAKE_TYPE(wdepth, cn), borderValue)));
3961 size_t globalThreads[2] = { dst.cols, dst.rows };
3962 return k.run(2, globalThreads, NULL, false);
3970 void cv::warpAffine( InputArray _src, OutputArray _dst,
3971 InputArray _M0, Size dsize,
3972 int flags, int borderType, const Scalar& borderValue )
3974 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
3975 ocl_warpTransform(_src, _dst, _M0, dsize, flags, borderType,
3976 borderValue, OCL_OP_AFFINE))
3978 Mat src = _src.getMat(), M0 = _M0.getMat();
3979 _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
3980 Mat dst = _dst.getMat();
3981 CV_Assert( src.cols > 0 && src.rows > 0 );
3982 if( dst.data == src.data )
3986 Mat matM(2, 3, CV_64F, M);
3987 int interpolation = flags & INTER_MAX;
3988 if( interpolation == INTER_AREA )
3989 interpolation = INTER_LINEAR;
3991 CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 );
3992 M0.convertTo(matM, matM.type());
3994 #ifdef HAVE_TEGRA_OPTIMIZATION
3995 if( tegra::warpAffine(src, dst, M, flags, borderType, borderValue) )
3999 if( !(flags & WARP_INVERSE_MAP) )
4001 double D = M[0]*M[4] - M[1]*M[3];
4002 D = D != 0 ? 1./D : 0;
4003 double A11 = M[4]*D, A22=M[0]*D;
4004 M[0] = A11; M[1] *= -D;
4005 M[3] *= -D; M[4] = A22;
4006 double b1 = -M[0]*M[2] - M[1]*M[5];
4007 double b2 = -M[3]*M[2] - M[4]*M[5];
4008 M[2] = b1; M[5] = b2;
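// Note (added for clarity): the block above inverts the forward map dst = A*src + b
// in place: with D = det(A), A^-1 = [ M[4] -M[1]; -M[3] M[0] ] / D, and the new
// translation is b' = -A^-1 * b, so that src = A^-1 * dst + b'.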
4012 AutoBuffer<int> _abdelta(dst.cols*2);
4013 int* adelta = &_abdelta[0], *bdelta = adelta + dst.cols;
4014 const int AB_BITS = MAX(10, (int)INTER_BITS);
4015 const int AB_SCALE = 1 << AB_BITS;
4017 #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
4018 int depth = src.depth();
4019 int channels = src.channels();
4020 if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
4021 ( channels == 1 || channels == 3 || channels == 4 ) &&
4022 ( borderType == cv::BORDER_TRANSPARENT || ( borderType == cv::BORDER_CONSTANT ) ) )
4024 int type = src.type();
4025 ippiWarpAffineBackFunc ippFunc =
4026 type == CV_8UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C1R :
4027 type == CV_8UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C3R :
4028 type == CV_8UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C4R :
4029 type == CV_16UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C1R :
4030 type == CV_16UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C3R :
4031 type == CV_16UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C4R :
4032 type == CV_32FC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C1R :
4033 type == CV_32FC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C3R :
4034 type == CV_32FC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C4R :
4037 flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
4038 flags == INTER_NEAREST ? IPPI_INTER_NN :
4039 flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
4041 if( mode && ippFunc )
4043 double coeffs[2][3];
4044 for( int i = 0; i < 2; i++ )
4046 for( int j = 0; j < 3; j++ )
4048 coeffs[i][j] = matM.at<double>(i, j);
4052 Range range(0, dst.rows);
4053 IPPwarpAffineInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
4054 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
4061 for( x = 0; x < dst.cols; x++ )
4063 adelta[x] = saturate_cast<int>(M[0]*x*AB_SCALE);
4064 bdelta[x] = saturate_cast<int>(M[3]*x*AB_SCALE);
4067 Range range(0, dst.rows);
4068 warpAffineInvoker invoker(src, dst, interpolation, borderType,
4069 borderValue, adelta, bdelta, M);
4070 parallel_for_(range, invoker, dst.total()/(double)(1<<16));
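/* Usage sketch (illustrative only; the file name, angle and border choice
 * are assumptions, not taken from this file):
 *
 *     cv::Mat src = cv::imread("input.png");            // any 1/3/4-channel image
 *     cv::Point2f c(src.cols*0.5f, src.rows*0.5f);
 *     cv::Mat R = cv::getRotationMatrix2D(c, 30, 1.0);  // rotate 30 deg about center
 *     cv::Mat dst;
 *     cv::warpAffine(src, dst, R, src.size(), cv::INTER_LINEAR,
 *                    cv::BORDER_CONSTANT, cv::Scalar::all(0));
 */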
namespace cv
{

class warpPerspectiveInvoker :
    public ParallelLoopBody
{
public:
    warpPerspectiveInvoker(const Mat &_src, Mat &_dst, double *_M, int _interpolation,
                           int _borderType, const Scalar &_borderValue) :
        ParallelLoopBody(), src(_src), dst(_dst), M(_M), interpolation(_interpolation),
        borderType(_borderType), borderValue(_borderValue)
    {
    }

    virtual void operator() (const Range& range) const
    {
        const int BLOCK_SZ = 32;
        short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
        int x, y, x1, y1, width = dst.cols, height = dst.rows;

        int bh0 = std::min(BLOCK_SZ/2, height);
        int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
        bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);

        for( y = range.start; y < range.end; y += bh0 )
        {
            for( x = 0; x < width; x += bw0 )
            {
                int bw = std::min( bw0, width - x);
                int bh = std::min( bh0, range.end - y); // height

                Mat _XY(bh, bw, CV_16SC2, XY), matA;
                Mat dpart(dst, Rect(x, y, bw, bh));

                for( y1 = 0; y1 < bh; y1++ )
                {
                    short* xy = XY + y1*bw*2;
                    double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                    double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                    double W0 = M[6]*x + M[7]*(y + y1) + M[8];

                    if( interpolation == INTER_NEAREST )
                        for( x1 = 0; x1 < bw; x1++ )
                        {
                            double W = W0 + M[6]*x1;
                            W = W ? 1./W : 0;
                            double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W));
                            double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W));
                            int X = saturate_cast<int>(fX);
                            int Y = saturate_cast<int>(fY);

                            xy[x1*2] = saturate_cast<short>(X);
                            xy[x1*2+1] = saturate_cast<short>(Y);
                        }
                    else
                    {
                        short* alpha = A + y1*bw;
                        for( x1 = 0; x1 < bw; x1++ )
                        {
                            double W = W0 + M[6]*x1;
                            W = W ? INTER_TAB_SIZE/W : 0;
                            double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W));
                            double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W));
                            int X = saturate_cast<int>(fX);
                            int Y = saturate_cast<int>(fY);

                            xy[x1*2] = saturate_cast<short>(X >> INTER_BITS);
                            xy[x1*2+1] = saturate_cast<short>(Y >> INTER_BITS);
                            alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                                (X & (INTER_TAB_SIZE-1)));
                        }
                    }
                }

                if( interpolation == INTER_NEAREST )
                    remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
                else
                {
                    Mat _matA(bh, bw, CV_16U, A);
                    remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
                }
            }
        }
    }

private:
    Mat src;
    Mat dst;
    double* M;
    int interpolation, borderType;
    Scalar borderValue;
};
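/* Note on the tiling above: the destination is processed in blocks of at
 * most BLOCK_SZ x BLOCK_SZ pixels so that the fixed-point coordinate map XY
 * (CV_16SC2) and the interpolation-table indices A (CV_16U) fit in small
 * stack buffers; each block is then resampled with a single remap() call,
 * which also takes care of the border modes. */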
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
class IPPwarpPerspectiveInvoker :
    public ParallelLoopBody
{
public:
    IPPwarpPerspectiveInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[3][3], int &_interpolation,
                              int &_borderType, const Scalar &_borderValue,
                              ippiWarpPerspectiveBackFunc _func, bool *_ok) :
        ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs),
        borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
    {
        *ok = true;
    }

    virtual void operator() (const Range& range) const
    {
        IppiSize srcsize = {src.cols, src.rows};
        IppiRect srcroi = {0, 0, src.cols, src.rows};
        IppiRect dstroi = {0, range.start, dst.cols, range.end - range.start};
        int cnn = src.channels();

        if( borderType == BORDER_CONSTANT )
        {
            // pre-fill the strip with the border color: the IPP call itself
            // writes only the pixels that map inside the source
            IppiSize setSize = {dst.cols, range.end - range.start};
            void *dataPointer = dst.data + dst.step[0] * range.start;
            if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
            {
                *ok = false;
                return;
            }
        }

        if( func(src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode) < 0 )
            *ok = false;
    }

private:
    Mat &src;
    Mat &dst;
    double (&coeffs)[3][3];
    int mode, borderType;
    const Scalar borderValue;
    ippiWarpPerspectiveBackFunc func;
    bool *ok;

    const IPPwarpPerspectiveInvoker& operator= (const IPPwarpPerspectiveInvoker&);
};
#endif

}
void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
                          Size dsize, int flags, int borderType, const Scalar& borderValue )
{
    CV_Assert( _src.total() > 0 );

    CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
               ocl_warpTransform(_src, _dst, _M0, dsize, flags, borderType, borderValue,
                                 OCL_OP_PERSPECTIVE))

    Mat src = _src.getMat(), M0 = _M0.getMat();
    _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
    Mat dst = _dst.getMat();

    if( dst.data == src.data )
        src = src.clone();

    double M[9];
    Mat matM(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;

    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(matM, matM.type());

#ifdef HAVE_TEGRA_OPTIMIZATION
    if( tegra::warpPerspective(src, dst, M, flags, borderType, borderValue) )
        return;
#endif

    if( !(flags & WARP_INVERSE_MAP) )
        invert(matM, matM);

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    int depth = src.depth();
    int channels = src.channels();
    if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
        ( channels == 1 || channels == 3 || channels == 4 ) &&
        ( borderType == cv::BORDER_TRANSPARENT || borderType == cv::BORDER_CONSTANT ) )
    {
        int type = src.type();
        ippiWarpPerspectiveBackFunc ippFunc =
            type == CV_8UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C1R :
            type == CV_8UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C3R :
            type == CV_8UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C4R :
            type == CV_16UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C1R :
            type == CV_16UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C3R :
            type == CV_16UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C4R :
            type == CV_32FC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C1R :
            type == CV_32FC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C3R :
            type == CV_32FC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C4R :
            0;
        int mode =
            flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
            flags == INTER_NEAREST ? IPPI_INTER_NN :
            flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
            0;
        if( mode && ippFunc )
        {
            double coeffs[3][3];
            for( int i = 0; i < 3; i++ )
                for( int j = 0; j < 3; j++ )
                    coeffs[i][j] = matM.at<double>(i, j);

            bool ok;
            Range range(0, dst.rows);
            IPPwarpPerspectiveInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
            parallel_for_(range, invoker, dst.total()/(double)(1<<16));
            if( ok )
                return;
        }
    }
#endif

    Range range(0, dst.rows);
    warpPerspectiveInvoker invoker(src, dst, M, interpolation, borderType, borderValue);
    parallel_for_(range, invoker, dst.total()/(double)(1<<16));
}
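/* Usage sketch (illustrative only; the corner coordinates and output size
 * are assumptions):
 *
 *     cv::Mat src = cv::imread("input.png");
 *     cv::Point2f s[4], d[4];
 *     s[0] = cv::Point2f(56, 65);   d[0] = cv::Point2f(0, 0);
 *     s[1] = cv::Point2f(368, 52);  d[1] = cv::Point2f(300, 0);
 *     s[2] = cv::Point2f(28, 387);  d[2] = cv::Point2f(0, 300);
 *     s[3] = cv::Point2f(389, 390); d[3] = cv::Point2f(300, 300);
 *     cv::Mat H = cv::getPerspectiveTransform(s, d);
 *     cv::Mat dst;
 *     cv::warpPerspective(src, dst, H, cv::Size(300, 300));
 */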
cv::Mat cv::getRotationMatrix2D( Point2f center, double angle, double scale )
{
    angle *= CV_PI/180;
    double alpha = cos(angle)*scale;
    double beta = sin(angle)*scale;

    Mat M(2, 3, CV_64F);
    double* m = (double*)M.data;

    m[0] = alpha;
    m[1] = beta;
    m[2] = (1-alpha)*center.x - beta*center.y;
    m[3] = -beta;
    m[4] = alpha;
    m[5] = beta*center.x + (1-alpha)*center.y;

    return M;
}
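/* The matrix built above is
 *
 *     [  alpha   beta   (1-alpha)*cx - beta*cy ]
 *     [ -beta    alpha   beta*cx + (1-alpha)*cy ]
 *
 * with alpha = scale*cos(angle), beta = scale*sin(angle); the third column
 * is chosen so that the center (cx, cy) maps to itself. Quick check with
 * hypothetical values: angle = 90, scale = 1, center = (0,0) gives
 * [0 1 0; -1 0 0], i.e. (x, y) -> (y, -x). */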
/* Calculates coefficients of perspective transformation
 * which maps (xi,yi) to (ui,vi), (i=1,2,3,4):
 *
 *      c00*xi + c01*yi + c02
 * ui = ---------------------
 *      c20*xi + c21*yi + c22
 *
 *      c10*xi + c11*yi + c12
 * vi = ---------------------
 *      c20*xi + c21*yi + c22
 *
 * Coefficients are calculated by solving linear system:
 * / x0 y0  1  0  0  0 -x0*u0 -y0*u0 \ /c00\ /u0\
 * | x1 y1  1  0  0  0 -x1*u1 -y1*u1 | |c01| |u1|
 * | x2 y2  1  0  0  0 -x2*u2 -y2*u2 | |c02| |u2|
 * | x3 y3  1  0  0  0 -x3*u3 -y3*u3 |.|c10|=|u3|,
 * |  0  0  0 x0 y0  1 -x0*v0 -y0*v0 | |c11| |v0|
 * |  0  0  0 x1 y1  1 -x1*v1 -y1*v1 | |c12| |v1|
 * |  0  0  0 x2 y2  1 -x2*v2 -y2*v2 | |c20| |v2|
 * \  0  0  0 x3 y3  1 -x3*v3 -y3*v3 / \c21/ \v3/
 *
 * where:
 *   cij - matrix coefficients, c22 = 1
 */
cv::Mat cv::getPerspectiveTransform( const Point2f src[], const Point2f dst[] )
{
    Mat M(3, 3, CV_64F), X(8, 1, CV_64F, M.data);
    double a[8][8], b[8];
    Mat A(8, 8, CV_64F, a), B(8, 1, CV_64F, b);

    for( int i = 0; i < 4; ++i )
    {
        a[i][0] = a[i+4][3] = src[i].x;
        a[i][1] = a[i+4][4] = src[i].y;
        a[i][2] = a[i+4][5] = 1;
        a[i][3] = a[i][4] = a[i][5] =
        a[i+4][0] = a[i+4][1] = a[i+4][2] = 0;
        a[i][6] = -src[i].x*dst[i].x;
        a[i][7] = -src[i].y*dst[i].x;
        a[i+4][6] = -src[i].x*dst[i].y;
        a[i+4][7] = -src[i].y*dst[i].y;
        b[i] = dst[i].x;
        b[i+4] = dst[i].y;
    }

    solve( A, B, X, DECOMP_SVD );
    ((double*)M.data)[8] = 1.;

    return M;
}
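/* The system above is the standard DLT formulation with c22 fixed to 1;
 * DECOMP_SVD keeps the solve stable when the four points are close to
 * degenerate (e.g. three of them nearly collinear). A quick identity check
 * with hypothetical values:
 *
 *     // mapping the unit square onto itself must give (numerically)
 *     // the 3x3 identity matrix
 *     cv::Point2f p[4];
 *     p[0] = cv::Point2f(0, 0); p[1] = cv::Point2f(1, 0);
 *     p[2] = cv::Point2f(1, 1); p[3] = cv::Point2f(0, 1);
 *     cv::Mat I = cv::getPerspectiveTransform(p, p); // ~= cv::Mat::eye(3, 3, CV_64F)
 */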
/* Calculates coefficients of affine transformation
 * which maps (xi,yi) to (ui,vi), (i=1,2,3):
 *
 * ui = c00*xi + c01*yi + c02
 *
 * vi = c10*xi + c11*yi + c12
 *
 * Coefficients are calculated by solving linear system:
 * / x0 y0  1  0  0  0 \ /c00\ /u0\
 * | x1 y1  1  0  0  0 | |c01| |u1|
 * | x2 y2  1  0  0  0 | |c02| |u2|
 * |  0  0  0 x0 y0  1 | |c10| |v0|
 * |  0  0  0 x1 y1  1 | |c11| |v1|
 * \  0  0  0 x2 y2  1 / |c12| |v2|
 *
 * where:
 *   cij - matrix coefficients
 */
cv::Mat cv::getAffineTransform( const Point2f src[], const Point2f dst[] )
{
    Mat M(2, 3, CV_64F), X(6, 1, CV_64F, M.data);
    double a[6*6], b[6];
    Mat A(6, 6, CV_64F, a), B(6, 1, CV_64F, b);

    for( int i = 0; i < 3; i++ )
    {
        int j = i*12, k = i*12+6;
        a[j] = a[k+3] = src[i].x;
        a[j+1] = a[k+4] = src[i].y;
        a[j+2] = a[k+5] = 1;
        a[j+3] = a[j+4] = a[j+5] = 0;
        a[k] = a[k+1] = a[k+2] = 0;
        b[i*2] = dst[i].x;
        b[i*2+1] = dst[i].y;
    }

    solve( A, B, X );
    return M;
}
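/* Usage sketch (point values are hypothetical): three point correspondences
 * determine the six affine coefficients exactly:
 *
 *     cv::Point2f s[3], d[3];
 *     s[0] = cv::Point2f(0, 0);  d[0] = cv::Point2f(10, 10);
 *     s[1] = cv::Point2f(1, 0);  d[1] = cv::Point2f(11, 10);
 *     s[2] = cv::Point2f(0, 1);  d[2] = cv::Point2f(10, 11);
 *     cv::Mat A = cv::getAffineTransform(s, d);
 *     // pure translation by (10,10): A == [1 0 10; 0 1 10]
 */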
void cv::invertAffineTransform(InputArray _matM, OutputArray __iM)
{
    Mat matM = _matM.getMat();
    CV_Assert(matM.rows == 2 && matM.cols == 3);
    __iM.create(2, 3, matM.type());
    Mat _iM = __iM.getMat();

    if( matM.type() == CV_32F )
    {
        const float* M = (const float*)matM.data;
        float* iM = (float*)_iM.data;
        int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));

        double D = M[0]*M[step+1] - M[1]*M[step];
        D = D != 0 ? 1./D : 0;
        double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
        double b1 = -A11*M[2] - A12*M[step+2];
        double b2 = -A21*M[2] - A22*M[step+2];

        iM[0] = (float)A11; iM[1] = (float)A12; iM[2] = (float)b1;
        iM[istep] = (float)A21; iM[istep+1] = (float)A22; iM[istep+2] = (float)b2;
    }
    else if( matM.type() == CV_64F )
    {
        const double* M = (const double*)matM.data;
        double* iM = (double*)_iM.data;
        int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));

        double D = M[0]*M[step+1] - M[1]*M[step];
        D = D != 0 ? 1./D : 0;
        double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
        double b1 = -A11*M[2] - A12*M[step+2];
        double b2 = -A21*M[2] - A22*M[step+2];

        iM[0] = A11; iM[1] = A12; iM[2] = b1;
        iM[istep] = A21; iM[istep+1] = A22; iM[istep+2] = b2;
    }
    else
        CV_Error( CV_StsUnsupportedFormat, "" );
}
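/* Worked example (hypothetical values): for M = [2 0 10; 0 2 20],
 * invertAffineTransform produces [0.5 0 -5; 0 0.5 -10], since the inverse
 * of x' = 2x + 10, y' = 2y + 20 is x = 0.5x' - 5, y = 0.5y' - 10.
 * A degenerate input (D == 0) yields an all-zero result rather than an
 * error, mirroring the D != 0 ? 1./D : 0 guard above. */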
cv::Mat cv::getPerspectiveTransform(InputArray _src, InputArray _dst)
{
    Mat src = _src.getMat(), dst = _dst.getMat();
    CV_Assert(src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4);
    return getPerspectiveTransform((const Point2f*)src.data, (const Point2f*)dst.data);
}

cv::Mat cv::getAffineTransform(InputArray _src, InputArray _dst)
{
    Mat src = _src.getMat(), dst = _dst.getMat();
    CV_Assert(src.checkVector(2, CV_32F) == 3 && dst.checkVector(2, CV_32F) == 3);
    return getAffineTransform((const Point2f*)src.data, (const Point2f*)dst.data);
}
CV_IMPL void
cvResize( const CvArr* srcarr, CvArr* dstarr, int method )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.type() == dst.type() );
    cv::resize( src, dst, dst.size(), (double)dst.cols/src.cols,
                (double)dst.rows/src.rows, method );
}


CV_IMPL void
cvWarpAffine( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr,
              int flags, CvScalar fillval )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    cv::Mat matrix = cv::cvarrToMat(marr);
    CV_Assert( src.type() == dst.type() );
    cv::warpAffine( src, dst, matrix, dst.size(), flags,
                    (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
                    fillval );
}


CV_IMPL void
cvWarpPerspective( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr,
                   int flags, CvScalar fillval )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    cv::Mat matrix = cv::cvarrToMat(marr);
    CV_Assert( src.type() == dst.type() );
    cv::warpPerspective( src, dst, matrix, dst.size(), flags,
                         (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
                         fillval );
}


CV_IMPL void
cvRemap( const CvArr* srcarr, CvArr* dstarr,
         const CvArr* _mapx, const CvArr* _mapy,
         int flags, CvScalar fillval )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
    cv::Mat mapx = cv::cvarrToMat(_mapx), mapy = cv::cvarrToMat(_mapy);
    CV_Assert( src.type() == dst.type() && dst.size() == mapx.size() );
    cv::remap( src, dst, mapx, mapy, flags & cv::INTER_MAX,
               (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT,
               fillval );
    CV_Assert( dst0.data == dst.data );
}


CV_IMPL CvMat*
cv2DRotationMatrix( CvPoint2D32f center, double angle,
                    double scale, CvMat* matrix )
{
    cv::Mat M0 = cv::cvarrToMat(matrix), M = cv::getRotationMatrix2D(center, angle, scale);
    CV_Assert( M.size() == M0.size() );
    M.convertTo(M0, M0.type());
    return matrix;
}


CV_IMPL CvMat*
cvGetPerspectiveTransform( const CvPoint2D32f* src,
                           const CvPoint2D32f* dst,
                           CvMat* matrix )
{
    cv::Mat M0 = cv::cvarrToMat(matrix),
        M = cv::getPerspectiveTransform((const cv::Point2f*)src, (const cv::Point2f*)dst);
    CV_Assert( M.size() == M0.size() );
    M.convertTo(M0, M0.type());
    return matrix;
}


CV_IMPL CvMat*
cvGetAffineTransform( const CvPoint2D32f* src,
                      const CvPoint2D32f* dst,
                      CvMat* matrix )
{
    cv::Mat M0 = cv::cvarrToMat(matrix),
        M = cv::getAffineTransform((const cv::Point2f*)src, (const cv::Point2f*)dst);
    CV_Assert( M.size() == M0.size() );
    M.convertTo(M0, M0.type());
    return matrix;
}


CV_IMPL void
cvConvertMaps( const CvArr* arr1, const CvArr* arr2, CvArr* dstarr1, CvArr* dstarr2 )
{
    cv::Mat map1 = cv::cvarrToMat(arr1), map2;
    cv::Mat dstmap1 = cv::cvarrToMat(dstarr1), dstmap2;

    if( arr2 )
        map2 = cv::cvarrToMat(arr2);
    if( dstarr2 )
    {
        dstmap2 = cv::cvarrToMat(dstarr2);
        if( dstmap2.type() == CV_16SC1 )
            dstmap2 = cv::Mat(dstmap2.size(), CV_16UC1, dstmap2.data, dstmap2.step);
    }

    cv::convertMaps( map1, map2, dstmap1, dstmap2, dstmap1.type(), false );
}
/****************************************************************************************\
*                                   Log-Polar Transform                                  *
\****************************************************************************************/

/* now it is done via Remap; more correct implementation should use
   some super-sampling technique outside of the "fovea" circle */
CV_IMPL void
cvLogPolar( const CvArr* srcarr, CvArr* dstarr,
            CvPoint2D32f center, double M, int flags )
{
    cv::Ptr<CvMat> mapx, mapy;

    CvMat srcstub, *src = cvGetMat(srcarr, &srcstub);
    CvMat dststub, *dst = cvGetMat(dstarr, &dststub);
    CvSize ssize, dsize;

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_Error( CV_StsUnmatchedFormats, "" );

    if( M <= 0 )
        CV_Error( CV_StsOutOfRange, "M should be >0" );

    ssize = cvGetMatSize(src);
    dsize = cvGetMatSize(dst);

    mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
    mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));

    if( !(flags & CV_WARP_INVERSE_MAP) )
    {
        int phi, rho;
        cv::AutoBuffer<double> _exp_tab(dsize.width);
        double* exp_tab = _exp_tab;

        for( rho = 0; rho < dst->width; rho++ )
            exp_tab[rho] = std::exp(rho/M);

        for( phi = 0; phi < dsize.height; phi++ )
        {
            double cp = cos(phi*2*CV_PI/dsize.height);
            double sp = sin(phi*2*CV_PI/dsize.height);
            float* mx = (float*)(mapx->data.ptr + phi*mapx->step);
            float* my = (float*)(mapy->data.ptr + phi*mapy->step);

            for( rho = 0; rho < dsize.width; rho++ )
            {
                double r = exp_tab[rho];
                double x = r*cp + center.x;
                double y = r*sp + center.y;

                mx[rho] = (float)x;
                my[rho] = (float)y;
            }
        }
    }
    else
    {
        int x, y;
        CvMat bufx, bufy, bufp, bufa;
        double ascale = ssize.height/(2*CV_PI);
        cv::AutoBuffer<float> _buf(4*dsize.width);
        float* buf = _buf;

        bufx = cvMat( 1, dsize.width, CV_32F, buf );
        bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width );
        bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 );
        bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 );

        for( x = 0; x < dsize.width; x++ )
            bufx.data.fl[x] = (float)x - center.x;

        for( y = 0; y < dsize.height; y++ )
        {
            float* mx = (float*)(mapx->data.ptr + y*mapx->step);
            float* my = (float*)(mapy->data.ptr + y*mapy->step);

            for( x = 0; x < dsize.width; x++ )
                bufy.data.fl[x] = (float)y - center.y;

#if 1
            cvCartToPolar( &bufx, &bufy, &bufp, &bufa );

            for( x = 0; x < dsize.width; x++ )
                bufp.data.fl[x] += 1.f;

            cvLog( &bufp, &bufp );

            for( x = 0; x < dsize.width; x++ )
            {
                double rho = bufp.data.fl[x]*M;
                double phi = bufa.data.fl[x]*ascale;

                mx[x] = (float)rho;
                my[x] = (float)phi;
            }
#else
            for( x = 0; x < dsize.width; x++ )
            {
                double xx = bufx.data.fl[x];
                double yy = bufy.data.fl[x];

                double p = log(std::sqrt(xx*xx + yy*yy) + 1.)*M;
                double a = atan2(yy, xx);
                if( a < 0 )
                    a = 2*CV_PI + a;
                a *= ascale;

                mx[x] = (float)p;
                my[x] = (float)a;
            }
#endif
        }
    }

    cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) );
}

void cv::logPolar( InputArray _src, OutputArray _dst,
                   Point2f center, double M, int flags )
{
    Mat src = _src.getMat();
    _dst.create( src.size(), src.type() );
    CvMat c_src = src, c_dst = _dst.getMat();
    cvLogPolar( &c_src, &c_dst, center, M, flags );
}
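/* Usage sketch (illustrative; the magnitude scale 40 and the flag
 * combination are assumptions -- cv::WARP_FILL_OUTLIERS corresponds to
 * CV_WARP_FILL_OUTLIERS in the C API):
 *
 *     cv::Mat src = cv::imread("input.png"), dst;
 *     cv::logPolar(src, dst,
 *                  cv::Point2f(src.cols*0.5f, src.rows*0.5f), 40,
 *                  cv::INTER_LINEAR + cv::WARP_FILL_OUTLIERS);
 *
 * Forward mapping: rho = M*log(sqrt(x^2 + y^2)), phi = atan2(y, x), so
 * rotation and scaling about the center become translations in (rho, phi). */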
/****************************************************************************************
                                   Linear-Polar Transform
  J.L. Blanco, Apr 2009
 ****************************************************************************************/
CV_IMPL
void cvLinearPolar( const CvArr* srcarr, CvArr* dstarr,
                    CvPoint2D32f center, double maxRadius, int flags )
{
    cv::Ptr<CvMat> mapx, mapy;

    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize ssize, dsize;

    src = cvGetMat( srcarr, &srcstub, 0, 0 );
    dst = cvGetMat( dstarr, &dststub, 0, 0 );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_Error( CV_StsUnmatchedFormats, "" );

    ssize.width = src->cols;
    ssize.height = src->rows;
    dsize.width = dst->cols;
    dsize.height = dst->rows;

    mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
    mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));

    if( !(flags & CV_WARP_INVERSE_MAP) )
    {
        int phi, rho;

        for( phi = 0; phi < dsize.height; phi++ )
        {
            double cp = cos(phi*2*CV_PI/dsize.height);
            double sp = sin(phi*2*CV_PI/dsize.height);
            float* mx = (float*)(mapx->data.ptr + phi*mapx->step);
            float* my = (float*)(mapy->data.ptr + phi*mapy->step);

            for( rho = 0; rho < dsize.width; rho++ )
            {
                double r = maxRadius*(rho+1)/dsize.width;
                double x = r*cp + center.x;
                double y = r*sp + center.y;

                mx[rho] = (float)x;
                my[rho] = (float)y;
            }
        }
    }
    else
    {
        int x, y;
        CvMat bufx, bufy, bufp, bufa;
        const double ascale = ssize.height/(2*CV_PI);
        const double pscale = ssize.width/maxRadius;

        cv::AutoBuffer<float> _buf(4*dsize.width);
        float* buf = _buf;

        bufx = cvMat( 1, dsize.width, CV_32F, buf );
        bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width );
        bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 );
        bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 );

        for( x = 0; x < dsize.width; x++ )
            bufx.data.fl[x] = (float)x - center.x;

        for( y = 0; y < dsize.height; y++ )
        {
            float* mx = (float*)(mapx->data.ptr + y*mapx->step);
            float* my = (float*)(mapy->data.ptr + y*mapy->step);

            for( x = 0; x < dsize.width; x++ )
                bufy.data.fl[x] = (float)y - center.y;

            cvCartToPolar( &bufx, &bufy, &bufp, &bufa, 0 );

            for( x = 0; x < dsize.width; x++ )
                bufp.data.fl[x] += 1.f;

            for( x = 0; x < dsize.width; x++ )
            {
                double rho = bufp.data.fl[x]*pscale;
                double phi = bufa.data.fl[x]*ascale;

                mx[x] = (float)rho;
                my[x] = (float)phi;
            }
        }
    }

    cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) );
}

void cv::linearPolar( InputArray _src, OutputArray _dst,
                      Point2f center, double maxRadius, int flags )
{
    Mat src = _src.getMat();
    _dst.create( src.size(), src.type() );
    CvMat c_src = src, c_dst = _dst.getMat();
    cvLinearPolar( &c_src, &c_dst, center, maxRadius, flags );
}
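/* Usage sketch (illustrative; the maxRadius choice is an assumption):
 *
 *     cv::Mat src = cv::imread("input.png"), dst;
 *     cv::linearPolar(src, dst,
 *                     cv::Point2f(src.cols*0.5f, src.rows*0.5f),
 *                     src.cols*0.5, cv::INTER_LINEAR + cv::WARP_FILL_OUTLIERS);
 *
 * Unlike logPolar, the radius is sampled linearly: r = maxRadius*(rho+1)/dsize.width,
 * so the full disc of radius maxRadius maps onto the destination width. */

/* End of file. */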