/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels.hpp"

namespace cv
{
template<typename T, int shift> struct FixPtCast
{
    typedef int type1;
    typedef T rtype;
    rtype operator ()(type1 arg) const { return (T)((arg + (1 << (shift-1))) >> shift); }
};

template<typename T, int shift> struct FltCast
{
    typedef T type1;
    typedef T rtype;
    rtype operator ()(type1 arg) const { return arg*(T)(1./(1 << shift)); }
};

template<typename T1, typename T2> struct NoVec
{
    int operator()(T1**, T2*, int, int) const { return 0; }
};
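
// Added commentary: a worked example of the casts above. pyrDown applies the
// 1-4-6-4-1 kernel twice (rows, then columns), so every accumulated value
// carries a gain of 16*16 = 256. FixPtCast<uchar, 8> removes that gain with
// round-to-nearest: a uniform white patch accumulates 255*256 = 65280, and
// (65280 + 128) >> 8 == 255. FltCast<float, 8> removes the same gain with a
// single multiply by 1/256. NoVec is the do-nothing fallback returned when no
// SIMD path applies; the scalar tail loop then handles the whole row.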
#if CV_SSE2

struct PyrDownVec_32s8u
{
    int operator()(int** src, uchar* dst, int, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        int x = 0;
        const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
        __m128i delta = _mm_set1_epi16(128);

        for( ; x <= width - 16; x += 16 )
        {
            __m128i r0, r1, r2, r3, r4, t0, t1;
            r0 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row0 + x)),
                                 _mm_load_si128((const __m128i*)(row0 + x + 4)));
            r1 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row1 + x)),
                                 _mm_load_si128((const __m128i*)(row1 + x + 4)));
            r2 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row2 + x)),
                                 _mm_load_si128((const __m128i*)(row2 + x + 4)));
            r3 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row3 + x)),
                                 _mm_load_si128((const __m128i*)(row3 + x + 4)));
            r4 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row4 + x)),
                                 _mm_load_si128((const __m128i*)(row4 + x + 4)));
            r0 = _mm_add_epi16(r0, r4);
            r1 = _mm_add_epi16(_mm_add_epi16(r1, r3), r2);
            r0 = _mm_add_epi16(r0, _mm_add_epi16(r2, r2));
            t0 = _mm_add_epi16(r0, _mm_slli_epi16(r1, 2));
            r0 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row0 + x + 8)),
                                 _mm_load_si128((const __m128i*)(row0 + x + 12)));
            r1 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row1 + x + 8)),
                                 _mm_load_si128((const __m128i*)(row1 + x + 12)));
            r2 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row2 + x + 8)),
                                 _mm_load_si128((const __m128i*)(row2 + x + 12)));
            r3 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row3 + x + 8)),
                                 _mm_load_si128((const __m128i*)(row3 + x + 12)));
            r4 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row4 + x + 8)),
                                 _mm_load_si128((const __m128i*)(row4 + x + 12)));
            r0 = _mm_add_epi16(r0, r4);
            r1 = _mm_add_epi16(_mm_add_epi16(r1, r3), r2);
            r0 = _mm_add_epi16(r0, _mm_add_epi16(r2, r2));
            t1 = _mm_add_epi16(r0, _mm_slli_epi16(r1, 2));
            t0 = _mm_srli_epi16(_mm_add_epi16(t0, delta), 8);
            t1 = _mm_srli_epi16(_mm_add_epi16(t1, delta), 8);
            _mm_storeu_si128((__m128i*)(dst + x), _mm_packus_epi16(t0, t1));
        }

        for( ; x <= width - 4; x += 4 )
        {
            __m128i r0, r1, r2, r3, r4, z = _mm_setzero_si128();
            r0 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row0 + x)), z);
            r1 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row1 + x)), z);
            r2 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row2 + x)), z);
            r3 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row3 + x)), z);
            r4 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row4 + x)), z);
            r0 = _mm_add_epi16(r0, r4);
            r1 = _mm_add_epi16(_mm_add_epi16(r1, r3), r2);
            r0 = _mm_add_epi16(r0, _mm_add_epi16(r2, r2));
            r0 = _mm_add_epi16(r0, _mm_slli_epi16(r1, 2));
            r0 = _mm_srli_epi16(_mm_add_epi16(r0, delta), 8);
            *(int*)(dst + x) = _mm_cvtsi128_si32(_mm_packus_epi16(r0, r0));
        }

        return x;
    }
};
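
// Added commentary: per output pixel, the SSE2 code above evaluates the
// vertical 1-4-6-4-1 kernel in 16-bit lanes as
//     t = (r0 + r4) + 2*r2 + ((r1 + r3 + r2) << 2)
//       = r0 + 4*r1 + 6*r2 + 4*r3 + r4,
// then rounds away the 256x gain with (t + 128) >> 8 and packs to 8 bits.
// The aligned _mm_load_si128 loads are safe because the ring-buffer rows are
// 16-byte aligned (see bufstep/alignPtr in pyrDown_ below); the destination
// row has no such guarantee, hence the unaligned _mm_storeu_si128 store.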
struct PyrDownVec_32f
{
    int operator()(float** src, float* dst, int, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        int x = 0;
        const float *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
        __m128 _4 = _mm_set1_ps(4.f), _scale = _mm_set1_ps(1.f/256);
        for( ; x <= width - 8; x += 8 )
        {
            __m128 r0, r1, r2, r3, r4, t0, t1;
            r0 = _mm_load_ps(row0 + x);
            r1 = _mm_load_ps(row1 + x);
            r2 = _mm_load_ps(row2 + x);
            r3 = _mm_load_ps(row3 + x);
            r4 = _mm_load_ps(row4 + x);
            r0 = _mm_add_ps(r0, r4);
            r1 = _mm_add_ps(_mm_add_ps(r1, r3), r2);
            r0 = _mm_add_ps(r0, _mm_add_ps(r2, r2));
            t0 = _mm_add_ps(r0, _mm_mul_ps(r1, _4));

            r0 = _mm_load_ps(row0 + x + 4);
            r1 = _mm_load_ps(row1 + x + 4);
            r2 = _mm_load_ps(row2 + x + 4);
            r3 = _mm_load_ps(row3 + x + 4);
            r4 = _mm_load_ps(row4 + x + 4);
            r0 = _mm_add_ps(r0, r4);
            r1 = _mm_add_ps(_mm_add_ps(r1, r3), r2);
            r0 = _mm_add_ps(r0, _mm_add_ps(r2, r2));
            t1 = _mm_add_ps(r0, _mm_mul_ps(r1, _4));

            t0 = _mm_mul_ps(t0, _scale);
            t1 = _mm_mul_ps(t1, _scale);

            _mm_storeu_ps(dst + x, t0);
            _mm_storeu_ps(dst + x + 4, t1);
        }

        return x;
    }
};

#else
typedef NoVec<int, uchar> PyrDownVec_32s8u;
typedef NoVec<float, float> PyrDownVec_32f;

#endif
template<class CastOp, class VecOp> void
pyrDown_( const Mat& _src, Mat& _dst, int borderType )
{
    const int PD_SZ = 5;
    typedef typename CastOp::type1 WT;
    typedef typename CastOp::rtype T;

    CV_Assert( !_src.empty() );
    Size ssize = _src.size(), dsize = _dst.size();
    int cn = _src.channels();
    int bufstep = (int)alignSize(dsize.width*cn, 16);
    AutoBuffer<WT> _buf(bufstep*PD_SZ + 16);
    WT* buf = alignPtr((WT*)_buf, 16);
    int tabL[CV_CN_MAX*(PD_SZ+2)], tabR[CV_CN_MAX*(PD_SZ+2)];
    AutoBuffer<int> _tabM(dsize.width*cn);
    int* tabM = _tabM;
    WT* rows[PD_SZ];
    CastOp castOp;
    VecOp vecOp;

    CV_Assert( ssize.width > 0 && ssize.height > 0 &&
               std::abs(dsize.width*2 - ssize.width) <= 2 &&
               std::abs(dsize.height*2 - ssize.height) <= 2 );
    int k, x, sy0 = -PD_SZ/2, sy = sy0, width0 = std::min((ssize.width-PD_SZ/2-1)/2 + 1, dsize.width);
    for( x = 0; x <= PD_SZ+1; x++ )
    {
        int sx0 = borderInterpolate(x - PD_SZ/2, ssize.width, borderType)*cn;
        int sx1 = borderInterpolate(x + width0*2 - PD_SZ/2, ssize.width, borderType)*cn;
        for( k = 0; k < cn; k++ )
        {
            tabL[x*cn + k] = sx0 + k;
            tabR[x*cn + k] = sx1 + k;
        }
    }

    ssize.width *= cn;
    dsize.width *= cn;
    width0 *= cn;
    for( x = 0; x < dsize.width; x++ )
        tabM[x] = (x/cn)*2*cn + x % cn;
    for( int y = 0; y < dsize.height; y++ )
    {
        T* dst = (T*)(_dst.data + _dst.step*y);
        WT *row0, *row1, *row2, *row3, *row4;

        // fill the ring buffer (horizontal convolution and decimation)
        for( ; sy <= y*2 + 2; sy++ )
        {
            WT* row = buf + ((sy - sy0) % PD_SZ)*bufstep;
            int _sy = borderInterpolate(sy, ssize.height, borderType);
            const T* src = (const T*)(_src.data + _src.step*_sy);
            int limit = cn;
            const int* tab = tabL;

            for( x = 0;; )
            {
                for( ; x < limit; x++ )
                {
                    row[x] = src[tab[x+cn*2]]*6 + (src[tab[x+cn]] + src[tab[x+cn*3]])*4 +
                        src[tab[x]] + src[tab[x+cn*4]];
                }

                if( x == dsize.width )
                    break;

                if( cn == 1 )
                {
                    for( ; x < width0; x++ )
                        row[x] = src[x*2]*6 + (src[x*2 - 1] + src[x*2 + 1])*4 +
                            src[x*2 - 2] + src[x*2 + 2];
                }
                else if( cn == 3 )
                {
                    for( ; x < width0; x += 3 )
                    {
                        const T* s = src + x*2;
                        WT t0 = s[0]*6 + (s[-3] + s[3])*4 + s[-6] + s[6];
                        WT t1 = s[1]*6 + (s[-2] + s[4])*4 + s[-5] + s[7];
                        WT t2 = s[2]*6 + (s[-1] + s[5])*4 + s[-4] + s[8];
                        row[x] = t0; row[x+1] = t1; row[x+2] = t2;
                    }
                }
                else if( cn == 4 )
                {
                    for( ; x < width0; x += 4 )
                    {
                        const T* s = src + x*2;
                        WT t0 = s[0]*6 + (s[-4] + s[4])*4 + s[-8] + s[8];
                        WT t1 = s[1]*6 + (s[-3] + s[5])*4 + s[-7] + s[9];
                        row[x] = t0; row[x+1] = t1;
                        t0 = s[2]*6 + (s[-2] + s[6])*4 + s[-6] + s[10];
                        t1 = s[3]*6 + (s[-1] + s[7])*4 + s[-5] + s[11];
                        row[x+2] = t0; row[x+3] = t1;
                    }
                }
                else
                {
                    for( ; x < width0; x++ )
                    {
                        int sx = tabM[x];
                        row[x] = src[sx]*6 + (src[sx - cn] + src[sx + cn])*4 +
                            src[sx - cn*2] + src[sx + cn*2];
                    }
                }

                limit = dsize.width;
                tab = tabR - x;
            }
        }

        // do vertical convolution and decimation and write the result to the destination image
        for( k = 0; k < PD_SZ; k++ )
            rows[k] = buf + ((y*2 - PD_SZ/2 + k - sy0) % PD_SZ)*bufstep;
        row0 = rows[0]; row1 = rows[1]; row2 = rows[2]; row3 = rows[3]; row4 = rows[4];

        x = vecOp(rows, dst, (int)_dst.step, dsize.width);
        for( ; x < dsize.width; x++ )
            dst[x] = castOp(row2[x]*6 + (row1[x] + row3[x])*4 + row0[x] + row4[x]);
    }
}
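
// Added commentary on the structure above: destination row y needs the five
// filtered source rows 2*y-2 .. 2*y+2, and consecutive y advance that window
// by two, so three of the five rows can be reused. The ring buffer of PD_SZ
// row buffers caches the horizontal pass; only the newly exposed source rows
// are convolved, and the vertical pass combines the five cached rows with the
// same 1-4-6-4-1 weights before CastOp removes the 256x gain.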
template<class CastOp, class VecOp> void
pyrUp_( const Mat& _src, Mat& _dst, int)
{
    const int PU_SZ = 3;
    typedef typename CastOp::type1 WT;
    typedef typename CastOp::rtype T;

    Size ssize = _src.size(), dsize = _dst.size();
    int cn = _src.channels();
    int bufstep = (int)alignSize((dsize.width+1)*cn, 16);
    AutoBuffer<WT> _buf(bufstep*PU_SZ + 16);
    WT* buf = alignPtr((WT*)_buf, 16);
    AutoBuffer<int> _dtab(ssize.width*cn);
    int* dtab = _dtab;
    WT* rows[PU_SZ];
    CastOp castOp;
    VecOp vecOp;

    CV_Assert( std::abs(dsize.width - ssize.width*2) == dsize.width % 2 &&
               std::abs(dsize.height - ssize.height*2) == dsize.height % 2);
    int k, x, sy0 = -PU_SZ/2, sy = sy0;

    ssize.width *= cn;
    dsize.width *= cn;

    for( x = 0; x < ssize.width; x++ )
        dtab[x] = (x/cn)*2*cn + x % cn;
    for( int y = 0; y < ssize.height; y++ )
    {
        T* dst0 = (T*)(_dst.data + _dst.step*y*2);
        T* dst1 = (T*)(_dst.data + _dst.step*(y*2+1));
        WT *row0, *row1, *row2;

        if( y*2+1 >= dsize.height )
            dst1 = dst0;

        // fill the ring buffer (horizontal convolution and decimation)
        for( ; sy <= y + 1; sy++ )
        {
            WT* row = buf + ((sy - sy0) % PU_SZ)*bufstep;
            int _sy = borderInterpolate(sy*2, ssize.height*2, BORDER_REFLECT_101)/2;
            const T* src = (const T*)(_src.data + _src.step*_sy);

            if( ssize.width == cn )
            {
                for( x = 0; x < cn; x++ )
                    row[x] = row[x + cn] = src[x]*8;
                continue;
            }

            for( x = 0; x < cn; x++ )
            {
                int dx = dtab[x];
                WT t0 = src[x]*6 + src[x + cn]*2;
                WT t1 = (src[x] + src[x + cn])*4;
                row[dx] = t0; row[dx + cn] = t1;
                dx = dtab[ssize.width - cn + x];
                int sx = ssize.width - cn + x;
                t0 = src[sx - cn] + src[sx]*7;
                t1 = src[sx]*8;
                row[dx] = t0; row[dx + cn] = t1;
            }

            for( x = cn; x < ssize.width - cn; x++ )
            {
                int dx = dtab[x];
                WT t0 = src[x-cn] + src[x]*6 + src[x+cn];
                WT t1 = (src[x] + src[x+cn])*4;
                row[dx] = t0; row[dx + cn] = t1;
            }
        }

        // do vertical convolution and decimation and write the result to the destination image
        for( k = 0; k < PU_SZ; k++ )
            rows[k] = buf + ((y - PU_SZ/2 + k - sy0) % PU_SZ)*bufstep;
        row0 = rows[0]; row1 = rows[1]; row2 = rows[2];

        x = vecOp(rows, dst0, (int)_dst.step, dsize.width);
        for( ; x < dsize.width; x++ )
        {
            T t1 = castOp((row1[x] + row2[x])*4);
            T t0 = castOp(row0[x] + row1[x]*6 + row2[x]);
            dst1[x] = t1; dst0[x] = t0;
        }
    }
}
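
// Added commentary: pyrUp conceptually inserts zeros between samples and
// convolves with the same Gaussian kernel scaled by 2. After zero insertion,
// even outputs see the weights (1, 6, 1) and odd outputs (4, 4), which is
// exactly the t0/t1 split above; each pass has a gain of 8, so the two passes
// give 64, removed by CastOp (shift = 6 in the fixed-point instantiations).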
typedef void (*PyrFunc)(const Mat&, Mat&, int);

#ifdef HAVE_OPENCL
static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);

    if (channels > 4 || borderType != BORDER_DEFAULT)
        return false;

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (depth == CV_64F && !doubleSupport)
        return false;

    Size ssize = _src.size();
    Size dsize = _dsz.area() == 0 ? Size((ssize.width + 1) / 2, (ssize.height + 1) / 2) : _dsz;
    CV_Assert( ssize.width > 0 && ssize.height > 0 &&
               std::abs(dsize.width*2 - ssize.width) <= 2 &&
               std::abs(dsize.height*2 - ssize.height) <= 2 );

    UMat src = _src.getUMat();
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)),
            ocl::convertTypeStr(float_depth, depth, channels, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, channels, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "",
            ocl::typeToStr(depth), channels
    );
    ocl::Kernel k("pyrDown", ocl::imgproc::pyr_down_oclsrc, buildOptions);
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));

    size_t localThreads[2] = { 256, 1 };
    size_t globalThreads[2] = { (size_t)src.cols, (size_t)dst.rows };
    return k.run(2, globalThreads, localThreads, false);
}
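
// Added note: the OpenCL path specializes the .cl source at compile time via
// the -D options above; T is the image type, FT the floating-point working
// type, and convertToT/convertToFT the saturating conversions between them.
// As an illustration (assuming CV_8UC4 input), the options expand to roughly
// "-D T=uchar4 -D FT=float4 -D convertToT=convert_uchar4_sat_rte ...".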
static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);

    if (channels > 4 || borderType != BORDER_DEFAULT)
        return false;

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (depth == CV_64F && !doubleSupport)
        return false;

    Size ssize = _src.size();
    if ((_dsz.area() != 0) && (_dsz != Size(ssize.width * 2, ssize.height * 2)))
        return false;

    UMat src = _src.getUMat();
    Size dsize = Size(ssize.width * 2, ssize.height * 2);
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)),
            ocl::convertTypeStr(float_depth, depth, channels, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, channels, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "",
            ocl::typeToStr(depth), channels
    );
    ocl::Kernel k("pyrUp", ocl::imgproc::pyr_up_oclsrc, buildOptions);
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));
    size_t globalThreads[2] = { (size_t)dst.cols, (size_t)dst.rows };
    size_t localThreads[2] = { 16, 16 };

    return k.run(2, globalThreads, localThreads, false);
}

#endif

}
void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
    CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
               ocl_pyrDown(_src, _dst, _dsz, borderType))

    Mat src = _src.getMat();
    Size dsz = _dsz.area() == 0 ? Size((src.cols + 1)/2, (src.rows + 1)/2) : _dsz;
    _dst.create( dsz, src.type() );
    Mat dst = _dst.getMat();
    int depth = src.depth();

#ifdef HAVE_TEGRA_OPTIMIZATION
    if(borderType == BORDER_DEFAULT && tegra::pyrDown(src, dst))
        return;
#endif

#if (defined(HAVE_IPP) && !defined(HAVE_IPP_ICV_ONLY) && IPP_VERSION_X100 >= 801)
    typedef IppStatus (CV_STDCALL * ippiPyrDown)(const void* pSrc, int srcStep, void* pDst, int dstStep, IppiSize srcRoi, Ipp8u* buffer);
    int type = src.type();
    CV_SUPPRESS_DEPRECATED_START
    ippiPyrDown pyrDownFunc = type == CV_8UC1 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_8u_C1R :
                              type == CV_8UC3 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_8u_C3R :
                              type == CV_32FC1 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_32f_C1R :
                              type == CV_32FC3 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_32f_C3R : 0;
    CV_SUPPRESS_DEPRECATED_END

    if (pyrDownFunc)
    {
        int bufferSize;
        IppiSize srcRoi = { src.cols, src.rows };
        IppDataType dataType = depth == CV_8U ? ipp8u : ipp32f;
        CV_SUPPRESS_DEPRECATED_START
        IppStatus ok = ippiPyrDownGetBufSize_Gauss5x5(srcRoi.width, dataType, src.channels(), &bufferSize);
        CV_SUPPRESS_DEPRECATED_END
        if (ok >= 0)
        {
            Ipp8u* buffer = ippsMalloc_8u(bufferSize);
            ok = pyrDownFunc(src.data, (int) src.step, dst.data, (int) dst.step, srcRoi, buffer);
            ippsFree(buffer);

            if (ok >= 0)
                return;
        }
    }
#endif

    PyrFunc func = 0;
    if( depth == CV_8U )
        func = pyrDown_<FixPtCast<uchar, 8>, PyrDownVec_32s8u>;
    else if( depth == CV_16S )
        func = pyrDown_<FixPtCast<short, 8>, NoVec<int, short> >;
    else if( depth == CV_16U )
        func = pyrDown_<FixPtCast<ushort, 8>, NoVec<int, ushort> >;
    else if( depth == CV_32F )
        func = pyrDown_<FltCast<float, 8>, PyrDownVec_32f>;
    else if( depth == CV_64F )
        func = pyrDown_<FltCast<double, 8>, NoVec<double, double> >;
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    func( src, dst, borderType );
}
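
// Added usage sketch (illustrative, not part of the original source):
//
//     cv::Mat img = cv::imread("image.png"), half;
//     cv::pyrDown(img, half);   // default size: ((cols+1)/2, (rows+1)/2)
//
// An explicit destination size is accepted as long as it stays within the
// +/-2-pixel tolerance checked by the CV_Assert in pyrDown_.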
void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
    CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
               ocl_pyrUp(_src, _dst, _dsz, borderType))

    Mat src = _src.getMat();
    Size dsz = _dsz.area() == 0 ? Size(src.cols*2, src.rows*2) : _dsz;
    _dst.create( dsz, src.type() );
    Mat dst = _dst.getMat();

#ifdef HAVE_TEGRA_OPTIMIZATION
    if(borderType == BORDER_DEFAULT && tegra::pyrUp(src, dst))
        return;
#endif

    int depth = src.depth();
    PyrFunc func = 0;
    if( depth == CV_8U )
        func = pyrUp_<FixPtCast<uchar, 6>, NoVec<int, uchar> >;
    else if( depth == CV_16S )
        func = pyrUp_<FixPtCast<short, 6>, NoVec<int, short> >;
    else if( depth == CV_16U )
        func = pyrUp_<FixPtCast<ushort, 6>, NoVec<int, ushort> >;
    else if( depth == CV_32F )
        func = pyrUp_<FltCast<float, 6>, NoVec<float, float> >;
    else if( depth == CV_64F )
        func = pyrUp_<FltCast<double, 6>, NoVec<double, double> >;
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    func( src, dst, borderType );
}
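
// Added note: pyrUp is not an exact inverse of pyrDown, but the pair yields a
// Laplacian-pyramid layer in the usual way (illustrative sketch):
//
//     cv::Mat down, up, lap;
//     cv::pyrDown(img, down);
//     cv::pyrUp(down, up, img.size());
//     cv::subtract(img, up, lap, cv::noArray(), CV_16S); // keep negatives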
void cv::buildPyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel, int borderType )
{
    if (_src.dims() <= 2 && _dst.isUMatVector())
    {
        UMat src = _src.getUMat();
        _dst.create( maxlevel + 1, 1, 0 );
        _dst.getUMatRef(0) = src;
        for( int i = 1; i <= maxlevel; i++ )
            pyrDown( _dst.getUMatRef(i-1), _dst.getUMatRef(i), Size(), borderType );
        return;
    }

    Mat src = _src.getMat();
    _dst.create( maxlevel + 1, 1, 0 );
    _dst.getMatRef(0) = src;
    for( int i = 1; i <= maxlevel; i++ )
        pyrDown( _dst.getMatRef(i-1), _dst.getMatRef(i), Size(), borderType );
}
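
// Added usage sketch: level 0 of the output is the input image itself, so
// maxlevel == 3 produces a 4-element pyramid:
//
//     std::vector<cv::Mat> pyr;
//     cv::buildPyramid(img, pyr, 3);   // pyr.size() == 4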
CV_IMPL void cvPyrDown( const void* srcarr, void* dstarr, int _filter )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( _filter == CV_GAUSSIAN_5x5 && src.type() == dst.type());
    cv::pyrDown( src, dst, dst.size() );
}

CV_IMPL void cvPyrUp( const void* srcarr, void* dstarr, int _filter )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( _filter == CV_GAUSSIAN_5x5 && src.type() == dst.type());
    cv::pyrUp( src, dst, dst.size() );
}
CV_IMPL void
cvReleasePyramid( CvMat*** _pyramid, int extra_layers )
{
    if( !_pyramid )
        CV_Error( CV_StsNullPtr, "" );

    if( *_pyramid )
        for( int i = 0; i <= extra_layers; i++ )
            cvReleaseMat( &(*_pyramid)[i] );

    cvFree(_pyramid);
}
CV_IMPL CvMat**
cvCreatePyramid( const CvArr* srcarr, int extra_layers, double rate,
                 const CvSize* layer_sizes, CvArr* bufarr,
                 int calc, int filter )
{
    const float eps = 0.1f;
    uchar* ptr = 0;

    CvMat stub, *src = cvGetMat( srcarr, &stub );

    if( extra_layers < 0 )
        CV_Error( CV_StsOutOfRange, "The number of extra layers must be non negative" );

    int i, layer_step, elem_size = CV_ELEM_SIZE(src->type);
    CvSize layer_size, size = cvGetMatSize(src);

    if( bufarr )
    {
        CvMat bstub, *buf;
        int bufsize = 0;

        buf = cvGetMat( bufarr, &bstub );
        bufsize = buf->rows*buf->cols*CV_ELEM_SIZE(buf->type);
        layer_size = size;
        for( i = 1; i <= extra_layers; i++ )
        {
            if( !layer_sizes )
            {
                layer_size.width = cvRound(layer_size.width*rate+eps);
                layer_size.height = cvRound(layer_size.height*rate+eps);
            }
            else
                layer_size = layer_sizes[i-1];
            layer_step = layer_size.width*elem_size;
            bufsize -= layer_step*layer_size.height;
        }

        if( bufsize < 0 )
            CV_Error( CV_StsOutOfRange, "The buffer is too small to fit the pyramid" );
        ptr = buf->data.ptr;
    }

    CvMat** pyramid = (CvMat**)cvAlloc( (extra_layers+1)*sizeof(pyramid[0]) );
    memset( pyramid, 0, (extra_layers+1)*sizeof(pyramid[0]) );

    pyramid[0] = cvCreateMatHeader( size.height, size.width, src->type );
    cvSetData( pyramid[0], src->data.ptr, src->step );
    layer_size = size;

    for( i = 1; i <= extra_layers; i++ )
    {
        if( !layer_sizes )
        {
            layer_size.width = cvRound(layer_size.width*rate + eps);
            layer_size.height = cvRound(layer_size.height*rate + eps);
        }
        else
            layer_size = layer_sizes[i];

        if( bufarr )
        {
            pyramid[i] = cvCreateMatHeader( layer_size.height, layer_size.width, src->type );
            layer_step = layer_size.width*elem_size;
            cvSetData( pyramid[i], ptr, layer_step );
            ptr += layer_step*layer_size.height;
        }
        else
            pyramid[i] = cvCreateMat( layer_size.height, layer_size.width, src->type );

        if( calc )
            cvPyrDown( pyramid[i-1], pyramid[i], filter );
            //cvResize( pyramid[i-1], pyramid[i], CV_INTER_LINEAR );
    }

    return pyramid;
}

/* End of file. */