1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
41 #include "precomp.hpp"
42 #include "opencl_kernels.hpp"
47 // The function calculates center of gravity and the central second order moments
48 static void completeMomentState( Moments* moments )
50 double cx = 0, cy = 0;
51 double mu20, mu11, mu02;
53 assert( moments != 0 );
55 if( fabs(moments->m00) > DBL_EPSILON )
57 inv_m00 = 1. / moments->m00;
58 cx = moments->m10 * inv_m00;
59 cy = moments->m01 * inv_m00;
62 // mu20 = m20 - m10*cx
63 mu20 = moments->m20 - moments->m10 * cx;
64 // mu11 = m11 - m10*cy
65 mu11 = moments->m11 - moments->m10 * cy;
66 // mu02 = m02 - m01*cy
67 mu02 = moments->m02 - moments->m01 * cy;
73 // mu30 = m30 - cx*(3*mu20 + cx*m10)
74 moments->mu30 = moments->m30 - cx * (3 * mu20 + cx * moments->m10);
76 // mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20
77 moments->mu21 = moments->m21 - cx * (mu11 + cx * moments->m01) - cy * mu20;
78 // mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02
79 moments->mu12 = moments->m12 - cy * (mu11 + cy * moments->m10) - cx * mu02;
80 // mu03 = m03 - cy*(3*mu02 + cy*m01)
81 moments->mu03 = moments->m03 - cy * (3 * mu02 + cy * moments->m01);
84 double inv_sqrt_m00 = std::sqrt(std::abs(inv_m00));
85 double s2 = inv_m00*inv_m00, s3 = s2*inv_sqrt_m00;
87 moments->nu20 = moments->mu20*s2; moments->nu11 = moments->mu11*s2; moments->nu02 = moments->mu02*s2;
88 moments->nu30 = moments->mu30*s3; moments->nu21 = moments->mu21*s3; moments->nu12 = moments->mu12*s3; moments->nu03 = moments->mu03*s3;
93 static Moments contourMoments( const Mat& contour )
96 int lpt = contour.checkVector(2);
97 int is_float = contour.depth() == CV_32F;
98 const Point* ptsi = (const Point*)contour.data;
99 const Point2f* ptsf = (const Point2f*)contour.data;
101 CV_Assert( contour.depth() == CV_32S || contour.depth() == CV_32F );
106 double a00 = 0, a10 = 0, a01 = 0, a20 = 0, a11 = 0, a02 = 0, a30 = 0, a21 = 0, a12 = 0, a03 = 0;
107 double xi, yi, xi2, yi2, xi_1, yi_1, xi_12, yi_12, dxy, xii_1, yii_1;
111 xi_1 = ptsi[lpt-1].x;
112 yi_1 = ptsi[lpt-1].y;
116 xi_1 = ptsf[lpt-1].x;
117 yi_1 = ptsf[lpt-1].y;
123 for( int i = 0; i < lpt; i++ )
138 dxy = xi_1 * yi - xi * yi_1;
145 a20 += dxy * (xi_1 * xii_1 + xi2);
146 a11 += dxy * (xi_1 * (yii_1 + yi_1) + xi * (yii_1 + yi));
147 a02 += dxy * (yi_1 * yii_1 + yi2);
148 a30 += dxy * xii_1 * (xi_12 + xi2);
149 a03 += dxy * yii_1 * (yi_12 + yi2);
150 a21 += dxy * (xi_12 * (3 * yi_1 + yi) + 2 * xi * xi_1 * yii_1 +
151 xi2 * (yi_1 + 3 * yi));
152 a12 += dxy * (yi_12 * (3 * xi_1 + xi) + 2 * yi * yi_1 * xii_1 +
153 yi2 * (xi_1 + 3 * xi));
160 if( fabs(a00) > FLT_EPSILON )
162 double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60;
167 db1_6 = 0.16666666666666666666666666666667;
168 db1_12 = 0.083333333333333333333333333333333;
169 db1_24 = 0.041666666666666666666666666666667;
171 db1_60 = 0.016666666666666666666666666666667;
176 db1_6 = -0.16666666666666666666666666666667;
177 db1_12 = -0.083333333333333333333333333333333;
178 db1_24 = -0.041666666666666666666666666666667;
180 db1_60 = -0.016666666666666666666666666666667;
187 m.m20 = a20 * db1_12;
188 m.m11 = a11 * db1_24;
189 m.m02 = a02 * db1_12;
190 m.m30 = a30 * db1_20;
191 m.m21 = a21 * db1_60;
192 m.m12 = a12 * db1_60;
193 m.m03 = a03 * db1_20;
195 completeMomentState( &m );
/****************************************************************************************\
*                                Spatial Raster Moments                                  *
\****************************************************************************************/

// Primary template: fallback used when there is no SIMD specialization for the
// given (pixel type T, row accumulator WT, moment accumulator MT) combination.
// The return value is the index of the first pixel NOT processed by the SIMD
// path, so the scalar loop in momentsInTile() can finish the row.
// NOTE(review): the body is elided in this view; presumably it just returns 0
// (process nothing) — confirm against the full source.
template<typename T, typename WT, typename MT>
struct MomentsInTile_SSE
    int operator() (const T *, int, WT &, WT &, WT &, MT &)
// SSE2 specialization for 8-bit pixels. For one image row it accumulates
//   x0 = sum(p), x1 = sum(x*p), x2 = sum(x^2*p), x3 = sum(x^3*p)
// eight pixels per iteration.
// NOTE(review): the `template<>` header, constructor wrapper, `useSIMD` member
// declaration, the guard around the vector loop and the `return x;` are elided
// in this view — confirm against the full source.
struct MomentsInTile_SSE<uchar, int, int>
        // runtime check: only take the vector path when SSE2 is available
        useSIMD = checkHardwareSupport(CV_CPU_SSE2);

    // ptr: start of the row, len: row length in pixels.
    // Returns the index of the first pixel left for the scalar tail loop.
    int operator() (const uchar * ptr, int len, int & x0, int & x1, int & x2, int & x3)
        __m128i qx_init = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);  // x coordinates of the current 8 lanes
        __m128i dx = _mm_set1_epi16(8);                            // x advance per iteration
        __m128i z = _mm_setzero_si128(), qx0 = z, qx1 = z, qx2 = z, qx3 = z, qx = qx_init;

        for( ; x <= len - 8; x += 8 )
            // widen 8 pixels to 16-bit lanes
            __m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z);
            // sum of raw pixel values: SAD against zero horizontally adds the bytes
            qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z));
            __m128i px = _mm_mullo_epi16(p, qx);   // p*x (low 16 bits)
            __m128i sx = _mm_mullo_epi16(qx, qx);  // x^2
            qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx));   // += p*x
            qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx));   // += p*x^2
            qx3 = _mm_add_epi32(qx3, _mm_madd_epi16(px, sx));  // += p*x^3

            qx = _mm_add_epi16(qx, dx);

        // horizontal reduction of the four 32-bit partial sums in each register
        int CV_DECL_ALIGNED(16) buf[4];
        _mm_store_si128((__m128i*)buf, qx0);
        x0 = buf[0] + buf[1] + buf[2] + buf[3];
        _mm_store_si128((__m128i*)buf, qx1);
        x1 = buf[0] + buf[1] + buf[2] + buf[3];
        _mm_store_si128((__m128i*)buf, qx2);
        x2 = buf[0] + buf[1] + buf[2] + buf[3];
        _mm_store_si128((__m128i*)buf, qx3);
        x3 = buf[0] + buf[1] + buf[2] + buf[3];
// SSE4.1 specialization for 16-bit pixels. Same row sums as the uchar version
// (x0..x2 in 32-bit, x3 in 64-bit to avoid overflow), eight pixels per pass.
// NOTE(review): the `template<>` header, constructor, `useSIMD` declaration and
// the `return x;` are elided in this view — confirm against the full source.
struct MomentsInTile_SSE<ushort, int, int64>
        // _mm_mullo_epi32 requires SSE4.1
        useSIMD = checkHardwareSupport(CV_CPU_SSE4_1);

    // ptr: start of the row, len: row length in pixels.
    // Returns the index of the first pixel left for the scalar tail loop.
    int operator() (const ushort * ptr, int len, int & x0, int & x1, int & x2, int64 & x3)
        // v_ix0/v_ix1 hold the x coordinates of the low/high 4 lanes
        __m128i vx_init0 = _mm_setr_epi32(0, 1, 2, 3), vx_init1 = _mm_setr_epi32(4, 5, 6, 7),
            v_delta = _mm_set1_epi32(8), v_zero = _mm_setzero_si128(), v_x0 = v_zero,
            v_x1 = v_zero, v_x2 = v_zero, v_x3 = v_zero, v_ix0 = vx_init0, v_ix1 = vx_init1;

        for( ; x <= len - 8; x += 8 )
            __m128i v_src = _mm_loadu_si128((const __m128i *)(ptr + x));
            // widen 8 ushorts to two registers of 32-bit values
            __m128i v_src0 = _mm_unpacklo_epi16(v_src, v_zero), v_src1 = _mm_unpackhi_epi16(v_src, v_zero);

            v_x0 = _mm_add_epi32(v_x0, _mm_add_epi32(v_src0, v_src1));                  // += p
            __m128i v_x1_0 = _mm_mullo_epi32(v_src0, v_ix0), v_x1_1 = _mm_mullo_epi32(v_src1, v_ix1);
            v_x1 = _mm_add_epi32(v_x1, _mm_add_epi32(v_x1_0, v_x1_1));                  // += p*x

            __m128i v_2ix0 = _mm_mullo_epi32(v_ix0, v_ix0), v_2ix1 = _mm_mullo_epi32(v_ix1, v_ix1);  // x^2
            v_x2 = _mm_add_epi32(v_x2, _mm_add_epi32(_mm_mullo_epi32(v_2ix0, v_src0), _mm_mullo_epi32(v_2ix1, v_src1)));  // += p*x^2

            // p*x^3 can exceed 32 bits: widen the partial products to 64-bit lanes
            __m128i t = _mm_add_epi32(_mm_mullo_epi32(v_2ix0, v_x1_0), _mm_mullo_epi32(v_2ix1, v_x1_1));
            v_x3 = _mm_add_epi64(v_x3, _mm_add_epi64(_mm_unpacklo_epi32(t, v_zero), _mm_unpackhi_epi32(t, v_zero)));

            v_ix0 = _mm_add_epi32(v_ix0, v_delta);
            v_ix1 = _mm_add_epi32(v_ix1, v_delta);

        // horizontal reduction of the vector accumulators
        int CV_DECL_ALIGNED(16) buf[4];
        int64 CV_DECL_ALIGNED(16) buf64[2];

        _mm_store_si128((__m128i*)buf, v_x0);
        x0 = buf[0] + buf[1] + buf[2] + buf[3];
        _mm_store_si128((__m128i*)buf, v_x1);
        x1 = buf[0] + buf[1] + buf[2] + buf[3];
        _mm_store_si128((__m128i*)buf, v_x2);
        x2 = buf[0] + buf[1] + buf[2] + buf[3];

        _mm_store_si128((__m128i*)buf64, v_x3);
        x3 = buf64[0] + buf64[1];
// Computes the ten spatial moments m00..m03 of one image tile into moments[10]
// (order: m00, m10, m01, m20, m11, m02, m30, m21, m12, m03).
// T  = pixel type, WT = per-row accumulator type, MT = whole-tile accumulator
// type (wider, to avoid overflow across rows).
// NOTE(review): the declarations of x/y/x3, the scalar per-pixel inner loop,
// the mom[0..3]/mom[6] updates and the closing `#endif` are elided in this
// view — confirm against the full source.
template<typename T, typename WT, typename MT>
#if defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 9
// Workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60196
__attribute__((optimize("no-tree-vectorize")))
static void momentsInTile( const Mat& img, double* moments )
    Size size = img.size();
    MT mom[10] = {0,0,0,0,0,0,0,0,0,0};
    MomentsInTile_SSE<T, WT, MT> vop;   // SIMD row kernel (no-op for unsupported types)

    for( y = 0; y < size.height; y++ )
        const T* ptr = (const T*)(img.data + y*img.step);
        // per-row sums: x0 = sum(p), x1 = sum(x*p), x2 = sum(x^2*p)
        WT x0 = 0, x1 = 0, x2 = 0;
        // SIMD prefix; returns the index where the scalar tail must resume
        x = vop(ptr, size.width, x0, x1, x2, x3);

        for( ; x < size.width; x++ )

        // fold the row sums into the tile moments, weighted by the row index y
        WT py = y * x0, sy = y*y;

        mom[9] += ((MT)py) * sy;  // m03
        mom[8] += ((MT)x1) * sy;  // m12
        mom[7] += ((MT)x2) * y;   // m21
        mom[5] += x0 * sy;        // m02
        mom[4] += x1 * y;         // m11

    for( x = 0; x < 10; x++ )
        moments[x] = (double)mom[x];

// Dispatch type for the per-tile moment kernels selected in cv::moments().
typedef void (*MomentsInTileFunc)(const Mat& img, double* moments);
380 m00 = m10 = m01 = m20 = m11 = m02 = m30 = m21 = m12 = m03 =
381 mu20 = mu11 = mu02 = mu30 = mu21 = mu12 = mu03 =
382 nu20 = nu11 = nu02 = nu30 = nu21 = nu12 = nu03 = 0.;
385 Moments::Moments( double _m00, double _m10, double _m01, double _m20, double _m11,
386 double _m02, double _m30, double _m21, double _m12, double _m03 )
388 m00 = _m00; m10 = _m10; m01 = _m01;
389 m20 = _m20; m11 = _m11; m02 = _m02;
390 m30 = _m30; m21 = _m21; m12 = _m12; m03 = _m03;
392 double cx = 0, cy = 0, inv_m00 = 0;
393 if( std::abs(m00) > DBL_EPSILON )
396 cx = m10*inv_m00; cy = m01*inv_m00;
403 mu30 = m30 - cx*(3*mu20 + cx*m10);
404 mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20;
405 mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02;
406 mu03 = m03 - cy*(3*mu02 + cy*m01);
408 double inv_sqrt_m00 = std::sqrt(std::abs(inv_m00));
409 double s2 = inv_m00*inv_m00, s3 = s2*inv_sqrt_m00;
411 nu20 = mu20*s2; nu11 = mu11*s2; nu02 = mu02*s2;
412 nu30 = mu30*s3; nu21 = mu21*s3; nu12 = mu12*s3; nu03 = mu03*s3;
// OpenCL path for cv::moments: each TILE_SIZE x TILE_SIZE tile computes its
// ten local spatial moments on the device; the host then shifts each tile's
// moments to the global origin and accumulates them into m.
// Returns false when the kernel cannot be built/run, so the caller falls back
// to the CPU path.
// NOTE(review): the constant K (= 10, moments per tile), the kernel-emptiness
// check, the TILE_SIZE argument of format(), and the return statements are
// elided in this view — confirm against the full source.
static bool ocl_moments( InputArray _src, Moments& m, bool binary)
    const int TILE_SIZE = 32;
    ocl::Kernel k = ocl::Kernel("moments", ocl::imgproc::moments_oclsrc,
        format("-D TILE_SIZE=%d%s",
        binary ? " -D OP_MOMENTS_BINARY" : ""));

    UMat src = _src.getUMat();
    Size sz = src.size();
    // tile grid dimensions (rounded up to cover the whole image)
    int xtiles = (sz.width + TILE_SIZE-1)/TILE_SIZE;
    int ytiles = (sz.height + TILE_SIZE-1)/TILE_SIZE;
    int ntiles = xtiles*ytiles;
    // K int moments per tile
    UMat umbuf(1, ntiles*K, CV_32S);

    // one work-group column per tile, TILE_SIZE rows per group
    size_t globalsize[] = {xtiles, sz.height}, localsize[] = {1, TILE_SIZE};
    bool ok = k.args(ocl::KernelArg::ReadOnly(src),
                     ocl::KernelArg::PtrWriteOnly(umbuf),
                     xtiles).run(2, globalsize, localsize, true);

    Mat mbuf = umbuf.getMat(ACCESS_READ);
    for( int i = 0; i < ntiles; i++ )
        // (x, y) = tile origin in image coordinates
        double x = (i % xtiles)*TILE_SIZE, y = (i / xtiles)*TILE_SIZE;
        const int* mom = mbuf.ptr<int>() + i*K;
        double xm = x * mom[0], ym = y * mom[0];

        // accumulate moments computed in each tile
        // (each tile moment is taken about the tile origin; the binomial
        //  shift formulas below translate it to the image origin)

        // + m10 ( = m10' + x*m00' )
        m.m10 += mom[1] + xm;

        // + m01 ( = m01' + y*m00' )
        m.m01 += mom[2] + ym;

        // + m20 ( = m20' + 2*x*m10' + x*x*m00' )
        m.m20 += mom[3] + x * (mom[1] * 2 + xm);

        // + m11 ( = m11' + x*m01' + y*m10' + x*y*m00' )
        m.m11 += mom[4] + x * (mom[2] + ym) + y * mom[1];

        // + m02 ( = m02' + 2*y*m01' + y*y*m00' )
        m.m02 += mom[5] + y * (mom[2] * 2 + ym);

        // + m30 ( = m30' + 3*x*m20' + 3*x*x*m10' + x*x*x*m00' )
        m.m30 += mom[6] + x * (3. * mom[3] + x * (3. * mom[1] + xm));

        // + m21 ( = m21' + x*(2*m11' + 2*y*m10' + x*m01' + x*y*m00') + y*m20')
        m.m21 += mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3];

        // + m12 ( = m12' + y*(2*m11' + 2*x*m01' + y*m10' + x*y*m00') + x*m02')
        m.m12 += mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5];

        // + m03 ( = m03' + 3*y*m02' + 3*y*y*m01' + y*y*y*m00' )
        m.m03 += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym));
// Computes the spatial, central and normalized central moments of either a
// raster image (any single-channel depth; binary=true treats every non-zero
// pixel as 1) or a point set / contour (Nx2 CV_32S or CV_32F, dispatched to
// contourMoments). Tries OpenCL and (when enabled) IPP acceleration first,
// then falls back to tile-wise CPU accumulation.
// NOTE(review): this view elides several statements (the `Moments m;`
// declaration, empty-size early return, `else` branches, the per-tile func()
// invocation and mom[] accumulation loop, closing braces and the final
// `return m;`) — confirm each against the full source.
cv::Moments cv::moments( InputArray _src, bool binary )
    const int TILE_SIZE = 32;
    MomentsInTileFunc func = 0;
    // scratch buffer for the binarized copy of one tile
    uchar nzbuf[TILE_SIZE*TILE_SIZE];
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    Size size = _src.size();

    // empty input: return zeroed moments
    if( size.width <= 0 || size.height <= 0 )

    // OpenCL fast path for 8-bit single-channel UMat input;
    // on failure fall through to the CPU implementation
    if( !(ocl::useOpenCL() && type == CV_8UC1 &&
        _src.isUMat() && ocl_moments(_src, m, binary)) )

    Mat mat = _src.getMat();
    // an Nx2 vector of points is treated as a contour, not a raster
    if( mat.checkVector(2) >= 0 && (depth == CV_32F || depth == CV_32S))
        return contourMoments(mat);

    CV_Error( CV_StsBadArg, "Invalid image type (must be single-channel)" );

#if IPP_VERSION_X100 >= 801 && 0
    // IPP path (currently disabled by the `&& 0` above)
    IppiSize roi = { mat.cols, mat.rows };
    IppiMomentState_64f * moment = NULL;
    // ippiMomentInitAlloc_64f, ippiMomentFree_64f are deprecated in 8.1, but there are not another way
    // to initialize IppiMomentState_64f. When GetStateSize and Init functions will appear we have to
    // switch to them to avoid the deprecated allocator.
    CV_SUPPRESS_DEPRECATED_START
    if (ippiMomentInitAlloc_64f(&moment, ippAlgHintAccurate) >= 0)
        typedef IppStatus (CV_STDCALL * ippiMoments)(const void * pSrc, int srcStep, IppiSize roiSize, IppiMomentState_64f* pCtx);
        // select the IPP kernel matching the input type; 0 means "unsupported"
        ippiMoments ippFunc =
            type == CV_8UC1 ? (ippiMoments)ippiMoments64f_8u_C1R :
            type == CV_16UC1 ? (ippiMoments)ippiMoments64f_16u_C1R :
            type == CV_32FC1? (ippiMoments)ippiMoments64f_32f_C1R : 0;

        if (ippFunc(mat.data, (int)mat.step, roi, moment) >= 0)
            IppiPoint point = { 0, 0 };
            // spatial moments
            ippiGetSpatialMoment_64f(moment, 0, 0, 0, point, &m.m00);
            ippiGetSpatialMoment_64f(moment, 1, 0, 0, point, &m.m10);
            ippiGetSpatialMoment_64f(moment, 0, 1, 0, point, &m.m01);

            ippiGetSpatialMoment_64f(moment, 2, 0, 0, point, &m.m20);
            ippiGetSpatialMoment_64f(moment, 1, 1, 0, point, &m.m11);
            ippiGetSpatialMoment_64f(moment, 0, 2, 0, point, &m.m02);

            ippiGetSpatialMoment_64f(moment, 3, 0, 0, point, &m.m30);
            ippiGetSpatialMoment_64f(moment, 2, 1, 0, point, &m.m21);
            ippiGetSpatialMoment_64f(moment, 1, 2, 0, point, &m.m12);
            ippiGetSpatialMoment_64f(moment, 0, 3, 0, point, &m.m03);
            // central moments
            ippiGetCentralMoment_64f(moment, 2, 0, 0, &m.mu20);
            ippiGetCentralMoment_64f(moment, 1, 1, 0, &m.mu11);
            ippiGetCentralMoment_64f(moment, 0, 2, 0, &m.mu02);
            ippiGetCentralMoment_64f(moment, 3, 0, 0, &m.mu30);
            ippiGetCentralMoment_64f(moment, 2, 1, 0, &m.mu21);
            ippiGetCentralMoment_64f(moment, 1, 2, 0, &m.mu12);
            ippiGetCentralMoment_64f(moment, 0, 3, 0, &m.mu03);
            // normalized central moments
            ippiGetNormalizedCentralMoment_64f(moment, 2, 0, 0, &m.nu20);
            ippiGetNormalizedCentralMoment_64f(moment, 1, 1, 0, &m.nu11);
            ippiGetNormalizedCentralMoment_64f(moment, 0, 2, 0, &m.nu02);
            ippiGetNormalizedCentralMoment_64f(moment, 3, 0, 0, &m.nu30);
            ippiGetNormalizedCentralMoment_64f(moment, 2, 1, 0, &m.nu21);
            ippiGetNormalizedCentralMoment_64f(moment, 1, 2, 0, &m.nu12);
            ippiGetNormalizedCentralMoment_64f(moment, 0, 3, 0, &m.nu03);

            ippiMomentFree_64f(moment);
        ippiMomentFree_64f(moment);
    CV_SUPPRESS_DEPRECATED_END

    // select the per-tile kernel; binary mode always uses the uchar kernel
    // on the binarized scratch buffer
    if( binary || depth == CV_8U )
        func = momentsInTile<uchar, int, int>;
    else if( depth == CV_16U )
        func = momentsInTile<ushort, int, int64>;
    else if( depth == CV_16S )
        func = momentsInTile<short, int, int64>;
    else if( depth == CV_32F )
        func = momentsInTile<float, double, double>;
    else if( depth == CV_64F )
        func = momentsInTile<double, double, double>;
        CV_Error( CV_StsUnsupportedFormat, "" );

    // walk the image tile by tile (tiles at the borders may be smaller)
    for( int y = 0; y < size.height; y += TILE_SIZE )
        tileSize.height = std::min(TILE_SIZE, size.height - y);

        for( int x = 0; x < size.width; x += TILE_SIZE )
            tileSize.width = std::min(TILE_SIZE, size.width - x);
            Mat src(src0, cv::Rect(x, y, tileSize.width, tileSize.height));

            // binary mode: compare against 0 into the scratch buffer and
            // compute the moments of the resulting 0/255 mask
            cv::Mat tmp(tileSize, CV_8U, nzbuf);
            cv::compare( src, 0, tmp, CV_CMP_NE );

            for( int k = 0; k < 10; k++ )

            double xm = x * mom[0], ym = y * mom[0];

            // accumulate moments computed in each tile
            // (same tile-origin-to-image-origin shift as in ocl_moments)

            // + m10 ( = m10' + x*m00' )
            m.m10 += mom[1] + xm;

            // + m01 ( = m01' + y*m00' )
            m.m01 += mom[2] + ym;

            // + m20 ( = m20' + 2*x*m10' + x*x*m00' )
            m.m20 += mom[3] + x * (mom[1] * 2 + xm);

            // + m11 ( = m11' + x*m01' + y*m10' + x*y*m00' )
            m.m11 += mom[4] + x * (mom[2] + ym) + y * mom[1];

            // + m02 ( = m02' + 2*y*m01' + y*y*m00' )
            m.m02 += mom[5] + y * (mom[2] * 2 + ym);

            // + m30 ( = m30' + 3*x*m20' + 3*x*x*m10' + x*x*x*m00' )
            m.m30 += mom[6] + x * (3. * mom[3] + x * (3. * mom[1] + xm));

            // + m21 ( = m21' + x*(2*m11' + 2*y*m10' + x*m01' + x*y*m00') + y*m20')
            m.m21 += mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3];

            // + m12 ( = m12' + y*(2*m11' + 2*x*m01' + y*m10' + x*y*m00') + x*m02')
            m.m12 += mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5];

            // + m03 ( = m03' + 3*y*m02' + 3*y*y*m01' + y*y*y*m00' )
            m.m03 += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym));

    // derive central and normalized moments from the accumulated spatial ones
    completeMomentState( &m );
661 void cv::HuMoments( const Moments& m, double hu[7] )
663 double t0 = m.nu30 + m.nu12;
664 double t1 = m.nu21 + m.nu03;
666 double q0 = t0 * t0, q1 = t1 * t1;
668 double n4 = 4 * m.nu11;
669 double s = m.nu20 + m.nu02;
670 double d = m.nu20 - m.nu02;
673 hu[1] = d * d + n4 * m.nu11;
675 hu[5] = d * (q0 - q1) + n4 * t0 * t1;
680 q0 = m.nu30 - 3 * m.nu12;
681 q1 = 3 * m.nu21 - m.nu03;
683 hu[2] = q0 * q0 + q1 * q1;
684 hu[4] = q0 * t0 + q1 * t1;
685 hu[6] = q1 * t0 - q0 * t1;
688 void cv::HuMoments( const Moments& m, OutputArray _hu )
690 _hu.create(7, 1, CV_64F);
691 Mat hu = _hu.getMat();
692 CV_Assert( hu.isContinuous() );
693 HuMoments(m, (double*)hu.data);
697 CV_IMPL void cvMoments( const CvArr* arr, CvMoments* moments, int binary )
699 const IplImage* img = (const IplImage*)arr;
701 if( CV_IS_IMAGE(arr) && img->roi && img->roi->coi > 0 )
702 cv::extractImageCOI(arr, src, img->roi->coi-1);
704 src = cv::cvarrToMat(arr);
705 cv::Moments m = cv::moments(src, binary != 0);
706 CV_Assert( moments != 0 );
711 CV_IMPL double cvGetSpatialMoment( CvMoments * moments, int x_order, int y_order )
713 int order = x_order + y_order;
716 CV_Error( CV_StsNullPtr, "" );
717 if( (x_order | y_order) < 0 || order > 3 )
718 CV_Error( CV_StsOutOfRange, "" );
720 return (&(moments->m00))[order + (order >> 1) + (order > 2) * 2 + y_order];
724 CV_IMPL double cvGetCentralMoment( CvMoments * moments, int x_order, int y_order )
726 int order = x_order + y_order;
729 CV_Error( CV_StsNullPtr, "" );
730 if( (x_order | y_order) < 0 || order > 3 )
731 CV_Error( CV_StsOutOfRange, "" );
733 return order >= 2 ? (&(moments->m00))[4 + order * 3 + y_order] :
734 order == 0 ? moments->m00 : 0;
738 CV_IMPL double cvGetNormalizedCentralMoment( CvMoments * moments, int x_order, int y_order )
740 int order = x_order + y_order;
742 double mu = cvGetCentralMoment( moments, x_order, y_order );
743 double m00s = moments->inv_sqrt_m00;
745 while( --order >= 0 )
747 return mu * m00s * m00s;
751 CV_IMPL void cvGetHuMoments( CvMoments * mState, CvHuMoments * HuState )
753 if( !mState || !HuState )
754 CV_Error( CV_StsNullPtr, "" );
756 double m00s = mState->inv_sqrt_m00, m00 = m00s * m00s, s2 = m00 * m00, s3 = s2 * m00s;
758 double nu20 = mState->mu20 * s2,
759 nu11 = mState->mu11 * s2,
760 nu02 = mState->mu02 * s2,
761 nu30 = mState->mu30 * s3,
762 nu21 = mState->mu21 * s3, nu12 = mState->mu12 * s3, nu03 = mState->mu03 * s3;
764 double t0 = nu30 + nu12;
765 double t1 = nu21 + nu03;
767 double q0 = t0 * t0, q1 = t1 * t1;
769 double n4 = 4 * nu11;
770 double s = nu20 + nu02;
771 double d = nu20 - nu02;
774 HuState->hu2 = d * d + n4 * nu11;
775 HuState->hu4 = q0 + q1;
776 HuState->hu6 = d * (q0 - q1) + n4 * t0 * t1;
781 q0 = nu30 - 3 * nu12;
782 q1 = 3 * nu21 - nu03;
784 HuState->hu3 = q0 * q0 + q1 * q1;
785 HuState->hu5 = q0 * t0 + q1 * t1;
786 HuState->hu7 = q1 * t0 - q0 * t1;