--- /dev/null
- # elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_CVDEF_H__
+#define __OPENCV_CORE_CVDEF_H__
+
+#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER && _MSC_VER > 1300
+# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio warnings */
+#endif
+
+// undef problematic defines sometimes defined by system headers (windows.h in particular)
+#undef small
+#undef min
+#undef max
+#undef abs
+#undef Complex
+
+#if defined __ICL
+# define CV_ICC __ICL
+#elif defined __ICC
+# define CV_ICC __ICC
+#elif defined __ECL
+# define CV_ICC __ECL
+#elif defined __ECC
+# define CV_ICC __ECC
+#elif defined __INTEL_COMPILER
+# define CV_ICC __INTEL_COMPILER
+#endif
+
+#if defined CV_ICC && !defined CV_ENABLE_UNROLLED
+# define CV_ENABLE_UNROLLED 0
+#else
+# define CV_ENABLE_UNROLLED 1
+#endif
+
+#if (defined WIN32 || defined _WIN32 || defined WINCE || defined __CYGWIN__) && defined CVAPI_EXPORTS
+# define CV_EXPORTS __declspec(dllexport)
+#elif defined __GNUC__ && __GNUC__ >= 4
+# define CV_EXPORTS __attribute__ ((visibility ("default")))
+#else
+# define CV_EXPORTS
+#endif
+
+#ifndef CV_INLINE
+# if defined __cplusplus
+# define CV_INLINE static inline
+# elif defined _MSC_VER
+# define CV_INLINE __inline
+# else
+# define CV_INLINE static
+# endif
+#endif
+
+#ifndef CV_EXTERN_C
+# ifdef __cplusplus
+# define CV_EXTERN_C extern "C"
+# else
+# define CV_EXTERN_C
+# endif
+#endif
+
+/* CPU features and intrinsics support */
+#define CV_CPU_NONE 0
+#define CV_CPU_MMX 1
+#define CV_CPU_SSE 2
+#define CV_CPU_SSE2 3
+#define CV_CPU_SSE3 4
+#define CV_CPU_SSSE3 5
+#define CV_CPU_SSE4_1 6
+#define CV_CPU_SSE4_2 7
+#define CV_CPU_POPCNT 8
+#define CV_CPU_AVX 10
+#define CV_CPU_NEON 11
+#define CV_HARDWARE_MAX_FEATURE 255
+
+// do not include SSE/AVX/NEON headers for NVCC compiler
+#ifndef __CUDACC__
+
+#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
+# include <emmintrin.h>
+# define CV_SSE 1
+# define CV_SSE2 1
+# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include <pmmintrin.h>
+# define CV_SSE3 1
+# endif
+# if defined __SSSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include <tmmintrin.h>
+# define CV_SSSE3 1
+# endif
+# if defined __SSE4_1__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include <smmintrin.h>
+# define CV_SSE4_1 1
+# endif
+# if defined __SSE4_2__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include <nmmintrin.h>
+# define CV_SSE4_2 1
+# endif
+# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219)
+// MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX
+// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32
+# include <immintrin.h>
+# define CV_AVX 1
+# if defined(_XCR_XFEATURE_ENABLED_MASK)
+# define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK)
+# else
+# define __xgetbv() 0
+# endif
+# endif
+#endif
+
+#ifdef __ARM_NEON__
+# include <arm_neon.h>
+# define CV_NEON 1
+#endif
+
+#endif // __CUDACC__
+
+#ifndef CV_SSE
+# define CV_SSE 0
+#endif
+#ifndef CV_SSE2
+# define CV_SSE2 0
+#endif
+#ifndef CV_SSE3
+# define CV_SSE3 0
+#endif
+#ifndef CV_SSSE3
+# define CV_SSSE3 0
+#endif
+#ifndef CV_SSE4_1
+# define CV_SSE4_1 0
+#endif
+#ifndef CV_SSE4_2
+# define CV_SSE4_2 0
+#endif
+#ifndef CV_AVX
+# define CV_AVX 0
+#endif
+#ifndef CV_NEON
+# define CV_NEON 0
+#endif
+
+/* primitive types */
+/*
+ schar - signed 1 byte integer
+ uchar - unsigned 1 byte integer
+ short - signed 2 byte integer
+ ushort - unsigned 2 byte integer
+ int - signed 4 byte integer
+ uint - unsigned 4 byte integer
+ int64 - signed 8 byte integer
+ uint64 - unsigned 8 byte integer
+*/
+
+#if !defined _MSC_VER && !defined __BORLANDC__
+# if defined __cplusplus && __cplusplus >= 201103L
+# include <cstdint>
+# else
+# include <stdint.h>
+# endif
+#else
+ typedef unsigned uint;
+#endif
+
+typedef signed char schar;
+
+#ifndef __IPL_H__
+ typedef unsigned char uchar;
+ typedef unsigned short ushort;
+#endif
+
+#if defined _MSC_VER || defined __BORLANDC__
+ typedef __int64 int64;
+ typedef unsigned __int64 uint64;
+# define CV_BIG_INT(n) n##I64
+# define CV_BIG_UINT(n) n##UI64
+#else
+ typedef int64_t int64;
+ typedef uint64_t uint64;
+# define CV_BIG_INT(n) n##LL
+# define CV_BIG_UINT(n) n##ULL
+#endif
+
+/* special informative macros for wrapper generators */
+#define CV_EXPORTS_W CV_EXPORTS
+#define CV_EXPORTS_W_SIMPLE CV_EXPORTS
+#define CV_EXPORTS_AS(synonym) CV_EXPORTS
+#define CV_EXPORTS_W_MAP CV_EXPORTS
+#define CV_IN_OUT
+#define CV_OUT
+#define CV_PROP
+#define CV_PROP_RW
+#define CV_WRAP
+#define CV_WRAP_AS(synonym)
+
+/* fundamental constants */
+#define CV_PI 3.1415926535897932384626433832795
+#define CV_LOG2 0.69314718055994530941723212145818
+
+/****************************************************************************************\
+* Matrix type (Mat) *
+\****************************************************************************************/
+
+#define CV_CN_MAX 512
+#define CV_CN_SHIFT 3
+#define CV_DEPTH_MAX (1 << CV_CN_SHIFT)
+
+#define CV_8U 0
+#define CV_8S 1
+#define CV_16U 2
+#define CV_16S 3
+#define CV_32S 4
+#define CV_32F 5
+#define CV_64F 6
+#define CV_USRTYPE1 7
+
+#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1)
+#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK)
+
+#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT))
+#define CV_MAKE_TYPE CV_MAKETYPE
+
+#define CV_8UC1 CV_MAKETYPE(CV_8U,1)
+#define CV_8UC2 CV_MAKETYPE(CV_8U,2)
+#define CV_8UC3 CV_MAKETYPE(CV_8U,3)
+#define CV_8UC4 CV_MAKETYPE(CV_8U,4)
+#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n))
+
+#define CV_8SC1 CV_MAKETYPE(CV_8S,1)
+#define CV_8SC2 CV_MAKETYPE(CV_8S,2)
+#define CV_8SC3 CV_MAKETYPE(CV_8S,3)
+#define CV_8SC4 CV_MAKETYPE(CV_8S,4)
+#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n))
+
+#define CV_16UC1 CV_MAKETYPE(CV_16U,1)
+#define CV_16UC2 CV_MAKETYPE(CV_16U,2)
+#define CV_16UC3 CV_MAKETYPE(CV_16U,3)
+#define CV_16UC4 CV_MAKETYPE(CV_16U,4)
+#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n))
+
+#define CV_16SC1 CV_MAKETYPE(CV_16S,1)
+#define CV_16SC2 CV_MAKETYPE(CV_16S,2)
+#define CV_16SC3 CV_MAKETYPE(CV_16S,3)
+#define CV_16SC4 CV_MAKETYPE(CV_16S,4)
+#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n))
+
+#define CV_32SC1 CV_MAKETYPE(CV_32S,1)
+#define CV_32SC2 CV_MAKETYPE(CV_32S,2)
+#define CV_32SC3 CV_MAKETYPE(CV_32S,3)
+#define CV_32SC4 CV_MAKETYPE(CV_32S,4)
+#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n))
+
+#define CV_32FC1 CV_MAKETYPE(CV_32F,1)
+#define CV_32FC2 CV_MAKETYPE(CV_32F,2)
+#define CV_32FC3 CV_MAKETYPE(CV_32F,3)
+#define CV_32FC4 CV_MAKETYPE(CV_32F,4)
+#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n))
+
+#define CV_64FC1 CV_MAKETYPE(CV_64F,1)
+#define CV_64FC2 CV_MAKETYPE(CV_64F,2)
+#define CV_64FC3 CV_MAKETYPE(CV_64F,3)
+#define CV_64FC4 CV_MAKETYPE(CV_64F,4)
+#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n))
+
+#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT)
+#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1)
+#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1)
+#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK)
+#define CV_MAT_CONT_FLAG_SHIFT 14
+#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT)
+#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG)
+#define CV_IS_CONT_MAT CV_IS_MAT_CONT
+#define CV_SUBMAT_FLAG_SHIFT 15
+#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT)
+#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG)
+
+/* Size of each channel item,
+ 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */
+#define CV_ELEM_SIZE1(type) \
+ ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15)
+
+/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */
+#define CV_ELEM_SIZE(type) \
+ (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3))
+
+
+/****************************************************************************************\
+* fast math *
+\****************************************************************************************/
+
+#if defined __BORLANDC__
+# include <fastmath.h>
+#elif defined __cplusplus
+# include <cmath>
+#else
+# include <math.h>
+#endif
+
+#ifndef MIN
+# define MIN(a,b) ((a) > (b) ? (b) : (a))
+#endif
+
+#ifndef MAX
+# define MAX(a,b) ((a) < (b) ? (b) : (a))
+#endif
+
+#ifdef HAVE_TEGRA_OPTIMIZATION
+# include "tegra_round.hpp"
+#endif
+
+CV_INLINE int cvRound( double value )
+{
+#if ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)
+ __m128d t = _mm_set_sd( value );
+ return _mm_cvtsd_si32(t);
+#elif defined _MSC_VER && defined _M_IX86
+ int t;
+ __asm
+ {
+ fld value;
+ fistp t;
+ }
+ return t;
+#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION
+ TEGRA_ROUND(value);
+#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__
+# ifdef HAVE_TEGRA_OPTIMIZATION
+ TEGRA_ROUND(value);
+# else
+ return (int)lrint(value);
+# endif
+#else
+ double intpart, fractpart;
+ fractpart = modf(value, &intpart);
+ if ((fabs(fractpart) != 0.5) || ((((int)intpart) % 2) != 0))
+ return (int)(value + (value >= 0 ? 0.5 : -0.5));
+ else
+ return (int)intpart;
+#endif
+}
+
+CV_INLINE int cvFloor( double value )
+{
+#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)
+ __m128d t = _mm_set_sd( value );
+ int i = _mm_cvtsd_si32(t);
+ return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i)));
+#elif defined __GNUC__
+ int i = (int)value;
+ return i - (i > value);
+#else
+ int i = cvRound(value);
+ float diff = (float)(value - i);
+ return i - (diff < 0);
+#endif
+}
+
+CV_INLINE int cvCeil( double value )
+{
+#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__)) && !defined(__CUDACC__)
+ __m128d t = _mm_set_sd( value );
+ int i = _mm_cvtsd_si32(t);
+ return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t));
+#elif defined __GNUC__
+ int i = (int)value;
+ return i + (i < value);
+#else
+ int i = cvRound(value);
+ float diff = (float)(i - value);
+ return i + (diff < 0);
+#endif
+}
+
+CV_INLINE int cvIsNaN( double value )
+{
+ union { uint64 u; double f; } ieee754;
+ ieee754.f = value;
+ return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) +
+ ((unsigned)ieee754.u != 0) > 0x7ff00000;
+}
+
+CV_INLINE int cvIsInf( double value )
+{
+ union { uint64 u; double f; } ieee754;
+ ieee754.f = value;
+ return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 &&
+ (unsigned)ieee754.u == 0;
+}
+
+/****************************************************************************************\
+* exchange-add operation for atomic operations on reference counters *
+\****************************************************************************************/
+
+#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)
+ // atomic increment on the linux version of the Intel(tm) compiler
+# define CV_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
+#elif defined __GNUC__
+# if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__
+# ifdef __ATOMIC_ACQ_REL
+# define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)
+# else
+# define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4)
+# endif
+# else
+# if defined __ATOMIC_ACQ_REL && !defined __clang__
+ // version for gcc >= 4.7
+# define CV_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL)
+# else
+# define CV_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta))
+# endif
+# endif
+#elif (defined WIN32 || defined _WIN32 || defined WINCE) && (!defined RC_INVOKED)
+# if !defined(_M_AMD64) && !defined(_M_IA64) && !defined(_M_ARM)
+ CV_EXTERN_C __declspec(dllimport) long __stdcall InterlockedExchangeAdd(long volatile *Addend, long Value);
+# define CV_XADD(addr, delta) (int)InterlockedExchangeAdd((long volatile*)addr, delta)
+# else
+ CV_EXTERN_C long _InterlockedExchangeAdd (long volatile *Addend, long Value);
+# pragma intrinsic(_InterlockedExchangeAdd)
+# define CV_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta)
+# endif
+#else
+ CV_INLINE CV_XADD(int* addr, int delta) { int tmp = *addr; *addr += delta; return tmp; }
+#endif
+
+#endif // __OPENCV_CORE_CVDEF_H__
}
-/* area of a whole sequence */
-static CvStatus
-icvContourArea( const CvSeq* contour, double *area )
+cv::RotatedRect cv::fitEllipse( InputArray _points )
{
- if( contour->total )
+ Mat points = _points.getMat();
+ int i, n = points.checkVector(2);
+ int depth = points.depth();
+ CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));
+
+ RotatedRect box;
+
+ if( n < 5 )
+ CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );
+
+ // New fitellipse algorithm, contributed by Dr. Daniel Weiss
+ Point2f c(0,0);
+ double gfp[5], rp[5], t;
- const double min_eps = 1e-6;
+    const double min_eps = 1e-8;
+ bool is_float = depth == CV_32F;
+ const Point* ptsi = (const Point*)points.data;
+ const Point2f* ptsf = (const Point2f*)points.data;
+
+ AutoBuffer<double> _Ad(n*5), _bd(n);
+ double *Ad = _Ad, *bd = _bd;
+
+ // first fit for parameters A - E
+ Mat A( n, 5, CV_64F, Ad );
+ Mat b( n, 1, CV_64F, bd );
+ Mat x( 5, 1, CV_64F, gfp );
+
+ for( i = 0; i < n; i++ )
{
- CvSeqReader reader;
- int lpt = contour->total;
- double a00 = 0, xi_1, yi_1;
- int is_float = CV_SEQ_ELTYPE(contour) == CV_32FC2;
+ Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
+ c += p;
+ }
+ c.x /= n;
+ c.y /= n;
- cvStartReadSeq( contour, &reader, 0 );
+ for( i = 0; i < n; i++ )
+ {
+ Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
+ p -= c;
+
+ bd[i] = 10000.0; // 1.0?
+ Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
+ Ad[i*5 + 1] = -(double)p.y * p.y;
+ Ad[i*5 + 2] = -(double)p.x * p.y;
+ Ad[i*5 + 3] = p.x;
+ Ad[i*5 + 4] = p.y;
+ }
+
+ solve(A, b, x, DECOMP_SVD);
+
+ // now use general-form parameters A - E to find the ellipse center:
+ // differentiate general form wrt x/y to get two equations for cx and cy
+ A = Mat( 2, 2, CV_64F, Ad );
+ b = Mat( 2, 1, CV_64F, bd );
+ x = Mat( 2, 1, CV_64F, rp );
+ Ad[0] = 2 * gfp[0];
+ Ad[1] = Ad[2] = gfp[2];
+ Ad[3] = 2 * gfp[1];
+ bd[0] = gfp[3];
+ bd[1] = gfp[4];
+ solve( A, b, x, DECOMP_SVD );
+
+ // re-fit for parameters A - C with those center coordinates
+ A = Mat( n, 3, CV_64F, Ad );
+ b = Mat( n, 1, CV_64F, bd );
+ x = Mat( 3, 1, CV_64F, gfp );
+ for( i = 0; i < n; i++ )
+ {
+ Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
+ p -= c;
+ bd[i] = 1.0;
+ Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
+ Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
+ Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
+ }
+ solve(A, b, x, DECOMP_SVD);
+
+ // store angle and radii
+ rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
+ t = sin(-2.0 * rp[4]);
+ if( fabs(t) > fabs(gfp[2])*min_eps )
+ t = gfp[2]/t;
+ else
+ t = gfp[1] - gfp[0];
+ rp[2] = fabs(gfp[0] + gfp[1] - t);
+ if( rp[2] > min_eps )
+ rp[2] = std::sqrt(2.0 / rp[2]);
+ rp[3] = fabs(gfp[0] + gfp[1] + t);
+ if( rp[3] > min_eps )
+ rp[3] = std::sqrt(2.0 / rp[3]);
+
+ box.center.x = (float)rp[0] + c.x;
+ box.center.y = (float)rp[1] + c.y;
+ box.size.width = (float)(rp[2]*2);
+ box.size.height = (float)(rp[3]*2);
+ if( box.size.width > box.size.height )
+ {
+ float tmp;
+ CV_SWAP( box.size.width, box.size.height, tmp );
+ box.angle = (float)(90 + rp[4]*180/CV_PI);
+ }
+ if( box.angle < -180 )
+ box.angle += 360;
+ if( box.angle > 360 )
+ box.angle -= 360;
+
+ return box;
+}
+
+
+namespace cv
+{
+
+// Calculates bounding rectagnle of a point set or retrieves already calculated
+static Rect pointSetBoundingRect( const Mat& points )
+{
+ int npoints = points.checkVector(2);
+ int depth = points.depth();
+ CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_32S));
+ int xmin = 0, ymin = 0, xmax = -1, ymax = -1, i;
+ bool is_float = depth == CV_32F;
+
+ if( npoints == 0 )
+ return Rect();
+
+ const Point* pts = (const Point*)points.data;
+ Point pt = pts[0];
+
+#if CV_SSE4_2
+ if(cv::checkHardwareSupport(CV_CPU_SSE4_2))
+ {
if( !is_float )
{
- xi_1 = ((CvPoint*)(reader.ptr))->x;
- yi_1 = ((CvPoint*)(reader.ptr))->y;
+ __m128i minval, maxval;
+ minval = maxval = _mm_loadl_epi64((const __m128i*)(&pt)); //min[0]=pt.x, min[1]=pt.y
+
+ for( i = 1; i < npoints; i++ )
+ {
+ __m128i ptXY = _mm_loadl_epi64((const __m128i*)&pts[i]);
+ minval = _mm_min_epi32(ptXY, minval);
+ maxval = _mm_max_epi32(ptXY, maxval);
+ }
+ xmin = _mm_cvtsi128_si32(minval);
+ ymin = _mm_cvtsi128_si32(_mm_srli_si128(minval, 4));
+ xmax = _mm_cvtsi128_si32(maxval);
+ ymax = _mm_cvtsi128_si32(_mm_srli_si128(maxval, 4));
}
else
{
#include <Python.h>
-#if !PYTHON_USE_NUMPY
-#error "The module can only be built if NumPy is available"
-#endif
-
#define MODULESTR "cv2"
-
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#include "numpy/ndarrayobject.h"
+#include <numpy/ndarrayobject.h>
-#include "opencv2/core/core.hpp"
-#include "opencv2/contrib/contrib.hpp"
+#include "opencv2/core.hpp"
+#include "opencv2/core/utility.hpp"
+#include "opencv2/contrib.hpp"
#include "opencv2/flann/miniflann.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
-#include "opencv2/calib3d/calib3d.hpp"
-#include "opencv2/ml/ml.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/objdetect/objdetect.hpp"
-#include "opencv2/video/tracking.hpp"
-#include "opencv2/video/background_segm.hpp"
-#include "opencv2/photo/photo.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/calib3d.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/objdetect.hpp"
+#include "opencv2/softcascade.hpp"
+#include "opencv2/video.hpp"
+#include "opencv2/photo.hpp"
+#include "opencv2/highgui.hpp"
-#include "opencv2/opencv_modules.hpp"
+#include "opencv2/ml.hpp"
+#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE
-# include "opencv2/nonfree/nonfree.hpp"
+# include "opencv2/nonfree.hpp"
#endif
+#include "pycompat.hpp"
+
using cv::flann::IndexParams;
using cv::flann::SearchParams;
for( i = 0; i < dims; i++ )
_sizes[i] = sizes[i];
if( cn > 1 )
- {
- /*if( _sizes[dims-1] == 1 )
- _sizes[dims-1] = cn;
- else*/
- _sizes[dims++] = cn;
- }
+ _sizes[dims++] = cn;
PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
if(!o)
- CV_Error_(CV_StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
+ CV_Error_(Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
refcount = refcountFromPyObject(o);
- npy_intp* _strides = PyArray_STRIDES(o);
+ npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
for( i = 0; i < dims - (cn > 1); i++ )
step[i] = (size_t)_strides[i];
- datastart = data = (uchar*)PyArray_DATA(o);
+ datastart = data = (uchar*)PyArray_DATA((PyArrayObject*) o);
}
void deallocate(int* refcount, uchar*, uchar*)
}
}
- int ndims = PyArray_NDIM(o);
+#ifndef CV_MAX_DIM
+ const int CV_MAX_DIM = 32;
+#endif
+
+ int ndims = PyArray_NDIM(oarr);
if(ndims >= CV_MAX_DIM)
{
failmsg("%s dimensionality (=%d) is too high", info.name, ndims);
}
int size[CV_MAX_DIM+1];
- size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
+ size_t step[CV_MAX_DIM+1];
+ size_t elemsize = CV_ELEM_SIZE1(type);
- const npy_intp* _sizes = PyArray_DIMS(o);
- const npy_intp* _strides = PyArray_STRIDES(o);
+ const npy_intp* _sizes = PyArray_DIMS(oarr);
+ const npy_intp* _strides = PyArray_STRIDES(oarr);
bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
for( int i = ndims-1; i >= 0 && !needcopy; i-- )